From 6db3a96a0ededaa95c73f2d2cde9a85557892ea1 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen
Date: Mon, 8 Sep 2025 11:01:01 +0800
Subject: [PATCH 01/16] anolis: net: wangxun: remove wangxun txgbe inbox
 drivers

ANBZ: #26488

OLK-6.6 carried the wangxun txgbe driver from Linux upstream. That
driver lacks many capabilities and is limited by the upstream
integration strategy. Wangxun wants to merge its new drivers, so
remove the inbox driver first.

Signed-off-by: Duanqiang Wen
---
 drivers/net/ethernet/wangxun/Kconfig          |  25 -
 drivers/net/ethernet/wangxun/Makefile         |   1 -
 drivers/net/ethernet/wangxun/txgbe/Makefile   |  12 -
 .../ethernet/wangxun/txgbe/txgbe_ethtool.c    | 102 ---
 .../ethernet/wangxun/txgbe/txgbe_ethtool.h    |   9 -
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 216 -----
 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h |  12 -
 .../net/ethernet/wangxun/txgbe/txgbe_main.c   | 840 ------------------
 .../net/ethernet/wangxun/txgbe/txgbe_phy.c    | 792 -----------------
 .../net/ethernet/wangxun/txgbe/txgbe_phy.h    |  10 -
 .../net/ethernet/wangxun/txgbe/txgbe_type.h   | 183 ----
 11 files changed, 2202 deletions(-)
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/Makefile
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
 delete mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_type.h

diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 85cdbdd44fec..457113f57ec1 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -37,29 +37,4 @@ config NGBE
 	  To compile this driver as a module, choose M here. The module
 	  will be called ngbe.
 
-config TXGBE
-	tristate "Wangxun(R) 10GbE PCI Express adapters support"
-	depends on PCI
-	depends on COMMON_CLK
-	select MARVELL_10G_PHY
-	select REGMAP
-	select I2C
-	select I2C_DESIGNWARE_PLATFORM
-	select PHYLINK
-	select HWMON if TXGBE=y
-	select SFP
-	select GPIOLIB
-	select GPIOLIB_IRQCHIP
-	select PCS_XPCS
-	select LIBWX
-	help
-	  This driver supports Wangxun(R) 10GbE PCI Express family of
-	  adapters.
-
-	  More specific information on configuring the driver is in
-	  .
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called txgbe.
-
 endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index ca19311dbe38..f300fc503b96 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -4,5 +4,4 @@
 #
 
 obj-$(CONFIG_LIBWX) += libwx/
-obj-$(CONFIG_TXGBE) += txgbe/
 obj-$(CONFIG_NGBE) += ngbe/
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
deleted file mode 100644
index 7507f762edfe..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
-#
-# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver
-#
-
-obj-$(CONFIG_TXGBE) += txgbe.o
-
-txgbe-objs := txgbe_main.o \
-              txgbe_hw.o \
-              txgbe_phy.o \
-              txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
deleted file mode 100644
index 084e2faf9db1..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
-
-#include
-#include
-#include
-
-#include "../libwx/wx_ethtool.h"
-#include "../libwx/wx_type.h"
-#include "../libwx/wx_lib.h"
-#include "txgbe_type.h"
-#include "txgbe_ethtool.h"
-
-static int txgbe_set_ringparam(struct net_device *netdev,
-			       struct ethtool_ringparam *ring,
-			       struct kernel_ethtool_ringparam *kernel_ring,
-			       struct netlink_ext_ack *extack)
-{
-	struct wx *wx = netdev_priv(netdev);
-	u32 new_rx_count, new_tx_count;
-	struct wx_ring *temp_ring;
-	int i;
-
-	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
-	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
-
-	new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	if (new_tx_count == wx->tx_ring_count &&
-	    new_rx_count == wx->rx_ring_count)
-		return 0;
-
-	if (!netif_running(wx->netdev)) {
-		for (i = 0; i < wx->num_tx_queues; i++)
-			wx->tx_ring[i]->count = new_tx_count;
-		for (i = 0; i < wx->num_rx_queues; i++)
-			wx->rx_ring[i]->count = new_rx_count;
-		wx->tx_ring_count = new_tx_count;
-		wx->rx_ring_count = new_rx_count;
-
-		return 0;
-	}
-
-	/* allocate temporary buffer to store rings in */
-	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
-	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
-
-	txgbe_down(wx);
-
-	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
-	kvfree(temp_ring);
-
-	txgbe_up(wx);
-
-	return 0;
-}
-
-static int txgbe_set_channels(struct net_device *dev,
-			      struct ethtool_channels *ch)
-{
-	int err;
-
-	err = wx_set_channels(dev, ch);
-	if (err < 0)
-		return err;
-
-	/* use setup TC to update any traffic class queue mapping */
-	return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
-}
-
-static const struct ethtool_ops txgbe_ethtool_ops = {
-	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
-				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
-	.get_drvinfo		= wx_get_drvinfo,
-	.nway_reset		= wx_nway_reset,
-	.get_link		= ethtool_op_get_link,
-	.get_link_ksettings	= wx_get_link_ksettings,
-	.set_link_ksettings	= wx_set_link_ksettings,
-	.get_sset_count		= wx_get_sset_count,
-	.get_strings		= wx_get_strings,
-	.get_ethtool_stats	= wx_get_ethtool_stats,
-	.get_eth_mac_stats	= wx_get_mac_stats,
-	.get_pause_stats	= wx_get_pause_stats,
-	.get_pauseparam		= wx_get_pauseparam,
-	.set_pauseparam		= wx_set_pauseparam,
-	.get_ringparam		= wx_get_ringparam,
-	.set_ringparam		= txgbe_set_ringparam,
-	.get_coalesce		= wx_get_coalesce,
-	.set_coalesce		= wx_set_coalesce,
-	.get_msglevel		= wx_get_msglevel,
-	.set_msglevel		= wx_set_msglevel,
-	.get_channels		= wx_get_channels,
-	.set_channels		= txgbe_set_channels,
-};
-
-void txgbe_set_ethtool_ops(struct net_device *netdev)
-{
-	netdev->ethtool_ops = &txgbe_ethtool_ops;
-}
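As context for the hunk above: txgbe_set_ringparam() first clamps the
user-requested descriptor count into the supported range and then rounds
it up to the multiple the hardware requires. The following stand-alone C
sketch models that sizing step in userspace; the WX_* limit values here
are placeholder assumptions for illustration, not the driver's real
constants.

/* Illustrative only -- not part of the patch. Models the clamp-and-align
 * descriptor sizing performed by txgbe_set_ringparam(). Limit values are
 * assumed placeholders.
 */
#include <stdio.h>

#define WX_MIN_TXD 64                   /* assumed lower bound */
#define WX_MAX_TXD 8192                 /* assumed upper bound */
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 /* assumed alignment step */

/* Round up to the next multiple of power-of-two 'a', like kernel ALIGN(). */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int requested = 1022; /* e.g. from 'ethtool -G <dev> tx 1022' */
	unsigned int count = clamp_u32(requested, WX_MIN_TXD, WX_MAX_TXD);

	count = align_up(count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
	printf("requested %u -> programmed %u descriptors\n", requested, count);
	return 0;
}

Compiled as plain C, this prints "requested 1022 -> programmed 1024
descriptors", mirroring how the driver silently rounds the request.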
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
deleted file mode 100644
index ace1b3571012..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _TXGBE_ETHTOOL_H_
-#define _TXGBE_ETHTOOL_H_
-
-void txgbe_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
deleted file mode 100644
index d6b2b3c781b6..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "../libwx/wx_type.h"
-#include "../libwx/wx_hw.h"
-#include "txgbe_type.h"
-#include "txgbe_hw.h"
-
-/**
- * txgbe_disable_sec_tx_path - Stops the transmit data path
- * @wx: pointer to hardware structure
- *
- * Stops the transmit data path and waits for the HW to internally empty
- * the tx security block
- **/
-int txgbe_disable_sec_tx_path(struct wx *wx)
-{
-	int val;
-
-	wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS);
-	return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY,
-				 1000, 20000, false, wx, WX_TSC_ST);
-}
-
-/**
- * txgbe_enable_sec_tx_path - Enables the transmit data path
- * @wx: pointer to hardware structure
- *
- * Enables the transmit data path.
- **/
-void txgbe_enable_sec_tx_path(struct wx *wx)
-{
-	wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0);
-	WX_WRITE_FLUSH(wx);
-}
-
-/**
- * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
- * @wx: pointer to hardware structure
- *
- * Inits the thermal sensor thresholds according to the NVM map
- * and save off the threshold and location values into mac.thermal_sensor_data
- **/
-static void txgbe_init_thermal_sensor_thresh(struct wx *wx)
-{
-	struct wx_thermal_sensor_data *data = &wx->mac.sensor;
-
-	memset(data, 0, sizeof(struct wx_thermal_sensor_data));
-
-	/* Only support thermal sensors attached to SP physical port 0 */
-	if (wx->bus.func)
-		return;
-
-	wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
-
-	wr32(wx, WX_TS_INT_EN,
-	     WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN);
-	wr32(wx, WX_TS_EN, WX_TS_EN_ENA);
-
-	data->alarm_thresh = 100;
-	wr32(wx, WX_TS_ALARM_THRE, 677);
-	data->dalarm_thresh = 90;
-	wr32(wx, WX_TS_DALARM_THRE, 614);
-}
-
-/**
- * txgbe_calc_eeprom_checksum - Calculates and returns the checksum
- * @wx: pointer to hardware structure
- * @checksum: pointer to cheksum
- *
- * Returns a negative error code on error
- **/
-static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
-{
-	u16 *eeprom_ptrs = NULL;
-	u16 *local_buffer;
-	int status;
-	u16 i;
-
-	wx_init_eeprom_params(wx);
-
-	eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16),
-				     GFP_KERNEL);
-	if (!eeprom_ptrs)
-		return -ENOMEM;
-	/* Read pointer area */
-	status = wx_read_ee_hostif_buffer(wx, 0, TXGBE_EEPROM_LAST_WORD, eeprom_ptrs);
-	if (status != 0) {
-		wx_err(wx, "Failed to read EEPROM image\n");
-		kvfree(eeprom_ptrs);
-		return status;
-	}
-	local_buffer = eeprom_ptrs;
-
-	for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
-		if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
-			*checksum += local_buffer[i];
-
-	if (eeprom_ptrs)
-		kvfree(eeprom_ptrs);
-
-	*checksum = TXGBE_EEPROM_SUM - *checksum;
-
-	return 0;
-}
-
-/**
- * txgbe_validate_eeprom_checksum - Validate EEPROM checksum
- * @wx: pointer to hardware structure
- * @checksum_val: calculated checksum
- *
- * Performs checksum calculation and validates the EEPROM checksum. If the
- * caller does not need checksum_val, the value can be NULL.
- **/
-int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val)
-{
-	u16 read_checksum = 0;
-	u16 checksum;
-	int status;
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = wx_read_ee_hostif(wx, 0, &checksum);
-	if (status) {
-		wx_err(wx, "EEPROM read failed\n");
-		return status;
-	}
-
-	checksum = 0;
-	status = txgbe_calc_eeprom_checksum(wx, &checksum);
-	if (status != 0)
-		return status;
-
-	status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset +
-				   TXGBE_EEPROM_CHECKSUM, &read_checksum);
-	if (status != 0)
-		return status;
-
-	/* Verify read checksum from EEPROM is the same as
-	 * calculated checksum
-	 */
-	if (read_checksum != checksum) {
-		status = -EIO;
-		wx_err(wx, "Invalid EEPROM checksum\n");
-	}
-
-	/* If the user cares, return the calculated checksum */
-	if (checksum_val)
-		*checksum_val = checksum;
-
-	return status;
-}
-
-static void txgbe_reset_misc(struct wx *wx)
-{
-	wx_reset_misc(wx);
-	txgbe_init_thermal_sensor_thresh(wx);
-}
-
-/**
- * txgbe_reset_hw - Perform hardware reset
- * @wx: pointer to wx structure
- *
- * Resets the hardware by resetting the transmit and receive units, masks
- * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- * reset.
- **/
-int txgbe_reset_hw(struct wx *wx)
-{
-	int status;
-
-	/* Call adapter stop to disable tx/rx and clear interrupts */
-	status = wx_stop_adapter(wx);
-	if (status != 0)
-		return status;
-
-	if (wx->media_type != sp_media_copper) {
-		u32 val;
-
-		val = WX_MIS_RST_LAN_RST(wx->bus.func);
-		wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST));
-		WX_WRITE_FLUSH(wx);
-		usleep_range(10, 100);
-	}
-
-	status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func));
-	if (status != 0)
-		return status;
-
-	txgbe_reset_misc(wx);
-
-	wx_clear_hw_cntrs(wx);
-
-	/* Store the permanent mac address */
-	wx_get_mac_addr(wx, wx->mac.perm_addr);
-
-	/* Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table. Also reset num_rar_entries to 128,
-	 * since we modify this value when programming the SAN MAC address.
-	 */
-	wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
-	wx_init_rx_addrs(wx);
-
-	pci_set_master(wx->pdev);
-
-	return 0;
-}
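The EEPROM code deleted above uses a complement checksum: every 16-bit
word of the image except the checksum slot is summed with 16-bit
wraparound, and the stored checksum is TXGBE_EEPROM_SUM (0xBABA) minus
that total, so recomputing during validation must reproduce the stored
word. A minimal self-contained model, with a toy 8-word image standing
in for the real 0x800-word EEPROM and an arbitrary slot index standing
in for sw_region_offset + TXGBE_EEPROM_CHECKSUM:

/* Illustrative only -- not part of the patch. Models the complement
 * checksum scheme of txgbe_calc_eeprom_checksum(). Image contents and
 * sizes are made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORDS    8      /* toy image; real device reads 0x800 words */
#define CHECKSUM_OFFSET 5      /* stand-in for the checksum word's slot */
#define EEPROM_SUM      0xBABA /* matches TXGBE_EEPROM_SUM */

static uint16_t calc_checksum(const uint16_t *img)
{
	uint16_t sum = 0;

	for (int i = 0; i < EEPROM_WORDS; i++)
		if (i != CHECKSUM_OFFSET)
			sum += img[i]; /* 16-bit wraparound is intended */
	return (uint16_t)(EEPROM_SUM - sum);
}

int main(void)
{
	uint16_t img[EEPROM_WORDS] = {
		0x1234, 0xabcd, 0x0007, 0x9000, 0x0001, 0x0000, 0x00ff, 0x4242
	};

	/* What the factory image carries in the checksum slot */
	img[CHECKSUM_OFFSET] = calc_checksum(img);
	printf("stored checksum: 0x%04x\n", img[CHECKSUM_OFFSET]);

	/* Validation recomputes and compares, like txgbe_validate_eeprom_checksum() */
	printf("valid: %s\n",
	       calc_checksum(img) == img[CHECKSUM_OFFSET] ? "yes" : "no");
	return 0;
}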
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
deleted file mode 100644
index 1f3ecf60e3c4..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _TXGBE_HW_H_
-#define _TXGBE_HW_H_
-
-int txgbe_disable_sec_tx_path(struct wx *wx);
-void txgbe_enable_sec_tx_path(struct wx *wx);
-int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
-int txgbe_reset_hw(struct wx *wx);
-
-#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
deleted file mode 100644
index 3b151c410a5c..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ /dev/null
@@ -1,840 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_lib.h" -#include "../libwx/wx_hw.h" -#include "txgbe_type.h" -#include "txgbe_hw.h" -#include "txgbe_phy.h" -#include "txgbe_ethtool.h" - -char txgbe_driver_name[] = "txgbe"; - -/* txgbe_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static const struct pci_device_id txgbe_pci_tbl[] = { - { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0}, - { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0}, - /* required last entry */ - { .device = 0 } -}; - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 - -static void txgbe_check_minimum_link(struct wx *wx) -{ - struct pci_dev *pdev; - - pdev = wx->pdev; - pcie_print_link_status(pdev); -} - -/** - * txgbe_enumerate_functions - Get the number of ports this device has - * @wx: wx structure - * - * This function enumerates the phsyical functions co-located on a single slot, - * in order to determine how many ports a device has. This is most useful in - * determining the required GT/s of PCIe bandwidth necessary for optimal - * performance. - **/ -static int txgbe_enumerate_functions(struct wx *wx) -{ - struct pci_dev *entry, *pdev = wx->pdev; - int physfns = 0; - - list_for_each_entry(entry, &pdev->bus->devices, bus_list) { - /* When the devices on the bus don't all match our device ID, - * we can't reliably determine the correct number of - * functions. This can occur if a function has been direct - * attached to a virtual machine using VT-d. - */ - if (entry->vendor != pdev->vendor || - entry->device != pdev->device) - return -EINVAL; - - physfns++; - } - - return physfns; -} - -/** - * txgbe_irq_enable - Enable default interrupt generation settings - * @wx: pointer to private structure - * @queues: enable irqs for queues - **/ -static void txgbe_irq_enable(struct wx *wx, bool queues) -{ - wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); - - /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC); - if (queues) - wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); -} - -/** - * txgbe_intr - msi/legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t txgbe_intr(int __always_unused irq, void *data) -{ - struct wx_q_vector *q_vector; - struct wx *wx = data; - struct pci_dev *pdev; - u32 eicr; - - q_vector = wx->q_vector[0]; - pdev = wx->pdev; - - eicr = wx_misc_isb(wx, WX_ISB_VEC0); - if (!eicr) { - /* shared interrupt alert! - * the interrupt that we masked before the ICR read. - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, true); - return IRQ_NONE; /* Not our interrupt */ - } - wx->isb_mem[WX_ISB_VEC0] = 0; - if (!(pdev->msi_enabled)) - wr32(wx, WX_PX_INTA, 1); - - wx->isb_mem[WX_ISB_MISC] = 0; - /* would disable interrupts here but it is auto disabled */ - napi_schedule_irqoff(&q_vector->napi); - - /* re-enable link(maybe) and non-queue interrupts, no flush. - * txgbe_poll will re-enable the queue interrupts - */ - if (netif_running(wx->netdev)) - txgbe_irq_enable(wx, false); - - return IRQ_HANDLED; -} - -/** - * txgbe_request_msix_irqs - Initialize MSI-X interrupts - * @wx: board private structure - * - * Allocate MSI-X vectors and request interrupts from the kernel. 
- **/ -static int txgbe_request_msix_irqs(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - int vector, err; - - for (vector = 0; vector < wx->num_q_vectors; vector++) { - struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_q_entries[vector]; - - if (q_vector->tx.ring && q_vector->rx.ring) - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-TxRx-%d", netdev->name, entry->entry); - else - /* skip this unused q_vector */ - continue; - - err = request_irq(entry->vector, wx_msix_clean_rings, 0, - q_vector->name, q_vector); - if (err) { - wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n", - q_vector->name, err); - goto free_queue_irqs; - } - } - - return 0; - -free_queue_irqs: - while (vector) { - vector--; - free_irq(wx->msix_q_entries[vector].vector, - wx->q_vector[vector]); - } - wx_reset_interrupt_capability(wx); - return err; -} - -/** - * txgbe_request_irq - initialize interrupts - * @wx: board private structure - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -static int txgbe_request_irq(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - struct pci_dev *pdev = wx->pdev; - int err; - - if (pdev->msix_enabled) - err = txgbe_request_msix_irqs(wx); - else if (pdev->msi_enabled) - err = request_irq(wx->pdev->irq, &txgbe_intr, 0, - netdev->name, wx); - else - err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED, - netdev->name, wx); - - if (err) - wx_err(wx, "request_irq failed, Error %d\n", err); - - return err; -} - -static void txgbe_up_complete(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - - wx_control_hw(wx, true); - wx_configure_vectors(wx); - - /* make sure to complete pre-operations */ - smp_mb__before_atomic(); - wx_napi_enable_all(wx); - - phylink_start(wx->phylink); - - /* clear any pending interrupts, may auto mask */ - rd32(wx, WX_PX_IC(0)); - rd32(wx, WX_PX_IC(1)); - rd32(wx, WX_PX_MISC_IC); - txgbe_irq_enable(wx, true); - - /* enable transmits */ - netif_tx_start_all_queues(netdev); -} - -static void txgbe_reset(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - u8 old_addr[ETH_ALEN]; - int err; - - err = txgbe_reset_hw(wx); - if (err != 0) - wx_err(wx, "Hardware Error: %d\n", err); - - wx_start_hw(wx); - /* do not flush user set addresses */ - memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); - wx_flush_sw_mac_table(wx); - wx_mac_set_default_filter(wx, old_addr); -} - -static void txgbe_disable_device(struct wx *wx) -{ - struct net_device *netdev = wx->netdev; - u32 i; - - wx_disable_pcie_master(wx); - /* disable receives */ - wx_disable_rx(wx); - - /* disable all enabled rx queues */ - for (i = 0; i < wx->num_rx_queues; i++) - /* this call also flushes the previous write */ - wx_disable_rx_queue(wx, wx->rx_ring[i]); - - netif_tx_stop_all_queues(netdev); - netif_tx_disable(netdev); - - wx_irq_disable(wx); - wx_napi_disable_all(wx); - - if (wx->bus.func < 2) - wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); - else - wx_err(wx, "%s: invalid bus lan id %d\n", - __func__, wx->bus.func); - - if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { - /* disable mac transmiter */ - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); - } - - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < wx->num_tx_queues; i++) { - u8 reg_idx = wx->tx_ring[i]->reg_idx; - - 
wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); - } - - /* Disable the Tx DMA engine */ - wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); - - wx_update_stats(wx); -} - -void txgbe_down(struct wx *wx) -{ - txgbe_disable_device(wx); - txgbe_reset(wx); - phylink_stop(wx->phylink); - - wx_clean_all_tx_rings(wx); - wx_clean_all_rx_rings(wx); -} - -void txgbe_up(struct wx *wx) -{ - wx_configure(wx); - txgbe_up_complete(wx); -} - -/** - * txgbe_init_type_code - Initialize the shared code - * @wx: pointer to hardware structure - **/ -static void txgbe_init_type_code(struct wx *wx) -{ - u8 device_type = wx->subsystem_device_id & 0xF0; - - switch (wx->device_id) { - case TXGBE_DEV_ID_SP1000: - case TXGBE_DEV_ID_WX1820: - wx->mac.type = wx_mac_sp; - break; - default: - wx->mac.type = wx_mac_unknown; - break; - } - - switch (device_type) { - case TXGBE_ID_SFP: - wx->media_type = sp_media_fiber; - break; - case TXGBE_ID_XAUI: - case TXGBE_ID_SGMII: - wx->media_type = sp_media_copper; - break; - case TXGBE_ID_KR_KX_KX4: - case TXGBE_ID_MAC_XAUI: - case TXGBE_ID_MAC_SGMII: - wx->media_type = sp_media_backplane; - break; - case TXGBE_ID_SFI_XAUI: - if (wx->bus.func == 0) - wx->media_type = sp_media_fiber; - else - wx->media_type = sp_media_copper; - break; - default: - wx->media_type = sp_media_unknown; - break; - } -} - -/** - * txgbe_sw_init - Initialize general software structures (struct wx) - * @wx: board private structure to initialize - **/ -static int txgbe_sw_init(struct wx *wx) -{ - u16 msix_count = 0; - int err; - - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; - wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; - - /* PCI config space info */ - err = wx_sw_init(wx); - if (err < 0) - return err; - - txgbe_init_type_code(wx); - - /* Set common capability flags and settings */ - wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; - err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS); - if (err) - wx_err(wx, "Do not support MSI-X\n"); - wx->mac.max_msix_vectors = msix_count; - - wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES, - num_online_cpus()); - wx->rss_enabled = true; - - /* enable itr by default in dynamic mode */ - wx->rx_itr_setting = 1; - wx->tx_itr_setting = 1; - - /* set default ring sizes */ - wx->tx_ring_count = TXGBE_DEFAULT_TXD; - wx->rx_ring_count = TXGBE_DEFAULT_RXD; - - /* set default work limits */ - wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; - wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; - - return 0; -} - -/** - * txgbe_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). - **/ -static int txgbe_open(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - int err; - - err = wx_setup_resources(wx); - if (err) - goto err_reset; - - wx_configure(wx); - - err = txgbe_request_irq(wx); - if (err) - goto err_free_isb; - - /* Notify the stack of the actual queue counts. 
*/ - err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); - if (err) - goto err_free_irq; - - err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues); - if (err) - goto err_free_irq; - - txgbe_up_complete(wx); - - return 0; - -err_free_irq: - wx_free_irq(wx); -err_free_isb: - wx_free_isb_resources(wx); -err_reset: - txgbe_reset(wx); - - return err; -} - -/** - * txgbe_close_suspend - actions necessary to both suspend and close flows - * @wx: the private wx struct - * - * This function should contain the necessary work common to both suspending - * and closing of the device. - */ -static void txgbe_close_suspend(struct wx *wx) -{ - txgbe_disable_device(wx); - wx_free_resources(wx); -} - -/** - * txgbe_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ -static int txgbe_close(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - txgbe_down(wx); - wx_free_irq(wx); - wx_free_resources(wx); - wx_control_hw(wx, false); - - return 0; -} - -static void txgbe_dev_shutdown(struct pci_dev *pdev) -{ - struct wx *wx = pci_get_drvdata(pdev); - struct net_device *netdev; - - netdev = wx->netdev; - netif_device_detach(netdev); - - rtnl_lock(); - if (netif_running(netdev)) - txgbe_close_suspend(wx); - rtnl_unlock(); - - wx_control_hw(wx, false); - - pci_disable_device(pdev); -} - -static void txgbe_shutdown(struct pci_dev *pdev) -{ - txgbe_dev_shutdown(pdev); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -/** - * txgbe_setup_tc - routine to configure net_device for multiple traffic - * classes. - * - * @dev: net device to configure - * @tc: number of traffic classes to enable - */ -int txgbe_setup_tc(struct net_device *dev, u8 tc) -{ - struct wx *wx = netdev_priv(dev); - - /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunately, the - * hardware is not flexible enough to do this dynamically. - */ - if (netif_running(dev)) - txgbe_close(dev); - else - txgbe_reset(wx); - - wx_clear_interrupt_scheme(wx); - - if (tc) - netdev_set_num_tc(dev, tc); - else - netdev_reset_tc(dev); - - wx_init_interrupt_scheme(wx); - - if (netif_running(dev)) - txgbe_open(dev); - - return 0; -} - -static const struct net_device_ops txgbe_netdev_ops = { - .ndo_open = txgbe_open, - .ndo_stop = txgbe_close, - .ndo_change_mtu = wx_change_mtu, - .ndo_start_xmit = wx_xmit_frame, - .ndo_set_rx_mode = wx_set_rx_mode, - .ndo_set_features = wx_set_features, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = wx_set_mac, - .ndo_get_stats64 = wx_get_stats64, - .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, -}; - -/** - * txgbe_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in txgbe_pci_tbl - * - * Returns 0 on success, negative on failure - * - * txgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the wx private structure, - * and a hardware reset occur. 
- **/ -static int txgbe_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) -{ - struct net_device *netdev; - int err, expected_gts; - struct wx *wx = NULL; - struct txgbe *txgbe; - - u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; - u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; - u16 build = 0, major = 0, patch = 0; - u32 etrack_id = 0; - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_pci_disable_dev; - } - - err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - txgbe_driver_name); - if (err) { - dev_err(&pdev->dev, - "pci_request_selected_regions failed 0x%x\n", err); - goto err_pci_disable_dev; - } - - pci_set_master(pdev); - - netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct wx), - TXGBE_MAX_TX_QUEUES, - TXGBE_MAX_RX_QUEUES); - if (!netdev) { - err = -ENOMEM; - goto err_pci_release_regions; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - wx = netdev_priv(netdev); - wx->netdev = netdev; - wx->pdev = pdev; - - wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - wx->hw_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!wx->hw_addr) { - err = -EIO; - goto err_pci_release_regions; - } - - wx->driver_name = txgbe_driver_name; - txgbe_set_ethtool_ops(netdev); - netdev->netdev_ops = &txgbe_netdev_ops; - - /* setup the private structure */ - err = txgbe_sw_init(wx); - if (err) - goto err_free_mac_table; - - /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); - if (err) - goto err_free_mac_table; - err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST); - if (err) - goto err_free_mac_table; - - err = wx_mng_present(wx); - if (err) { - dev_err(&pdev->dev, "Management capability is not present\n"); - goto err_free_mac_table; - } - - err = txgbe_reset_hw(wx); - if (err) { - dev_err(&pdev->dev, "HW Init failed: %d\n", err); - goto err_free_mac_table; - } - - netdev->features = NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; - - netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL; - netdev->features |= netdev->gso_partial_features; - netdev->features |= NETIF_F_SCTP_CRC; - netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; - netdev->hw_enc_features |= netdev->vlan_features; - netdev->features |= NETIF_F_VLAN_FEATURES; - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features | NETIF_F_RXALL; - netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - netdev->features |= NETIF_F_HIGHDMA; - netdev->hw_features |= NETIF_F_GRO; - netdev->features |= NETIF_F_GRO; - - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->priv_flags |= IFF_SUPP_NOFCS; - netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - - /* make sure the EEPROM is good */ - err = txgbe_validate_eeprom_checksum(wx, NULL); - if (err != 0) { - dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); - wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST); - err = -EIO; - goto err_free_mac_table; - } - - eth_hw_addr_set(netdev, wx->mac.perm_addr); - wx_mac_set_default_filter(wx, wx->mac.perm_addr); - - err = wx_init_interrupt_scheme(wx); - if (err) - goto 
err_free_mac_table; - - /* Save off EEPROM version number and Option Rom version which - * together make a unique identify for the eeprom - */ - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, - &eeprom_verh); - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, - &eeprom_verl); - etrack_id = (eeprom_verh << 16) | eeprom_verl; - - wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, - &offset); - - /* Make sure offset to SCSI block is valid */ - if (!(offset == 0x0) && !(offset == 0xffff)) { - wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh); - wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl); - - /* Only display Option Rom if exist */ - if (eeprom_cfg_blkl && eeprom_cfg_blkh) { - major = eeprom_cfg_blkl >> 8; - build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); - patch = eeprom_cfg_blkh & 0x00ff; - - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), - "0x%08x, %d.%d.%d", etrack_id, major, build, - patch); - } else { - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), - "0x%08x", etrack_id); - } - } else { - snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), - "0x%08x", etrack_id); - } - - if (etrack_id < 0x20010) - dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); - - txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); - if (!txgbe) { - err = -ENOMEM; - goto err_release_hw; - } - - txgbe->wx = wx; - wx->priv = txgbe; - - err = txgbe_init_phy(txgbe); - if (err) - goto err_release_hw; - - err = register_netdev(netdev); - if (err) - goto err_remove_phy; - - pci_set_drvdata(pdev, wx); - - netif_tx_stop_all_queues(netdev); - - /* calculate the expected PCIe bandwidth required for optimal - * performance. Note that some older parts will never have enough - * bandwidth due to being older generation PCIe parts. We clamp these - * parts to ensure that no warning is displayed, as this could confuse - * users otherwise. - */ - expected_gts = txgbe_enumerate_functions(wx) * 10; - - /* don't check link if we failed to enumerate functions */ - if (expected_gts > 0) - txgbe_check_minimum_link(wx); - else - dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - - return 0; - -err_remove_phy: - txgbe_remove_phy(txgbe); -err_release_hw: - wx_clear_interrupt_scheme(wx); - wx_control_hw(wx, false); -err_free_mac_table: - kfree(wx->mac_table); -err_pci_release_regions: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); -err_pci_disable_dev: - pci_disable_device(pdev); - return err; -} - -/** - * txgbe_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * txgbe_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. 
- **/ -static void txgbe_remove(struct pci_dev *pdev) -{ - struct wx *wx = pci_get_drvdata(pdev); - struct txgbe *txgbe = wx->priv; - struct net_device *netdev; - - netdev = wx->netdev; - unregister_netdev(netdev); - - txgbe_remove_phy(txgbe); - - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); - - kfree(wx->rss_key); - kfree(wx->mac_table); - wx_clear_interrupt_scheme(wx); - - pci_disable_device(pdev); -} - -static struct pci_driver txgbe_driver = { - .name = txgbe_driver_name, - .id_table = txgbe_pci_tbl, - .probe = txgbe_probe, - .remove = txgbe_remove, - .shutdown = txgbe_shutdown, -}; - -module_pci_driver(txgbe_driver); - -MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); -MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); -MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c deleted file mode 100644 index 1b84d495d14e..000000000000 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ /dev/null @@ -1,792 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../libwx/wx_type.h" -#include "../libwx/wx_lib.h" -#include "../libwx/wx_hw.h" -#include "txgbe_type.h" -#include "txgbe_phy.h" -#include "txgbe_hw.h" - -static int txgbe_swnodes_register(struct txgbe *txgbe) -{ - struct txgbe_nodes *nodes = &txgbe->nodes; - struct pci_dev *pdev = txgbe->wx->pdev; - struct software_node *swnodes; - u32 id; - - id = pci_dev_id(pdev); - - snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id); - snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id); - snprintf(nodes->sfp_name, sizeof(nodes->sfp_name), "txgbe_sfp-%x", id); - snprintf(nodes->phylink_name, sizeof(nodes->phylink_name), "txgbe_phylink-%x", id); - - swnodes = nodes->swnodes; - - /* GPIO 0: tx fault - * GPIO 1: tx disable - * GPIO 2: sfp module absent - * GPIO 3: rx signal lost - * GPIO 4: rate select, 1G(0) 10G(1) - * GPIO 5: rate select, 1G(0) 10G(1) - */ - nodes->gpio_props[0] = PROPERTY_ENTRY_STRING("pinctrl-names", "default"); - swnodes[SWNODE_GPIO] = NODE_PROP(nodes->gpio_name, nodes->gpio_props); - nodes->gpio0_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 0, GPIO_ACTIVE_HIGH); - nodes->gpio1_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 1, GPIO_ACTIVE_HIGH); - nodes->gpio2_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 2, GPIO_ACTIVE_LOW); - nodes->gpio3_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 3, GPIO_ACTIVE_HIGH); - nodes->gpio4_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 4, GPIO_ACTIVE_HIGH); - nodes->gpio5_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 5, GPIO_ACTIVE_HIGH); - - nodes->i2c_props[0] = PROPERTY_ENTRY_STRING("compatible", "snps,designware-i2c"); - nodes->i2c_props[1] = PROPERTY_ENTRY_BOOL("wx,i2c-snps-model"); - nodes->i2c_props[2] = PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_STANDARD_MODE_FREQ); - swnodes[SWNODE_I2C] = NODE_PROP(nodes->i2c_name, nodes->i2c_props); - nodes->i2c_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_I2C]); - - nodes->sfp_props[0] = PROPERTY_ENTRY_STRING("compatible", "sff,sfp"); - nodes->sfp_props[1] = PROPERTY_ENTRY_REF_ARRAY("i2c-bus", nodes->i2c_ref); - nodes->sfp_props[2] = PROPERTY_ENTRY_REF_ARRAY("tx-fault-gpios", 
nodes->gpio0_ref); - nodes->sfp_props[3] = PROPERTY_ENTRY_REF_ARRAY("tx-disable-gpios", nodes->gpio1_ref); - nodes->sfp_props[4] = PROPERTY_ENTRY_REF_ARRAY("mod-def0-gpios", nodes->gpio2_ref); - nodes->sfp_props[5] = PROPERTY_ENTRY_REF_ARRAY("los-gpios", nodes->gpio3_ref); - nodes->sfp_props[6] = PROPERTY_ENTRY_REF_ARRAY("rate-select1-gpios", nodes->gpio4_ref); - nodes->sfp_props[7] = PROPERTY_ENTRY_REF_ARRAY("rate-select0-gpios", nodes->gpio5_ref); - swnodes[SWNODE_SFP] = NODE_PROP(nodes->sfp_name, nodes->sfp_props); - nodes->sfp_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_SFP]); - - nodes->phylink_props[0] = PROPERTY_ENTRY_STRING("managed", "in-band-status"); - nodes->phylink_props[1] = PROPERTY_ENTRY_REF_ARRAY("sfp", nodes->sfp_ref); - swnodes[SWNODE_PHYLINK] = NODE_PROP(nodes->phylink_name, nodes->phylink_props); - - nodes->group[SWNODE_GPIO] = &swnodes[SWNODE_GPIO]; - nodes->group[SWNODE_I2C] = &swnodes[SWNODE_I2C]; - nodes->group[SWNODE_SFP] = &swnodes[SWNODE_SFP]; - nodes->group[SWNODE_PHYLINK] = &swnodes[SWNODE_PHYLINK]; - - return software_node_register_node_group(nodes->group); -} - -static int txgbe_pcs_read(struct mii_bus *bus, int addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 offset, val; - - if (addr) - return -EOPNOTSUPP; - - offset = devnum << 16 | regnum; - - /* Set the LAN port indicator to IDA_ADDR */ - wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); - - /* Read the data from IDA_DATA register */ - val = rd32(wx, TXGBE_XPCS_IDA_DATA); - - return (u16)val; -} - -static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val) -{ - struct wx *wx = bus->priv; - u32 offset; - - if (addr) - return -EOPNOTSUPP; - - offset = devnum << 16 | regnum; - - /* Set the LAN port indicator to IDA_ADDR */ - wr32(wx, TXGBE_XPCS_IDA_ADDR, offset); - - /* Write the data to IDA_DATA register */ - wr32(wx, TXGBE_XPCS_IDA_DATA, val); - - return 0; -} - -static int txgbe_mdio_pcs_init(struct txgbe *txgbe) -{ - struct mii_bus *mii_bus; - struct dw_xpcs *xpcs; - struct pci_dev *pdev; - struct wx *wx; - int ret = 0; - - wx = txgbe->wx; - pdev = wx->pdev; - - mii_bus = devm_mdiobus_alloc(&pdev->dev); - if (!mii_bus) - return -ENOMEM; - - mii_bus->name = "txgbe_pcs_mdio_bus"; - mii_bus->read_c45 = &txgbe_pcs_read; - mii_bus->write_c45 = &txgbe_pcs_write; - mii_bus->parent = &pdev->dev; - mii_bus->phy_mask = ~0; - mii_bus->priv = wx; - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x", - pci_dev_id(pdev)); - - ret = devm_mdiobus_register(&pdev->dev, mii_bus); - if (ret) - return ret; - - xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER); - if (IS_ERR(xpcs)) - return PTR_ERR(xpcs); - - txgbe->xpcs = xpcs; - - return 0; -} - -static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, - phy_interface_t interface) -{ - struct wx *wx = phylink_to_wx(config); - struct txgbe *txgbe = wx->priv; - - if (interface == PHY_INTERFACE_MODE_10GBASER) - return &txgbe->xpcs->pcs; - - return NULL; -} - -static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state) -{ -} - -static void txgbe_mac_link_down(struct phylink_config *config, - unsigned int mode, phy_interface_t interface) -{ - struct wx *wx = phylink_to_wx(config); - - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); -} - -static void txgbe_mac_link_up(struct phylink_config *config, - struct phy_device *phy, - unsigned int mode, phy_interface_t interface, - int speed, int duplex, - bool tx_pause, bool rx_pause) -{ 
- struct wx *wx = phylink_to_wx(config); - u32 txcfg, wdg; - - wx_fc_enable(wx, tx_pause, rx_pause); - - txcfg = rd32(wx, WX_MAC_TX_CFG); - txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; - - switch (speed) { - case SPEED_10000: - txcfg |= WX_MAC_TX_CFG_SPEED_10G; - break; - case SPEED_1000: - case SPEED_100: - case SPEED_10: - txcfg |= WX_MAC_TX_CFG_SPEED_1G; - break; - default: - break; - } - - wr32(wx, WX_MAC_TX_CFG, txcfg | WX_MAC_TX_CFG_TE); - - /* Re configure MAC Rx */ - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); -} - -static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) -{ - struct wx *wx = phylink_to_wx(config); - - wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); - - return txgbe_disable_sec_tx_path(wx); -} - -static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) -{ - struct wx *wx = phylink_to_wx(config); - - txgbe_enable_sec_tx_path(wx); - wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); - - return 0; -} - -static const struct phylink_mac_ops txgbe_mac_ops = { - .mac_select_pcs = txgbe_phylink_mac_select, - .mac_prepare = txgbe_mac_prepare, - .mac_finish = txgbe_mac_finish, - .mac_config = txgbe_mac_config, - .mac_link_down = txgbe_mac_link_down, - .mac_link_up = txgbe_mac_link_up, -}; - -static int txgbe_phylink_init(struct txgbe *txgbe) -{ - struct fwnode_handle *fwnode = NULL; - struct phylink_config *config; - struct wx *wx = txgbe->wx; - phy_interface_t phy_mode; - struct phylink *phylink; - - config = &wx->phylink_config; - config->dev = &wx->netdev->dev; - config->type = PHYLINK_NETDEV; - config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | - MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - - if (wx->media_type == sp_media_copper) { - phy_mode = PHY_INTERFACE_MODE_XAUI; - __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); - } else { - phy_mode = PHY_INTERFACE_MODE_10GBASER; - fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); - __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); - } - - phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops); - if (IS_ERR(phylink)) - return PTR_ERR(phylink); - - if (wx->phydev) { - int ret; - - ret = phylink_connect_phy(phylink, wx->phydev); - if (ret) { - phylink_destroy(phylink); - return ret; - } - } - - wx->phylink = phylink; - - return 0; -} - -static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - struct wx *wx = gpiochip_get_data(chip); - int val; - - val = rd32m(wx, WX_GPIO_EXT, BIT(offset)); - - return !!(val & BIT(offset)); -} - -static int txgbe_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) -{ - struct wx *wx = gpiochip_get_data(chip); - u32 val; - - val = rd32(wx, WX_GPIO_DDR); - if (BIT(offset) & val) - return GPIO_LINE_DIRECTION_OUT; - - return GPIO_LINE_DIRECTION_IN; -} - -static int txgbe_gpio_direction_in(struct gpio_chip *chip, unsigned int offset) -{ - struct wx *wx = gpiochip_get_data(chip); - unsigned long flags; - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_DDR, BIT(offset), 0); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - - return 0; -} - -static int 
txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset, - int val) -{ - struct wx *wx = gpiochip_get_data(chip); - unsigned long flags; - u32 set; - - set = val ? BIT(offset) : 0; - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_DR, BIT(offset), set); - wr32m(wx, WX_GPIO_DDR, BIT(offset), BIT(offset)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - - return 0; -} - -static void txgbe_gpio_irq_ack(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32(wx, WX_GPIO_EOI, BIT(hwirq)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); -} - -static void txgbe_gpio_irq_mask(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; - - gpiochip_disable_irq(gc, hwirq); - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq)); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); -} - -static void txgbe_gpio_irq_unmask(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - unsigned long flags; - - gpiochip_enable_irq(gc, hwirq); - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); -} - -static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset) -{ - struct wx *wx = gpiochip_get_data(gc); - u32 pol, val; - - pol = rd32(wx, WX_GPIO_POLARITY); - val = rd32(wx, WX_GPIO_EXT); - - if (val & BIT(offset)) - pol &= ~BIT(offset); - else - pol |= BIT(offset); - - wr32(wx, WX_GPIO_POLARITY, pol); -} - -static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - struct wx *wx = gpiochip_get_data(gc); - u32 level, polarity, mask; - unsigned long flags; - - mask = BIT(hwirq); - - if (type & IRQ_TYPE_LEVEL_MASK) { - level = 0; - irq_set_handler_locked(d, handle_level_irq); - } else { - level = mask; - irq_set_handler_locked(d, handle_edge_irq); - } - - if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) - polarity = mask; - else - polarity = 0; - - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - - wr32m(wx, WX_GPIO_INTEN, mask, mask); - wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level); - if (type == IRQ_TYPE_EDGE_BOTH) - txgbe_toggle_trigger(gc, hwirq); - else - wr32m(wx, WX_GPIO_POLARITY, mask, polarity); - - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - - return 0; -} - -static const struct irq_chip txgbe_gpio_irq_chip = { - .name = "txgbe_gpio_irq", - .irq_ack = txgbe_gpio_irq_ack, - .irq_mask = txgbe_gpio_irq_mask, - .irq_unmask = txgbe_gpio_irq_unmask, - .irq_set_type = txgbe_gpio_set_type, - .flags = IRQCHIP_IMMUTABLE, - GPIOCHIP_IRQ_RESOURCE_HELPERS, -}; - -static void txgbe_irq_handler(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct wx *wx = irq_desc_get_handler_data(desc); - struct txgbe *txgbe = wx->priv; - irq_hw_number_t hwirq; - unsigned long gpioirq; - struct gpio_chip *gc; - unsigned long flags; - u32 eicr; - - eicr = wx_misc_isb(wx, WX_ISB_MISC); - - chained_irq_enter(chip, desc); - - gpioirq = rd32(wx, 
WX_GPIO_INTSTATUS); - - gc = txgbe->gpio; - for_each_set_bit(hwirq, &gpioirq, gc->ngpio) { - int gpio = irq_find_mapping(gc->irq.domain, hwirq); - u32 irq_type = irq_get_trigger_type(gpio); - - generic_handle_domain_irq(gc->irq.domain, hwirq); - - if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { - raw_spin_lock_irqsave(&wx->gpio_lock, flags); - txgbe_toggle_trigger(gc, hwirq); - raw_spin_unlock_irqrestore(&wx->gpio_lock, flags); - } - } - - chained_irq_exit(chip, desc); - - if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | - TXGBE_PX_MISC_ETH_AN)) { - u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); - - phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); - } - - /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC); -} - -static int txgbe_gpio_init(struct txgbe *txgbe) -{ - struct gpio_irq_chip *girq; - struct gpio_chip *gc; - struct device *dev; - struct wx *wx; - int ret; - - wx = txgbe->wx; - dev = &wx->pdev->dev; - - raw_spin_lock_init(&wx->gpio_lock); - - gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL); - if (!gc) - return -ENOMEM; - - gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x", - pci_dev_id(wx->pdev)); - if (!gc->label) - return -ENOMEM; - - gc->base = -1; - gc->ngpio = 6; - gc->owner = THIS_MODULE; - gc->parent = dev; - gc->fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_GPIO]); - gc->get = txgbe_gpio_get; - gc->get_direction = txgbe_gpio_get_direction; - gc->direction_input = txgbe_gpio_direction_in; - gc->direction_output = txgbe_gpio_direction_out; - - girq = &gc->irq; - gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip); - girq->parent_handler = txgbe_irq_handler; - girq->parent_handler_data = wx; - girq->num_parents = 1; - girq->parents = devm_kcalloc(dev, girq->num_parents, - sizeof(*girq->parents), GFP_KERNEL); - if (!girq->parents) - return -ENOMEM; - - /* now only suuported on MSI-X interrupt */ - if (!wx->msix_entry) - return -EPERM; - - girq->parents[0] = wx->msix_entry->vector; - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_bad_irq; - - ret = devm_gpiochip_add_data(dev, gc, wx); - if (ret) - return ret; - - txgbe->gpio = gc; - - return 0; -} - -static int txgbe_clock_register(struct txgbe *txgbe) -{ - struct pci_dev *pdev = txgbe->wx->pdev; - struct clk_lookup *clock; - char clk_name[32]; - struct clk *clk; - - snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d", - pci_dev_id(pdev)); - - clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - clock = clkdev_create(clk, NULL, clk_name); - if (!clock) { - clk_unregister(clk); - return -ENOMEM; - } - - txgbe->clk = clk; - txgbe->clock = clock; - - return 0; -} - -static int txgbe_i2c_read(void *context, unsigned int reg, unsigned int *val) -{ - struct wx *wx = context; - - *val = rd32(wx, reg + TXGBE_I2C_BASE); - - return 0; -} - -static int txgbe_i2c_write(void *context, unsigned int reg, unsigned int val) -{ - struct wx *wx = context; - - wr32(wx, reg + TXGBE_I2C_BASE, val); - - return 0; -} - -static const struct regmap_config i2c_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_read = txgbe_i2c_read, - .reg_write = txgbe_i2c_write, - .fast_io = true, -}; - -static int txgbe_i2c_register(struct txgbe *txgbe) -{ - struct platform_device_info info = {}; - struct platform_device *i2c_dev; - struct regmap *i2c_regmap; - struct pci_dev *pdev; - struct wx *wx; - - wx = txgbe->wx; - pdev = wx->pdev; - i2c_regmap = devm_regmap_init(&pdev->dev, NULL, wx, &i2c_regmap_config); - if 
(IS_ERR(i2c_regmap)) { - wx_err(wx, "failed to init I2C regmap\n"); - return PTR_ERR(i2c_regmap); - } - - info.parent = &pdev->dev; - info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); - info.name = "i2c_designware"; - info.id = pci_dev_id(pdev); - - info.res = &DEFINE_RES_IRQ(pdev->irq); - info.num_res = 1; - i2c_dev = platform_device_register_full(&info); - if (IS_ERR(i2c_dev)) - return PTR_ERR(i2c_dev); - - txgbe->i2c_dev = i2c_dev; - - return 0; -} - -static int txgbe_sfp_register(struct txgbe *txgbe) -{ - struct pci_dev *pdev = txgbe->wx->pdev; - struct platform_device_info info = {}; - struct platform_device *sfp_dev; - - info.parent = &pdev->dev; - info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]); - info.name = "sfp"; - info.id = pci_dev_id(pdev); - sfp_dev = platform_device_register_full(&info); - if (IS_ERR(sfp_dev)) - return PTR_ERR(sfp_dev); - - txgbe->sfp_dev = sfp_dev; - - return 0; -} - -static int txgbe_ext_phy_init(struct txgbe *txgbe) -{ - struct phy_device *phydev; - struct mii_bus *mii_bus; - struct pci_dev *pdev; - struct wx *wx; - int ret = 0; - - wx = txgbe->wx; - pdev = wx->pdev; - - mii_bus = devm_mdiobus_alloc(&pdev->dev); - if (!mii_bus) - return -ENOMEM; - - mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; - mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; - mii_bus->parent = &pdev->dev; - mii_bus->phy_mask = GENMASK(31, 1); - mii_bus->priv = wx; - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", - (pdev->bus->number << 8) | pdev->devfn); - - ret = devm_mdiobus_register(&pdev->dev, mii_bus); - if (ret) { - wx_err(wx, "failed to register MDIO bus: %d\n", ret); - return ret; - } - - phydev = phy_find_first(mii_bus); - if (!phydev) { - wx_err(wx, "no PHY found\n"); - return -ENODEV; - } - - phy_attached_info(phydev); - - wx->link = 0; - wx->speed = 0; - wx->duplex = 0; - wx->phydev = phydev; - - ret = txgbe_phylink_init(txgbe); - if (ret) { - wx_err(wx, "failed to init phylink: %d\n", ret); - return ret; - } - - return 0; -} - -int txgbe_init_phy(struct txgbe *txgbe) -{ - struct wx *wx = txgbe->wx; - int ret; - - if (txgbe->wx->media_type == sp_media_copper) - return txgbe_ext_phy_init(txgbe); - - ret = txgbe_swnodes_register(txgbe); - if (ret) { - wx_err(wx, "failed to register software nodes\n"); - return ret; - } - - ret = txgbe_mdio_pcs_init(txgbe); - if (ret) { - wx_err(wx, "failed to init mdio pcs: %d\n", ret); - goto err_unregister_swnode; - } - - ret = txgbe_phylink_init(txgbe); - if (ret) { - wx_err(wx, "failed to init phylink\n"); - goto err_destroy_xpcs; - } - - ret = txgbe_gpio_init(txgbe); - if (ret) { - wx_err(wx, "failed to init gpio\n"); - goto err_destroy_phylink; - } - - ret = txgbe_clock_register(txgbe); - if (ret) { - wx_err(wx, "failed to register clock: %d\n", ret); - goto err_destroy_phylink; - } - - ret = txgbe_i2c_register(txgbe); - if (ret) { - wx_err(wx, "failed to init i2c interface: %d\n", ret); - goto err_unregister_clk; - } - - ret = txgbe_sfp_register(txgbe); - if (ret) { - wx_err(wx, "failed to register sfp\n"); - goto err_unregister_i2c; - } - - wx->msix_in_use = true; - - return 0; - -err_unregister_i2c: - platform_device_unregister(txgbe->i2c_dev); -err_unregister_clk: - clkdev_drop(txgbe->clock); - clk_unregister(txgbe->clk); -err_destroy_phylink: - phylink_destroy(wx->phylink); -err_destroy_xpcs: - xpcs_destroy(txgbe->xpcs); -err_unregister_swnode: - software_node_unregister_node_group(txgbe->nodes.group); - - return ret; -} - -void txgbe_remove_phy(struct txgbe 
*txgbe)
-{
-	if (txgbe->wx->media_type == sp_media_copper) {
-		phylink_disconnect_phy(txgbe->wx->phylink);
-		phylink_destroy(txgbe->wx->phylink);
-		return;
-	}
-
-	platform_device_unregister(txgbe->sfp_dev);
-	platform_device_unregister(txgbe->i2c_dev);
-	clkdev_drop(txgbe->clock);
-	clk_unregister(txgbe->clk);
-	phylink_destroy(txgbe->wx->phylink);
-	xpcs_destroy(txgbe->xpcs);
-	software_node_unregister_node_group(txgbe->nodes.group);
-	txgbe->wx->msix_in_use = false;
-}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
deleted file mode 100644
index 1ab592124986..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _TXGBE_PHY_H_
-#define _TXGBE_PHY_H_
-
-int txgbe_init_phy(struct txgbe *txgbe);
-void txgbe_remove_phy(struct txgbe *txgbe);
-
-#endif /* _TXGBE_NODE_H_ */
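The GPIO code removed earlier in this patch (txgbe_toggle_trigger() and
txgbe_gpio_set_type() in txgbe_phy.c) has no hardware both-edge trigger,
so IRQ_TYPE_EDGE_BOTH is emulated: after each interrupt the polarity
register is re-armed for the opposite edge based on the line's current
level. A small userspace model of that re-arming scheme; the two
variables are stand-ins for the WX_GPIO_EXT and WX_GPIO_POLARITY
registers, and the pin assignment is just an example:

/* Illustrative only -- not part of the patch. Models the both-edge
 * emulation of txgbe_toggle_trigger().
 */
#include <stdio.h>

static unsigned int gpio_ext;      /* stand-in for WX_GPIO_EXT (line levels) */
static unsigned int gpio_polarity; /* stand-in for WX_GPIO_POLARITY (1 = rising) */

static void toggle_trigger(unsigned int offset)
{
	/* Line is high now -> the next event of interest is the falling
	 * edge, so clear the polarity bit; and vice versa.
	 */
	if (gpio_ext & (1u << offset))
		gpio_polarity &= ~(1u << offset);
	else
		gpio_polarity |= 1u << offset;
}

int main(void)
{
	unsigned int offset = 2; /* e.g. the "sfp module absent" pin */

	for (int event = 0; event < 4; event++) {
		gpio_ext ^= 1u << offset; /* line toggled: an edge fired */
		toggle_trigger(offset);   /* re-arm for the opposite edge */
		printf("level=%u -> armed for %s edge\n",
		       (gpio_ext >> offset) & 1,
		       (gpio_polarity >> offset) & 1 ? "rising" : "falling");
	}
	return 0;
}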
TXGBE_PX_MISC_ETH_AN BIT(19) -#define TXGBE_PX_MISC_INT_ERR BIT(20) -#define TXGBE_PX_MISC_GPIO BIT(26) -#define TXGBE_PX_MISC_IEN_MASK \ - (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ - TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ - TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ - TXGBE_PX_MISC_GPIO) - -/* Port cfg registers */ -#define TXGBE_CFG_PORT_ST 0x14404 -#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0) - -/* I2C registers */ -#define TXGBE_I2C_BASE 0x14900 - -/************************************** ETH PHY ******************************/ -#define TXGBE_XPCS_IDA_ADDR 0x13000 -#define TXGBE_XPCS_IDA_DATA 0x13004 - -/* Checksum and EEPROM pointers */ -#define TXGBE_EEPROM_LAST_WORD 0x800 -#define TXGBE_EEPROM_CHECKSUM 0x2F -#define TXGBE_EEPROM_SUM 0xBABA -#define TXGBE_EEPROM_VERSION_L 0x1D -#define TXGBE_EEPROM_VERSION_H 0x1E -#define TXGBE_ISCSI_BOOT_CONFIG 0x07 - -#define TXGBE_MAX_MSIX_VECTORS 64 -#define TXGBE_MAX_FDIR_INDICES 63 -#define TXGBE_MAX_RSS_INDICES 63 - -#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) - -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 -#define TXGBE_SP_VFT_TBL_SIZE 128 -#define TXGBE_SP_RX_PB_SIZE 512 -#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ - -/* TX/RX descriptor defines */ -#define TXGBE_DEFAULT_TXD 512 -#define TXGBE_DEFAULT_TX_WORK 256 - -#if (PAGE_SIZE < 8192) -#define TXGBE_DEFAULT_RXD 512 -#define TXGBE_DEFAULT_RX_WORK 256 -#else -#define TXGBE_DEFAULT_RXD 256 -#define TXGBE_DEFAULT_RX_WORK 128 -#endif - -#define TXGBE_INTR_MISC BIT(0) -#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) - -#define TXGBE_MAX_EITR GENMASK(11, 3) - -extern char txgbe_driver_name[]; - -void txgbe_down(struct wx *wx); -void txgbe_up(struct wx *wx); -int txgbe_setup_tc(struct net_device *dev, u8 tc); - -#define NODE_PROP(_NAME, _PROP) \ - (const struct software_node) { \ - .name = _NAME, \ - .properties = _PROP, \ - } - -enum txgbe_swnodes { - SWNODE_GPIO = 0, - SWNODE_I2C, - SWNODE_SFP, - SWNODE_PHYLINK, - SWNODE_MAX -}; - -struct txgbe_nodes { - char gpio_name[32]; - char i2c_name[32]; - char sfp_name[32]; - char phylink_name[32]; - struct property_entry gpio_props[1]; - struct property_entry i2c_props[3]; - struct property_entry sfp_props[8]; - struct property_entry phylink_props[2]; - struct software_node_ref_args i2c_ref[1]; - struct software_node_ref_args gpio0_ref[1]; - struct software_node_ref_args gpio1_ref[1]; - struct software_node_ref_args gpio2_ref[1]; - struct software_node_ref_args gpio3_ref[1]; - struct software_node_ref_args gpio4_ref[1]; - struct software_node_ref_args gpio5_ref[1]; - struct software_node_ref_args sfp_ref[1]; - struct software_node swnodes[SWNODE_MAX]; - const struct software_node *group[SWNODE_MAX + 1]; -}; - -struct txgbe { - struct wx *wx; - struct txgbe_nodes nodes; - struct dw_xpcs *xpcs; - struct platform_device *sfp_dev; - struct platform_device *i2c_dev; - struct clk_lookup *clock; - struct clk *clk; - struct gpio_chip *gpio; -}; - -#endif /* _TXGBE_TYPE_H_ */ -- Gitee From fce8f8b02d5aa8e10afd85f04f3b05cc6463c62c Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Wed, 16 Jul 2025 10:52:58 +0800 Subject: [PATCH 02/16] anolis: net: wangxun: txgbe: add support for wangxun 10G/25G/40G ANBZ: #26488 This driver supports the Wangxun SP1000A/WX5025/WX5040 chips and enables TSO, TX checksum, RX checksum, RSS, flow director, VLAN
insert, VLAN strip, and VLAN filter offloads. Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/Kconfig | 13 + drivers/net/ethernet/wangxun/Makefile | 1 + drivers/net/ethernet/wangxun/txgbe/Makefile | 37 + drivers/net/ethernet/wangxun/txgbe/txgbe.h | 1351 ++ .../net/ethernet/wangxun/txgbe/txgbe_aml.c | 457 + .../net/ethernet/wangxun/txgbe/txgbe_aml.h | 9 + .../net/ethernet/wangxun/txgbe/txgbe_aml40.c | 293 + .../net/ethernet/wangxun/txgbe/txgbe_aml40.h | 10 + drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c | 476 + drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h | 53 + .../net/ethernet/wangxun/txgbe/txgbe_dcb.c | 592 + .../net/ethernet/wangxun/txgbe/txgbe_dcb.h | 192 + .../net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c | 798 ++ .../ethernet/wangxun/txgbe/txgbe_debugfs.c | 473 + .../net/ethernet/wangxun/txgbe/txgbe_e56.c | 4049 ++++++ .../net/ethernet/wangxun/txgbe/txgbe_e56.h | 1846 +++ .../net/ethernet/wangxun/txgbe/txgbe_e56_bp.c | 2791 ++++ .../net/ethernet/wangxun/txgbe/txgbe_e56_bp.h | 283 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 4744 +++++++ .../net/ethernet/wangxun/txgbe/txgbe_fcoe.c | 940 ++ .../net/ethernet/wangxun/txgbe/txgbe_fcoe.h | 72 + drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 7705 +++++++++++ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 350 + .../net/ethernet/wangxun/txgbe/txgbe_lib.c | 1208 ++ .../net/ethernet/wangxun/txgbe/txgbe_main.c | 11176 ++++++++++++++++ .../net/ethernet/wangxun/txgbe/txgbe_mbx.c | 688 + .../net/ethernet/wangxun/txgbe/txgbe_mbx.h | 181 + .../net/ethernet/wangxun/txgbe/txgbe_mtd.c | 1301 ++ .../net/ethernet/wangxun/txgbe/txgbe_mtd.h | 464 + .../net/ethernet/wangxun/txgbe/txgbe_param.c | 1089 ++ .../net/ethernet/wangxun/txgbe/txgbe_pcierr.c | 310 + .../net/ethernet/wangxun/txgbe/txgbe_pcierr.h | 16 + .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 1538 +++ .../net/ethernet/wangxun/txgbe/txgbe_phy.h | 222 + .../net/ethernet/wangxun/txgbe/txgbe_ptp.c | 1050 ++ .../net/ethernet/wangxun/txgbe/txgbe_sriov.c | 2047 +++ .../net/ethernet/wangxun/txgbe/txgbe_sriov.h | 54 + .../net/ethernet/wangxun/txgbe/txgbe_sysfs.c | 193 + .../net/ethernet/wangxun/txgbe/txgbe_type.h | 3555 +++++ .../net/ethernet/wangxun/txgbe/txgbe_xsk.c | 804 ++ .../net/ethernet/wangxun/txgbe/txgbe_xsk.h | 53 + 41 files changed, 53484 insertions(+) create mode 100644 drivers/net/ethernet/wangxun/txgbe/Makefile create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c create mode 100644
drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_main.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_param.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_type.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 457113f57ec1..a0be4d7b24c2 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -37,4 +37,17 @@ config NGBE To compile this driver as a module, choose M here. The module will be called ngbe. +config TXGBE + tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support" + depends on PCI + imply PTP_1588_CLOCK + help + This driver supports the Wangxun(R) 10/25/40GbE PCI Express family + of adapters. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called txgbe. endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile index f300fc503b96..390652086fde 100644 --- a/drivers/net/ethernet/wangxun/Makefile +++ b/drivers/net/ethernet/wangxun/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_LIBWX) += libwx/ obj-$(CONFIG_NGBE) += ngbe/ +obj-$(CONFIG_TXGBE) += txgbe/ diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile new file mode 100644 index 000000000000..38c5485450f0 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
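+# +# For an out-of-tree build, the KERNELDIR/all/clean targets at the end of +# this file wrap Kbuild: running make here should produce txgbe.ko against +# the running kernel, and make KERNELDIR=/path/to/kernel/build should +# target another tree; in-tree builds use only the obj/txgbe-objs lists.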
+# +# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_TXGBE) += txgbe.o + +txgbe-objs := txgbe_main.o \ + txgbe_hw.o \ + txgbe_phy.o \ + txgbe_ethtool.o \ + txgbe_bp.o \ + txgbe_dcb_nl.o \ + txgbe_dcb.o \ + txgbe_debugfs.o \ + txgbe_fcoe.o \ + txgbe_mbx.o \ + txgbe_mtd.o \ + txgbe_e56.o \ + txgbe_param.o \ + txgbe_ptp.o \ + txgbe_sriov.o \ + txgbe_sysfs.o \ + txgbe_xsk.o \ + txgbe_lib.o \ + txgbe_aml.o \ + txgbe_aml40.o \ + txgbe_pcierr.o \ + txgbe_e56_bp.o + +KERNELDIR ?= /lib/modules/$(shell uname -r)/build +all: + $(MAKE) -C $(KERNELDIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNELDIR) M=$(PWD) clean + rm -rf *.${MANSECTION}.gz *.ko diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h new file mode 100644 index 000000000000..68ae39ff2f2c --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -0,0 +1,1351 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_H_ +#define _TXGBE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "txgbe_type.h" +#include "txgbe_dcb.h" + +#include +#include + +#if IS_ENABLED(CONFIG_FCOE) +#include "txgbe_fcoe.h" +#endif /* CONFIG_FCOE */ + +#include + +#include +#include +#include +#include + +/* Ether Types */ +#define TXGBE_ETH_P_LLDP 0x88CC +#define TXGBE_ETH_P_CNM 0x22E7 + +DECLARE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); + +/* TX/RX descriptor defines */ +#define TXGBE_DEFAULT_TXD 1024 +#define TXGBE_DEFAULT_TX_WORK 256 +#define TXGBE_MAX_TXD 8192 +#define TXGBE_MIN_TXD 128 +#define TXGBE_MAX_TX_WORK 65535 + +#if (PAGE_SIZE < 8192) +#define TXGBE_DEFAULT_RXD 512 +#define TXGBE_DEFAULT_RX_WORK 256 +#else +#define TXGBE_DEFAULT_RXD 256 +#define TXGBE_DEFAULT_RX_WORK 128 +#endif + +#define TXGBE_MAX_RXD 8192 +#define TXGBE_MIN_RXD 128 + +#define TXGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define TXGBE_MIN_FCRTL 0x40 +#define TXGBE_MAX_FCRTL 0x7FF80 +#define TXGBE_MIN_FCRTH 0x600 +#define TXGBE_MAX_FCRTH 0x7FFF0 + +#define TXGBE_DEFAULT_FCPAUSE 0xFFFF + +#define TXGBE_MIN_FCPAUSE 0 +#define TXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define TXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define TXGBE_RXBUFFER_2K 2048 +#define TXGBE_RXBUFFER_3K 3072 +#define TXGBE_RXBUFFER_4K 4096 +#define TXGBE_RXBUFFER_1536 1536 +#define TXGBE_RXBUFFER_7K 7168 +#define TXGBE_RXBUFFER_8K 8192 +#define TXGBE_RXBUFFER_15K 15360 +#define TXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#define TXGBE_BP_M_NULL 0 +#define TXGBE_BP_M_SFI 1 +#define TXGBE_BP_M_KR 2 +#define TXGBE_BP_M_KX4 3 +#define TXGBE_BP_M_KX 4 +#define TXGBE_BP_M_NAUTO 0 +#define TXGBE_BP_M_AUTO 1 + +#define TXGBE_RX_HDR_SIZE TXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
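+ * e.g. with a 512-entry Rx ring, batching 16 buffers per tail bump works + * out to at most 512 / 16 = 32 MMIO writes for a full refill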
*/ +#define TXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define TXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +enum txgbe_tx_flags { + /* cmd_type flags */ + TXGBE_TX_FLAGS_HW_VLAN = 0x01, + TXGBE_TX_FLAGS_TSO = 0x02, + TXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + TXGBE_TX_FLAGS_CC = 0x08, + TXGBE_TX_FLAGS_IPV4 = 0x10, + TXGBE_TX_FLAGS_CSUM = 0x20, + TXGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + TXGBE_TX_FLAGS_LINKSEC = 0x200, + TXGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + TXGBE_TX_FLAGS_SW_VLAN = 0x40, + TXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define TXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define TXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define TXGBE_MAX_RX_DESC_POLL 10 + +#define TXGBE_MAX_VF_MC_ENTRIES 30 +#define TXGBE_MAX_VF_FUNCTIONS 64 +#define MAX_EMULATION_MAC_ADDRS 16 +#define TXGBE_MAX_PF_MACVLANS 15 +#define TXGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define TXGBE_VF_MAX_TX_QUEUES 4 + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 __iomem *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + __be16 vlan_proto; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + int link_enable; + int link_state; + bool rss_query_enabled; + + u8 trusted; + int xcast_mode; + unsigned int vf_api; + u16 ft_filter_idx[TXGBE_MAX_RDB_5T_CTL0_FILTERS]; + u16 queue_max_tx_rate[TXGBE_VF_MAX_TX_QUEUES]; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define TXGBE_MAX_TXD_PWR 14 +#define TXGBE_MAX_DATA_PER_TXD BIT(TXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), TXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define DESC_RESERVED 96 +#define DESC_RESERVED_AML 192 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct txgbe_tx_buffer { + union txgbe_tx_desc *next_to_watch; + u32 next_eop; + unsigned long time_stamp; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + void *va; + u32 tx_flags; +}; + +struct txgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + union{ + struct { + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; + }; + struct { + bool discard; + struct xdp_buff *xdp; + }; + }; +}; + +struct txgbe_queue_stats { + u64 packets; + u64 bytes; +}; + +struct txgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct txgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define TXGBE_TS_HDR_LEN 8 +enum txgbe_ring_state_t { + __TXGBE_RX_3K_BUFFER, + __TXGBE_RX_BUILD_SKB_ENABLED, + 
__TXGBE_TX_FDIR_INIT_DONE, + __TXGBE_TX_XPS_INIT_DONE, + __TXGBE_TX_DETECT_HANG, + __TXGBE_HANG_CHECK_ARMED, + __TXGBE_RX_HS_ENABLED, + __TXGBE_RX_RSC_ENABLED, + __TXGBE_TX_XDP_RING, +#if IS_ENABLED(CONFIG_FCOE) + __TXGBE_RX_FCOE, +#endif + __TXGBE_TX_DISABLED, +}; + +struct txgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct txgbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#define ring_is_hs_enabled(ring) \ + test_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__TXGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__TXGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__TXGBE_TX_XDP_RING, &(ring)->state) + +struct txgbe_ring { + struct txgbe_ring *next; /* pointer to next ring in q_vector */ + struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct bpf_prog *xdp_prog; + struct txgbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct txgbe_tx_buffer *tx_buffer_info; + struct txgbe_rx_buffer *rx_buffer_info; + }; + spinlock_t tx_lock; /* used in XDP mode */ + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_free; + u16 rx_offset; + + unsigned long last_rx_timestamp; + + u16 rx_buf_len; + union { + union { + u16 next_to_alloc; + u16 next_rs_idx; + }; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u16 xdp_tx_active; + + u8 dcb_tc; + struct txgbe_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct txgbe_tx_queue_stats tx_stats; + struct txgbe_rx_queue_stats rx_stats; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; + + dma_addr_t headwb_dma; + u32 *headwb_mem; +} ____cacheline_internodealigned_in_smp; + +enum txgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +#if IS_ENABLED(CONFIG_FCOE) + RING_F_FCOE, +#endif /* CONFIG_FCOE */ + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TXGBE_MAX_DCB_INDICES 8 +#define TXGBE_MAX_XDP_RSS_INDICES 32 +#define TXGBE_MAX_RSS_INDICES 63 +#define TXGBE_MAX_VMDQ_INDICES 64 +#define TXGBE_MAX_FDIR_INDICES 63 +#if IS_ENABLED(CONFIG_FCOE) +#define TXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + TXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + TXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#endif /* CONFIG_FCOE */ +#define MAX_XDP_QUEUES 32 + +#define TXGBE_MAX_L2A_QUEUES 4 +#define TXGBE_BAD_L2A_QUEUE 3 + +#define TXGBE_MAX_MACVLANS 32 +#define TXGBE_MAX_DCBMACVLANS 8 + +struct txgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#define TXGBE_VMDQ_8Q_MASK 0x78 +#define TXGBE_VMDQ_4Q_MASK 0x7C +#define TXGBE_VMDQ_2Q_MASK 0x7E + +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + +#if (PAGE_SIZE < 8192) +#define TXGBE_MAX_2K_FRAME_BUILD_SKB (TXGBE_RXBUFFER_1536 - NET_IP_ALIGN) +#define TXGBE_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + TXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(TXGBE_RXBUFFER_2K)) + +static inline int txgbe_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int txgbe_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. 
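+ * As a worked example, on 4K pages txgbe_compute_pad(1536) above aligns + * to the 2048-byte half page and returns SKB_WITH_OVERHEAD(2048) - 1536, + * i.e. whatever room remains after the skb_shared_info footprint.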
+ */ + if (TXGBE_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = TXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = TXGBE_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return txgbe_compute_pad(rx_buf_len); +} + +#define TXGBE_SKB_PAD txgbe_skb_pad() +#else +#define TXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(TXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) + return TXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return TXGBE_MAX_2K_FRAME_BUILD_SKB; +#endif + return TXGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(__TXGBE_RX_3K_BUFFER, &ring->state)) + return 1; +#endif + return 0; +} + +#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring)) + +static inline unsigned int txgbe_rx_offset(struct txgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? TXGBE_SKB_PAD : 0; +} + +struct txgbe_ring_container { + struct txgbe_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define txgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
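+ * Each q_vector embeds its rings in the trailing ring[] flexible array, + * so the per-vector allocation is sized for the rings it actually serves.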
+ */ +struct txgbe_q_vector { + struct txgbe_adapter *adapter; + int cpu; + u16 v_idx; + u16 itr; + struct txgbe_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + + int numa_node; + struct rcu_head rcu; + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + + /* for dynamic allocation of rings associated with this q_vector */ + struct txgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#define TXGBE_HWMON_TYPE_TEMP 0 +#define TXGBE_HWMON_TYPE_ALARMTHRESH 1 +#define TXGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct txgbe_hw *hw; + struct txgbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; + +/* microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define TXGBE_MIN_RSC_ITR 24 +#define TXGBE_100K_ITR 40 +#define TXGBE_20K_ITR 200 +#define TXGBE_16K_ITR 248 +#define TXGBE_12K_ITR 336 + +#define TXGBE_ITR_ADAPTIVE_MIN_INC 2 +#define TXGBE_ITR_ADAPTIVE_MIN_USECS 10 +#define TXGBE_ITR_ADAPTIVE_MAX_USECS 84 +#define TXGBE_ITR_ADAPTIVE_LATENCY 0x80 +#define TXGBE_ITR_ADAPTIVE_BULK 0x00 +#define TXGBE_ITR_ADAPTIVE_MASK_USECS (TXGBE_ITR_ADAPTIVE_LATENCY - \ + TXGBE_ITR_ADAPTIVE_MIN_INC) + +/* txgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 txgbe_test_staterr(union txgbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* txgbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 txgbe_desc_unused(struct txgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define TXGBE_RX_DESC(R, i) \ + (&(((union txgbe_rx_desc *)((R)->desc))[i])) +#define TXGBE_TX_DESC(R, i) \ + (&(((union txgbe_tx_desc *)((R)->desc))[i])) +#define TXGBE_TX_CTXTDESC(R, i) \ + (&(((struct txgbe_tx_context_desc *)((R)->desc))[i])) + +#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#if IS_ENABLED(CONFIG_FCOE) +/* use 3K as the baby jumbo frame size for FCoE */ +#define TXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* CONFIG_FCOE */ + +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE 64 + +struct txgbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define TXGBE_MAC_STATE_DEFAULT 0x1 +#define TXGBE_MAC_STATE_MODIFIED 0x2 +#define TXGBE_MAC_STATE_IN_USE 0x4 + +#ifdef TXGBE_PROCFS +struct txgbe_therm_proc_data { + struct txgbe_hw *hw; + struct txgbe_thermal_diode_data *sensor_data; +}; +#endif + +/* Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
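+ * (PX_IMS/PX_IMC/PX_ICS each come as a pair of 32-bit registers covering + * vectors 0-31 and 32-63; see txgbe_intr_enable() further down)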
+ */ +#define MAX_MSIX_Q_VECTORS TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE +#define MAX_MSIX_COUNT TXGBE_MAX_MSIX_VECTORS_SAPPHIRE + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define TXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define TXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +#define TXGBE_FLAG_MSI_CAPABLE BIT(0) +#define TXGBE_FLAG_MSI_ENABLED BIT(1) +#define TXGBE_FLAG_MSIX_CAPABLE BIT(2) +#define TXGBE_FLAG_MSIX_ENABLED BIT(3) +#define TXGBE_FLAG_LLI_PUSH BIT(4) +#define TXGBE_FLAG_IPSEC_ENABLED BIT(5) +#define TXGBE_FLAG_TPH_ENABLED BIT(6) +#define TXGBE_FLAG_TPH_CAPABLE BIT(7) +#define TXGBE_FLAG_TPH_ENABLED_DATA BIT(8) +#define TXGBE_FLAG_MQ_CAPABLE BIT(9) +#define TXGBE_FLAG_DCB_ENABLED BIT(10) +#define TXGBE_FLAG_VMDQ_ENABLED BIT(11) +#define TXGBE_FLAG_FAN_FAIL_CAPABLE BIT(12) +#define TXGBE_FLAG_NEED_LINK_UPDATE BIT(13) +#define TXGBE_FLAG_NEED_LINK_CONFIG BIT(14) +#define TXGBE_FLAG_FDIR_HASH_CAPABLE BIT(15) +#define TXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(16) +#define TXGBE_FLAG_FCOE_CAPABLE BIT(17) +#define TXGBE_FLAG_FCOE_ENABLED BIT(18) +#define TXGBE_FLAG_SRIOV_CAPABLE BIT(19) +#define TXGBE_FLAG_SRIOV_ENABLED BIT(20) +#define TXGBE_FLAG_SRIOV_REPLICATION_ENABLE BIT(21) +#define TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE BIT(22) +#define TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE BIT(23) +#define TXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(24) +#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(25) +#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE BIT(26) +#define TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(27) +#define TXGBE_FLAG_NEED_ETH_PHY_RESET BIT(28) + +#define TXGBE_FLAG_RX_HS_ENABLED BIT(30) +#define TXGBE_FLAG_LINKSEC_ENABLED BIT(31) + +/* preset defaults */ +#define TXGBE_FLAGS_SP_INIT (TXGBE_FLAG_MSI_CAPABLE \ + | TXGBE_FLAG_MSIX_CAPABLE \ + | TXGBE_FLAG_MQ_CAPABLE \ + | TXGBE_FLAG_SRIOV_CAPABLE) + +/** + * txgbe_adapter.flag2 + **/ +#define TXGBE_FLAG2_RSC_CAPABLE BIT(0) +#define TXGBE_FLAG2_RSC_ENABLED BIT(1) +#define TXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(3) +#define TXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(4) +#define TXGBE_FLAG2_SEARCH_FOR_SFP BIT(5) +#define TXGBE_FLAG2_SFP_NEEDS_RESET BIT(6) +#define TXGBE_FLAG2_PF_RESET_REQUESTED BIT(7) +#define TXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(8) +#define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(9) +#define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(10) +#define TXGBE_FLAG2_RSS_ENABLED BIT(12) +#define TXGBE_FLAG2_PTP_PPS_ENABLED BIT(11) +#define TXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define TXGBE_FLAG2_EEE_ENABLED BIT(15) +#define TXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(16) +#define TXGBE_FLAG2_VLAN_PROMISC BIT(17) +#define TXGBE_FLAG2_DEV_RESET_REQUESTED BIT(18) +#define TXGBE_FLAG2_RESET_INTR_RECEIVED BIT(19) +#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED BIT(20) +#define TXGBE_FLAG2_CLOUD_SWITCH_ENABLED BIT(21) +#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED BIT(22) +#define TXGBE_FLAG2_KR_TRAINING BIT(24) +#define TXGBE_FLAG2_KR_AUTO BIT(25) +#define TXGBE_FLAG2_LINK_DOWN BIT(26) +#define TXGBE_FLAG2_KR_PRO_DOWN BIT(27) +#define TXGBE_FLAG2_KR_PRO_REINIT BIT(28) +#define TXGBE_FLAG2_ECC_ERR_RESET BIT(29) +#define TXGBE_FLAG2_RX_LEGACY BIT(30) +#define TXGBE_FLAG2_PCIE_NEED_RECOVER BIT(31) +#define TXGBE_FLAG2_PCIE_NEED_Q_RESET BIT(30) +#define TXGBE_FLAG2_SERVICE_RUNNING BIT(13) + +/* amlite: dma reset */ +#define TXGBE_FLAG2_DMA_RESET_REQUESTED BIT(2) + +#define TXGBE_FLAG3_PHY_EVENT BIT(0) +#define TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS BIT(1) + +#define TXGBE_SET_FLAG(_input, _flag, _result) \ + (((_flag) 
<= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +enum txgbe_isb_idx { + TXGBE_ISB_HEADER, + TXGBE_ISB_MISC, + TXGBE_ISB_VEC0, + TXGBE_ISB_VEC1, + TXGBE_ISB_MAX +}; + +#define TXGBE_PHY_FEC_RS BIT(0) +#define TXGBE_PHY_FEC_BASER BIT(1) +#define TXGBE_PHY_FEC_OFF BIT(2) +#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | TXGBE_PHY_FEC_BASER |\ + TXGBE_PHY_FEC_RS) + +/* board specific private data structure */ +struct txgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* OS defined structs */ + struct net_device *netdev; + struct bpf_prog *xdp_prog; + struct pci_dev *pdev; + + unsigned long state; + u32 bp_link_mode; + u32 curbp_link_mode; + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; + u32 flags2; + u32 flags3; + u8 tx_unidir_mode; + u8 an73_mode; + u8 backplane_an; + u8 an73; + u8 autoneg; + u16 ffe_main; + u16 ffe_pre; + u16 ffe_post; + u8 ffe_set; + u16 fec_mode; + u8 backplane_mode; + u8 backplane_auto; + struct phytxeq aml_txeq; + bool an_done; + u32 fsm; + + bool cloud_mode; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + bool lro_before_xdp; + u16 old_rss_limit; + /* XDP */ + int num_xdp_queues; + struct txgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + unsigned long *af_xdp_zc_qps; + + /* TX */ + struct txgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct txgbe_ring *rx_ring[MAX_RX_QUEUES]; + int num_rx_pools; /* does not include pools assigned to VFs */ + int num_rx_queues_per_pool; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + struct ieee_pfc *txgbe_ieee_pfc; + struct ieee_ets *txgbe_ieee_ets; + struct txgbe_dcb_config dcb_cfg; + struct txgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; + u8 dcb_tc; + + enum txgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct txgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + struct net_device_stats net_stats; + + u64 test_icr; + struct txgbe_ring test_tx_ring; + struct txgbe_ring test_rx_ring; + + /* structs defined in txgbe_hw.h */ + struct txgbe_hw hw; + u16 msg_enable; + struct txgbe_hw_stats stats; + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int xdp_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + u32 speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + /* mutex for amlite phy access */ + struct mutex e56_lock; + + struct timer_list service_timer; + struct work_struct service_task; + struct work_struct sfp_sta_task; + struct work_struct temp_task; + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union txgbe_atr_input fdir_mask; + int fdir_filter_count; +
u32 fdir_pballoc; + u32 atr_sample_rate; + + /* spinlock for flow director */ + spinlock_t fdir_perfect_lock; + + struct txgbe_etype_filter_info etype_filter_info; + struct txgbe_5tuple_filter_info ft_filter_info; + +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_fcoe fcoe; +#endif /* CONFIG_FCOE */ + u8 __iomem *io_addr; + u32 wol; + + u16 bd_number; + + u16 bridge_mode; + + u8 fec_link_mode; + u8 cur_fec_link; + bool link_valid; + u32 etrack_id; + char eeprom_id[32]; + char fl_version[16]; + char fw_version[64]; + bool netdev_registered; + u32 interrupt_event; + u32 led_reg; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + + /* ptp spin lock */ + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp)(struct txgbe_adapter *adapter); + u64 pps_edge_start; + u64 pps_edge_end; + u64 sec_to_cc; + u8 pps_enabled; + + DECLARE_BITMAP(active_vfs, TXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + unsigned int max_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + struct txgbe_mac_addr *mac_table; + __le16 vxlan_port; + __le16 geneve_port; + + struct hwmon_buff txgbe_hwmon_buff; + + struct dentry *txgbe_dbg_adapter; + u8 default_up; + unsigned int indices; + + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define TXGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[TXGBE_MAX_RETA_ENTRIES]; +#define TXGBE_RSS_KEY_SIZE 40 + u32 rss_key[TXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[TXGBE_ISB_MAX]; + + u64 eth_priv_flags; +#define TXGBE_ETH_PRIV_FLAG_LLDP BIT(0) +#define TXGBE_ETH_PRIV_FLAG_LEGACY_RX BIT(1) + + /* AF_XDP zero-copy */ + struct xsk_buff_pool **xsk_pools; + u16 num_xsk_pools_used; + u16 num_xsk_pools; + + bool cmplt_to_dis; + u8 i2c_eeprom[512]; + u32 eeprom_len; + u32 eeprom_type; + + /* amlite: new SW-FW mbox */ +/* u32 swfw_mbox_buf[64]; */ + u8 swfw_index; + u8 desc_reserved; + + int amlite_temp; + + int vlan_rate_link_speed; + DECLARE_BITMAP(limited_vlans, 4096); + int active_vlan_limited; + int queue_rate_limit[64]; // From back to front +}; + +static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter, + enum txgbe_isb_idx idx) +{ + u32 cur_tag = 0; + u32 cur_diff = 0; + + cur_tag = adapter->isb_mem[TXGBE_ISB_HEADER]; + cur_diff = cur_tag - adapter->isb_tag[idx]; + + adapter->isb_tag[idx] = cur_tag; + + return adapter->isb_mem[idx]; +} + +static inline u8 txgbe_max_rss_indices(struct txgbe_adapter *adapter) +{ + if (adapter->xdp_prog) + return TXGBE_MAX_XDP_RSS_INDICES; + return TXGBE_MAX_RSS_INDICES; +} + +struct txgbe_fdir_filter { + struct hlist_node fdir_node; + union txgbe_atr_input filter; + u16 sw_idx; + u64 action; +}; + +enum txgbe_state_t { + __TXGBE_TESTING, + __TXGBE_RESETTING, + __TXGBE_DOWN, + __TXGBE_HANGING, + __TXGBE_DISABLED, + __TXGBE_REMOVING, + __TXGBE_SERVICE_SCHED, + __TXGBE_SERVICE_INITED, + __TXGBE_IN_SFP_INIT, + __TXGBE_PTP_RUNNING, + 
__TXGBE_PTP_TX_IN_PROGRESS, + __TXGBE_SWFW_BUSY, +}; + +struct txgbe_cb { + dma_addr_t dma; + + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; + +#define TXGBE_CB(skb) ((struct txgbe_cb *)(skb)->cb) + +#define TXGBE_1588_PPS_WIDTH 100 +#define TXGBE_1588_TOD_ENABLE 1 +#define TXGBE_1588_PPS_LEVEL 1 + +/* ESX txgbe CIM IOCTL definition */ +void txgbe_sysfs_exit(struct txgbe_adapter *adapter); +int txgbe_sysfs_init(struct txgbe_adapter *adapter); + +extern struct dcbnl_rtnl_ops dcbnl_ops; +int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max); + +u8 txgbe_dcb_txq_to_tc(struct txgbe_adapter *adapter, u8 index); + +/* needed by txgbe_main.c */ +int txgbe_validate_mac_addr(u8 *mc_addr); +void txgbe_check_options(struct txgbe_adapter *adapter); +void txgbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by txgbe_ethtool.c */ +extern char txgbe_driver_name[]; +extern const char txgbe_driver_version[]; + +void txgbe_service_event_schedule(struct txgbe_adapter *adapter); +void txgbe_irq_disable(struct txgbe_adapter *adapter); +void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush); +int txgbe_open(struct net_device *netdev); +int txgbe_close(struct net_device *netdev); +void txgbe_up(struct txgbe_adapter *adapter); +void txgbe_down(struct txgbe_adapter *adapter); +void txgbe_reinit_locked(struct txgbe_adapter *adapter); +void txgbe_reset(struct txgbe_adapter *adapter); +void txgbe_set_ethtool_ops(struct net_device *netdev); +int txgbe_setup_rx_resources(struct txgbe_ring *ring); +int txgbe_setup_tx_resources(struct txgbe_ring *ring); +void txgbe_free_rx_resources(struct txgbe_ring *ring); +void txgbe_free_tx_resources(struct txgbe_ring *ring); +void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_configure_tx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_update_stats(struct txgbe_adapter *adapter); +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter); +void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter); +bool txgbe_is_txgbe(struct pci_dev *pcidev); +netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, + struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring, + struct txgbe_tx_buffer *tx_buffer); +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count); +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter); +void txgbe_set_rx_mode(struct net_device *netdev); +int txgbe_write_mc_addr_list(struct net_device *netdev); +int txgbe_setup_tc(struct net_device *dev, u8 tc); +void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx); +void txgbe_do_reset(struct net_device *netdev); +void txgbe_write_eitr(struct txgbe_q_vector *q_vector); +int txgbe_poll(struct napi_struct *napi, int budget); +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter); +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter); +void txgbe_print_tx_hang_status(struct 
txgbe_adapter *adapter); + +#if IS_ENABLED(CONFIG_FCOE) +void txgbe_configure_fcoe(struct txgbe_adapter *adapter); +int txgbe_fso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len); +int txgbe_fcoe_ddp(struct txgbe_adapter *adapter, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb); +int txgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); + +int txgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); + +int txgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +int txgbe_setup_fcoe_ddp_resources(struct txgbe_adapter *adapter); +void txgbe_free_fcoe_ddp_resources(struct txgbe_adapter *adapter); +int txgbe_fcoe_enable(struct net_device *netdev); +int txgbe_fcoe_disable(struct net_device *netdev); +#endif /* CONFIG_FCOE */ +#if IS_ENABLED(CONFIG_DCB) +u8 txgbe_fcoe_getapp(struct net_device *netdev); +u8 txgbe_fcoe_get_tc(struct txgbe_adapter *adapter); +int txgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter); +void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter); +#endif /* CONFIG_FCOE */ + +void txgbe_dbg_adapter_init(struct txgbe_adapter *adapter); +void txgbe_dbg_adapter_exit(struct txgbe_adapter *adapter); +void txgbe_dbg_init(void); +void txgbe_dbg_exit(void); +void txgbe_dump(struct txgbe_adapter *adapter); +void txgbe_setup_reta(struct txgbe_adapter *adapter); + +static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +#if IS_ENABLED(CONFIG_DCB) +s32 txgbe_dcb_hw_ets(struct txgbe_hw *hw, struct ieee_ets *ets, int max_frame); +#endif /* CONFIG_DCB */ + +int txgbe_wol_supported(struct txgbe_adapter *adapter); +int txgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int txgbe_write_uc_addr_list(struct net_device *netdev, int pool); +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter); +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_available_rars(struct txgbe_adapter *adapter); +void txgbe_vlan_mode(struct net_device *netdev, u32 features); +void txgbe_ptp_init(struct txgbe_adapter *adapter); +void txgbe_ptp_stop(struct txgbe_adapter *adapter); +void txgbe_ptp_suspend(struct txgbe_adapter *adapter); +void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb); +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter); +void txgbe_ptp_reset(struct txgbe_adapter *adapter); +void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +void txgbe_sriov_reinit(struct txgbe_adapter *adapter); +#endif + +void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter); + +u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter); +void txgbe_store_reta(struct txgbe_adapter *adapter); +void txgbe_store_vfreta(struct txgbe_adapter *adapter); + +int txgbe_setup_isb_resources(struct txgbe_adapter *adapter); +void txgbe_free_isb_resources(struct txgbe_adapter *adapter); +void txgbe_configure_isb(struct txgbe_adapter 
*adapter); + +void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring); +void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring); +u32 txgbe_tx_cmd_type(u32 tx_flags); +void txgbe_free_headwb_resources(struct txgbe_ring *ring); +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits); +int txgbe_link_mbps(struct txgbe_adapter *adapter); + +int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan); +void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan); +void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit); +void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter); + +/** + * Interrupt masking operations. Each bit in PX_ICn corresponds to an interrupt. + * Disable an interrupt by writing to PX_IMS with the corresponding bit=1, + * enable an interrupt by writing to PX_IMC with the corresponding bit=1, + * and trigger an interrupt by writing to PX_ICS with the corresponding bit=1. + **/ +#define TXGBE_INTR_ALL (~0ULL) +#define TXGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#define TXGBE_INTR_Q(i) (1ULL << (i)) +static inline void txgbe_intr_enable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMC(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMC(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMS(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_trigger(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_ICS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_ICS(1), mask); + + /* skip the flush */ +} + +#define TXGBE_RING_SIZE(R) ((R)->count < TXGBE_MAX_TXD ? (R)->count / 128 : 0) + +#define TXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define TXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define TXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define TXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) +#define usec_delay(_x) udelay(_x) + +#define TXGBE_NAME "txgbe" + +struct txgbe_hw; +struct txgbe_msg { + u16 msg_enable; +}; + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw); +struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw); + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(txgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(txgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...)
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define TXGBE_FAILED_READ_CFG_WORD 0xffffU +#define TXGBE_FAILED_READ_CFG_BYTE 0xffU + +u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet); +u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg); +void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value); + +#define TXGBE_READ_PCIE_WORD txgbe_read_pci_cfg_word +#define TXGBE_WRITE_PCIE_WORD txgbe_write_pci_cfg_word +#define TXGBE_R32_Q(h, r) txgbe_read_reg(h, r, true) + +#define TXGBE_EEPROM_GRANT_ATTEMPS 100 +#define TXGBE_HTONL(_i) htonl(_i) +#define TXGBE_NTOHL(_i) ntohl(_i) +#define TXGBE_NTOHS(_i) ntohs(_i) +#define TXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define TXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +enum { + TXGBE_ERROR_SOFTWARE, + TXGBE_ERROR_POLLING, + TXGBE_ERROR_INVALID_STATE, + TXGBE_ERROR_UNSUPPORTED, + TXGBE_ERROR_ARGUMENT, + TXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) do { \ + switch (level) { \ + case TXGBE_ERROR_SOFTWARE: \ + case TXGBE_ERROR_CAUTION: \ + case TXGBE_ERROR_POLLING: \ + netif_warn(txgbe_hw_to_msg(hw), drv, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case TXGBE_ERROR_INVALID_STATE: \ + case TXGBE_ERROR_UNSUPPORTED: \ + case TXGBE_ERROR_ARGUMENT: \ + netif_err(txgbe_hw_to_msg(hw), hw, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 000000000000..76dd9223e90e --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum txgbe_media_type txgbe_get_media_type_aml(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + u32 value = 0; + + /* Check to see if speed passed in is supported. 
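+ * get_link_capabilities() reports what the identified module or backplane + * can do; unsupported bits are masked out of the request, and a request + * that masks down to zero fails with TXGBE_ERR_LINK_SETUP.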
*/ + status = hw->mac.ops.get_link_capabilities(hw, + &link_capabilities, &autoneg); + if (status) + goto out; + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if (link_speed == speed && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 25, hw->bypass_ctle); + mutex_unlock(&adapter->e56_lock); + return 0; + } + + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) + goto out; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + if (link_speed == speed && link_up && + !(speed == TXGBE_LINK_SPEED_25GB_FULL && + !(adapter->fec_link_mode & adapter->cur_fec_link))) + goto out; + + if (speed == TXGBE_LINK_SPEED_25GB_FULL && + link_speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + + if (link_up) + goto out; + } + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_PHY_INIT_NOT_DONE) + goto out; + + if (ret_status == TXGBE_ERR_TIMEOUT) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + goto out; + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + txgbe_e56_fec_mode_polling(hw, &link_up); + } else { + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + } + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
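+ * On these AML parts the result is in practice keyed off the identified + * SFP type: multispeed fiber advertises 10G and 25G with autoneg, 25G + * SR/LR/AOC modules are fixed at 25G, and DA cables follow + * hw->phy.fiber_suppport_speed.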
+ **/ +static s32 txgbe_get_link_capabilities_aml(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = true; + } else if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL) { + adapter->backplane_an = false; + *autoneg = false; + } else { + *autoneg = true; + } + *speed = hw->phy.fiber_suppport_speed; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_sr_core1 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_lr_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else if (hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core0 || + hw->phy.sfp_type == txgbe_sfp_type_25g_aoc_core1) { + *speed = TXGBE_LINK_SPEED_25GB_FULL; + *autoneg = false; + } else { + /* SFP */ + if (hw->phy.sfp_type == txgbe_sfp_type_not_present) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (!(links_reg & TXGBE_CFG_PORT_ST_LINK_UP)) { + *link_up = false; + } else { + *link_up = true; + break; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + + if (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1) { + *link_up = hw->f2c_mod_status; + + if (*link_up) + wr32(hw, TXGBE_CFG_LED_CTL, 0); + else + hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G | + TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE); + } + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. 
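+ * + * Speeds are tried in software, highest first (25G, then 10G), since 10Gb + * fiber does not autonegotiate; if nothing links, the routine retries once + * with only the highest speed that was attempted.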
+ **/
+static s32 txgbe_setup_mac_link_multispeed_fiber_aml(struct txgbe_hw *hw,
+						     u32 speed,
+						     bool autoneg_wait_to_complete)
+{
+	u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	struct txgbe_adapter *adapter = hw->back;
+	bool autoneg, link_up = false;
+	u32 speedcnt = 0;
+	s32 status = 0;
+
+	/* Mask off requested but non-supported speeds */
+	status = hw->mac.ops.get_link_capabilities(hw,
+						   &link_speed, &autoneg);
+	if (status != 0)
+		return status;
+
+	speed &= link_speed;
+
+	/* Try each speed one by one, highest priority first. We do this in
+	 * software because 10Gb fiber doesn't support speed autonegotiation.
+	 */
+	if (speed & TXGBE_LINK_SPEED_25GB_FULL) {
+		speedcnt++;
+		highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		txgbe_e56_check_phy_link(hw, &link_speed, &link_up);
+
+		adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw);
+
+		if (link_speed == TXGBE_LINK_SPEED_25GB_FULL && link_up &&
+		    adapter->fec_link_mode & adapter->cur_fec_link)
+			goto out;
+
+		/* Allow module to change analog characteristics (10G->25G) */
+		msec_delay(40);
+
+		status = hw->mac.ops.setup_mac_link(hw,
+						    TXGBE_LINK_SPEED_25GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* AML waits for link during setup, no need to wait repeatedly */
+		/* If we have link, just jump out */
+		txgbe_e56_check_phy_link(hw, &link_speed, &link_up);
+
+		if (link_up)
+			goto out;
+	}
+
+	if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+		speedcnt++;
+		if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		txgbe_e56_check_phy_link(hw, &link_speed, &link_up);
+
+		if (link_speed == TXGBE_LINK_SPEED_10GB_FULL && link_up)
+			goto out;
+
+		/* Allow module to change analog characteristics (25G->10G) */
+		msec_delay(40);
+
+		status = hw->mac.ops.setup_mac_link(hw,
+						    TXGBE_LINK_SPEED_10GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* AML waits for link during setup, no need to wait repeatedly */
+		/* If we have link, just jump out */
+		txgbe_e56_check_phy_link(hw, &link_speed, &link_up);
+
+		if (link_up) {
+			adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+			goto out;
+		}
+	}
+
+	/* We didn't get link. Configure back to the highest speed we tried
+	 * (if there was more than one). We call ourselves back with just the
+	 * single highest speed that the user requested.
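+	 * For instance, if 25G and 10G were both tried without link,
+	 * speedcnt == 2 and we re-enter once more with 25G (the highest
+	 * speed tried) as the only requested speed.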
+ */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber_aml(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + return status; +} + +static void txgbe_init_mac_link_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + if (mac->ops.get_media_type(hw) == txgbe_media_type_fiber) { + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber_aml; + mac->ops.setup_mac_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } + } +} + +static s32 txgbe_setup_sfp_modules_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_aml(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 txgbe_init_phy_ops_aml(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = hw->phy.ops.identify(hw); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml; + mac->ops.setup_link = txgbe_setup_mac_link_aml; + mac->ops.check_link = txgbe_check_mac_link_aml; + + return ret_val; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 000000000000..1178efcaea48 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw); + +#endif /* _TXGBE_AML_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c new file mode 100644 index 000000000000..8726c658fe70 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_aml40.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe_phy.h" + +#include "txgbe.h" + +/** + * txgbe_get_media_type_aml40 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw) +{ + u8 device_type = hw->subsystem_device_id & 0xF0; + enum txgbe_media_type media_type; + + switch (device_type) { + case TXGBE_ID_KR_KX_KX4: + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber_qsfp; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * txgbe_setup_mac_link_aml - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 txgbe_setup_mac_link_aml40(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + bool link_up = false; + bool autoneg = false; + s32 ret_status = 0; + int i = 0; + s32 status = 0; + + /* Check to see if speed passed in is supported. */ + status = hw->mac.ops.get_link_capabilities(hw, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (!adapter->backplane_an) { + if (link_speed == speed && link_up) + goto out; + } else { + if (link_up && adapter->an_done) + goto out; + } + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 40, hw->bypass_ctle); + mutex_unlock(&adapter->e56_lock); + goto out; + } + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + break; + msleep(250); + } + + if (link_speed == speed && link_up) + goto out; + + mutex_lock(&adapter->e56_lock); + ret_status = txgbe_set_link_to_amlite(hw, speed); + mutex_unlock(&adapter->e56_lock); + + if (ret_status == TXGBE_ERR_TIMEOUT) + adapter->link_valid = false; + + for (i = 0; i < 4; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(250); + } + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + +out: + return status; +} + +/** + * txgbe_get_link_capabilities_aml40 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
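+ *
+ * All media variants below currently report the same capability, e.g.
+ * for a 40G DAC as well as a backplane link:
+ *   *speed = TXGBE_LINK_SPEED_40GB_FULL, *autoneg = true.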
+ **/ +static s32 txgbe_get_link_capabilities_aml40(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1) { + *autoneg = true; + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else if (txgbe_is_backplane(hw)) { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } else { + *speed = TXGBE_LINK_SPEED_40GB_FULL; + *autoneg = true; + } + + return status; +} + +/** + * txgbe_check_mac_link_aml40 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 txgbe_check_mac_link_aml40(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + struct txgbe_adapter *adapter = hw->back; + u32 links_reg = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + + if (!adapter->link_valid) { + *link_up = false; + + msleep(100); + continue; + } + + if (!(links_reg & TXGBE_CFG_PORT_ST_LINK_UP)) { + *link_up = false; + } else { + *link_up = true; + break; + } + msleep(100); + } + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + + if (!adapter->link_valid) + *link_up = false; + + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + if (txgbe_is_backplane(hw)) { + if (!adapter->an_done) { + *link_up = false; + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + } + + return 0; +} + +static void txgbe_init_mac_link_ops_aml40(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + mac->ops.set_rate_select_speed = txgbe_set_hard_rate_select_speed; +} + +static s32 txgbe_setup_sfp_modules_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) { + txgbe_init_mac_link_ops_aml40(hw); + + hw->phy.ops.reset = NULL; + } + + return ret_val; +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
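+ *
+ * Typical order, as implemented below: txgbe_init_i2c(), program the
+ * MDIO clause 22 port mask, identify the PHY/SFP module, then install
+ * the AML40 link ops and clear phy.ops.reset for known SFP types.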
+ * + **/ +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + txgbe_init_i2c(hw); + wr32(hw, TXGBE_MAC_MDIO_CLAUSE_22_PORT, + TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22); + + /* Identify the PHY or SFP module */ + ret_val = hw->phy.ops.identify(hw); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_aml40(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + +init_phy_ops_out: + return ret_val; +} + +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_aml40; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_aml40; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_aml40; + + /* LINK */ + mac->ops.check_link = txgbe_check_mac_link_aml40; + mac->ops.setup_link = txgbe_setup_mac_link_aml40; + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_aml40; + + return ret_val; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h new file mode 100644 index 000000000000..aea300e69b03 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml40.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_AML40_H_ +#define _TXGBE_AML40_H_ + +enum txgbe_media_type txgbe_get_media_type_aml40(struct txgbe_hw *hw); +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_aml40(struct txgbe_hw *hw); +#endif /* _TXGBE_AML40_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c new file mode 100644 index 000000000000..ea3506698c7c --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/
+
+#include "txgbe_bp.h"
+
+void txgbe_bp_close_protect(struct txgbe_adapter *adapter)
+{
+	adapter->flags2 |= TXGBE_FLAG2_KR_PRO_DOWN;
+	while (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) {
+		msleep(100);
+		kr_dbg(KR_MODE, "waiting for reinit to complete..%x\n", adapter->flags2);
+	}
+}
+
+int txgbe_bp_mode_setting(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+
+	/* default to enabling AN73 */
+	if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4)
+		adapter->backplane_an = 1;
+
+	switch (hw->mac.type) {
+	case txgbe_mac_sp:
+		adapter->backplane_an = 0;
+		break;
+	case txgbe_mac_aml40:
+	case txgbe_mac_aml:
+	default:
+		adapter->backplane_an = 1;
+		break;
+	}
+
+	adapter->autoneg = 1;
+	switch (adapter->backplane_mode) {
+	case TXGBE_BP_M_KR:
+		hw->subsystem_device_id = TXGBE_ID_WX1820_KR_KX_KX4;
+		break;
+	case TXGBE_BP_M_KX4:
+		hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_XAUI;
+		break;
+	case TXGBE_BP_M_KX:
+		hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_SGMII;
+		break;
+	case TXGBE_BP_M_SFI:
+		hw->subsystem_device_id = TXGBE_ID_WX1820_SFP;
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->backplane_auto == TXGBE_BP_M_AUTO) {
+		adapter->backplane_an = 1;
+		adapter->autoneg = 1;
+	} else if (adapter->backplane_auto == TXGBE_BP_M_NAUTO) {
+		adapter->backplane_an = 0;
+		adapter->autoneg = 0;
+	}
+
+	return 0;
+}
+
+void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 value = 0;
+	int ret = 0;
+
+	/* only continue if link is down */
+	if (netif_carrier_ok(adapter->netdev))
+		return;
+
+	if (adapter->flags2 & TXGBE_FLAG2_KR_TRAINING) {
+		value = txgbe_rd32_epcs(hw, 0x78002);
+		if ((value & BIT(2)) == BIT(2)) {
+			e_info(hw, "Enter training\n");
+			ret = handle_bkp_an73_flow(0, adapter);
+			if (ret)
+				txgbe_set_link_to_kr(hw, 1);
+		}
+		adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING;
+	}
+}
+
+void txgbe_bp_down_event(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 val = 0, val1 = 0;
+
+	if (adapter->backplane_an == 0)
+		return;
+
+	val = txgbe_rd32_epcs(hw, 0x78002);
+	val1 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+	kr_dbg(KR_MODE, "AN INT : %x - AN CTL : %x - PL : %x\n",
+	       val, val1, txgbe_rd32_epcs(hw, 0x70012));
+
+	msleep(100);
+	if ((val & BIT(2)) == BIT(2)) {
+		if (!(adapter->flags2 & TXGBE_FLAG2_KR_TRAINING))
+			adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING;
+	} else {
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0);
+		txgbe_wr32_epcs(hw, 0x78002, 0x0000);
+		txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000);
+	}
+}
+
+int chk_bkp_an73_ability(struct bkpan73ability tbkp_an73_ability,
+			 struct bkpan73ability tlpbkp_an73_ability,
+			 struct txgbe_adapter *adapter)
+{
+	unsigned int com_link_ability;
+
+	kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n");
+	kr_dbg(KR_MODE, "------------------------\n");
+
+	/*-- Check the common link ability and take action based on the result */
+	com_link_ability = tbkp_an73_ability.link_ability & tlpbkp_an73_ability.link_ability;
+	kr_dbg(KR_MODE, "com_link_ability= 0x%x, link_ability= 0x%x, lpLinkAbility= 0x%x\n",
+	       com_link_ability, tbkp_an73_ability.link_ability, tlpbkp_an73_ability.link_ability);
+
+	/* only KR is supported */
+	if (com_link_ability == 0) {
+		kr_dbg(KR_MODE, "WARNING: The Link Partner does not support any compatible speed mode!!!\n\n");
+		return -1;
+	} else if (com_link_ability & 0x80) {
+		if (tbkp_an73_ability.cu_linkmode == 0) {
+			kr_dbg(KR_MODE, "Link mode is matched with Link Partner: [LINK_KR].\n");
+			goto out;
+		} else {
+			kr_dbg(KR_MODE,
"Link mode is not matched with Link Partner: [LINK_KR].\n"); + kr_dbg(KR_MODE, "Set the local link mode to [LINK_KR] ...\n"); + return 1; + } + } + +out: + return 0; +} + +static void txgbe_bp_print_page_status(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + rdata = txgbe_rd32_epcs(hw, 0x70010); + kr_dbg(KR_MODE, "read 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70011); + kr_dbg(KR_MODE, "read 70011 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70012); + kr_dbg(KR_MODE, "read 70012 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70013); + kr_dbg(KR_MODE, "read 70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70014); + kr_dbg(KR_MODE, "read 70014 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70015); + kr_dbg(KR_MODE, "read 70015 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70016); + kr_dbg(KR_MODE, "read 70016 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70017); + kr_dbg(KR_MODE, "read 70017 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70018); + kr_dbg(KR_MODE, "read 70018 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70019); + kr_dbg(KR_MODE, "read 70019 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70020); + kr_dbg(KR_MODE, "read 70020 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, 0x70021); + kr_dbg(KR_MODE, "read 70021 data %0x\n", rdata); +} + +static void txgbe_bp_exchange_page(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & BIT(2))) + return; + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + kr_dbg(KR_MODE, "-----count----- %d\n", count); + if (an_int & BIT(2)) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_bp_print_page_status(adapter); + addr = base_page == 0 ? 0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + kr_dbg(KR_MODE, "write 78002 0x%0x\n", 0x0000); + usec_delay(100); + if (next_page == 0) + return; + } + usec_delay(100); + } +} + +int get_bkp_an73_ability(struct bkpan73ability *pt_bkp_an73_ability, unsigned char by_link_partner, + struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int rdata; + int status = 0; + + kr_dbg(KR_MODE, "by_link_partner = %d\n", by_link_partner); + kr_dbg(KR_MODE, "----------------------------------------\n"); + + /* Link Partner Base Page */ + if (by_link_partner == 1) { + /*Read the link partner AN73 Base Page Ability Registers*/ + kr_dbg(KR_MODE, "Read the link partner AN73 Base Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->next_page = (rdata >> 15) & 0x01; + kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->next_page); + + /* if have next pages, exchange next pages. 
*/ + if (pt_bkp_an73_ability->next_page) + txgbe_bp_exchange_page(adapter); + + rdata = txgbe_rd32_epcs(hw, 0x70014); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata); + pt_bkp_an73_ability->link_ability = rdata & 0xE0; + kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n", + pt_bkp_an73_ability->link_ability); + kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + + rdata = txgbe_rd32_epcs(hw, 0x70015); + kr_dbg(KR_MODE, "SR AN MMD LP Base Page Ability Register 3: 0x%x\n", rdata); + kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + pt_bkp_an73_ability->fec_ability = (rdata >> 14) & 0x03; + } else if (by_link_partner == 2) {/*Link Partner Next Page*/ + /*Read the link partner AN73 Next Page Ability Registers*/ + kr_dbg(KR_MODE, "\nRead the link partner AN73 Next Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, 0x70019); + kr_dbg(KR_MODE, " SR AN MMD LP XNP Ability Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->next_page = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", pt_bkp_an73_ability->next_page); + } else { + /*Read the local AN73 Base Page Ability Registers*/ + kr_dbg(KR_MODE, "\nRead the local AN73 Base Page Ability Registers...\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 1: 0x%x\n", rdata); + pt_bkp_an73_ability->next_page = (rdata >> 15) & 0x01; + kr_dbg(KR_MODE, " Next Page (bit15): %d\n", pt_bkp_an73_ability->next_page); + + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 2: 0x%x\n", rdata); + pt_bkp_an73_ability->link_ability = rdata & 0xE0; + kr_dbg(KR_MODE, " Link Ability (bit[15:0]): 0x%x\n", + pt_bkp_an73_ability->link_ability); + kr_dbg(KR_MODE, " (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + kr_dbg(KR_MODE, " 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + + rdata = txgbe_rd32_epcs(hw, 0x70012); + kr_dbg(KR_MODE, "SR AN MMD Advertisement Register 3: 0x%x\n", rdata); + kr_dbg(KR_MODE, " FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + kr_dbg(KR_MODE, " FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + pt_bkp_an73_ability->fec_ability = (rdata >> 14) & 0x03; + } /*if (by_link_partner == 1) Link Partner Base Page*/ + + return status; +} + +static void read_phy_lane_txeq(unsigned short lane, struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int addr, rdata; + + /*LANEN_DIG_ASIC_TX_ASIC_IN_1[11:6]: TX_MAIN_CURSOR*/ + addr = 0x100E | (lane << 8); + rdata = rd32_ephy(hw, addr); + kr_dbg(KR_MODE, "PHY LANE%0d TX EQ Read Value:\n", lane); + kr_dbg(KR_MODE, " TX_MAIN_CURSOR: %d\n", ((rdata >> 6) & 0x3F)); + + /*LANEN_DIG_ASIC_TX_ASIC_IN_2[5 :0]: TX_PRE_CURSOR*/ + /*LANEN_DIG_ASIC_TX_ASIC_IN_2[11:6]: TX_POST_CURSOR*/ + addr = 0x100F | (lane << 8); + rdata = rd32_ephy(hw, addr); + kr_dbg(KR_MODE, " TX_PRE_CURSOR : %d\n", (rdata & 0x3F)); + kr_dbg(KR_MODE, " TX_POST_CURSOR: %d\n", ((rdata >> 6) & 0x3F)); + kr_dbg(KR_MODE, "**********************************************\n"); +} + +static int en_cl72_krtr(unsigned int enable, struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int wdata = 0; + u32 val; + + if (enable == 1) { + kr_dbg(KR_MODE, "\nDisable Clause 72 KR Training ...\n"); + 
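/* enable is written verbatim to SR_PMA_KR_PMD_CTRL (0x10096)
+		 * below: bit1 enables the clause 72 start-up protocol and
+		 * bit0 restarts it, so 3 turns training on while 1 leaves
+		 * only the restart bit set, matching the "Disable" log.
+		 */
+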
read_phy_lane_txeq(0, adapter); + } else if (enable == 3) { + kr_dbg(KR_MODE, "\nEnable Clause 72 KR Training ...\n"); + + val = txgbe_rd32_epcs(hw, 0x18003); + wdata |= val; + txgbe_wr32_epcs(hw, 0x18003, wdata); + read_phy_lane_txeq(0, adapter); + } + + /* Enable the Clause 72 start-up protocol by setting Bit 1 of SR_PMA_KR_PMD_CTRL Register. + * Restart the Clause 72 start-up protocol by setting Bit 0 of SR_PMA_KR_PMD_CTRL Register + */ + wdata = enable; + txgbe_wr32_epcs(hw, 0x10096, wdata); + return 0; +} + +static int chk_cl72_krtr_status(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int rdata = 0, rdata1; + int status = 0; + + status = read_poll_timeout(txgbe_rd32_epcs, rdata1, (rdata1 & 0x9), 1000, + 400000, false, hw, 0x10097); + if (!status) { + //Get the latest received coefficient update or status + rdata = txgbe_rd32_epcs(hw, 0x010098); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n", + rdata); + rdata = txgbe_rd32_epcs(hw, 0x010099); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n", + rdata); + rdata = txgbe_rd32_epcs(hw, 0x01009a); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR LD Coefficient Update: 0x%x\n", rdata); + + rdata = txgbe_rd32_epcs(hw, 0x01009b); + kr_dbg(KR_MODE, " SR PMA MMD 10GBASE-KR LD Coefficient Status: 0x%x\n", rdata); + + rdata = txgbe_rd32_epcs(hw, 0x010097); + kr_dbg(KR_MODE, "SR PMA MMD 10GBASE-KR Status Register: 0x%x\n", rdata); + kr_dbg(KR_MODE, " Training Failure (bit3): %d\n", ((rdata >> 3) & 0x01)); + kr_dbg(KR_MODE, " Start-Up Protocol Status (bit2): %d\n", ((rdata >> 2) & 0x01)); + kr_dbg(KR_MODE, " Frame Lock (bit1): %d\n", ((rdata >> 1) & 0x01)); + kr_dbg(KR_MODE, " Receiver Status (bit0): %d\n", ((rdata >> 0) & 0x01)); + + /*If bit3 is set, Training is completed with failure*/ + if ((rdata1 >> 3) & 0x01) { + kr_dbg(KR_MODE, "Training is completed with failure!!!\n"); + read_phy_lane_txeq(0, adapter); + return status; + } + + /*If bit0 is set, Receiver trained and ready to receive data*/ + if ((rdata1 >> 0) & 0x01) { + kr_dbg(KR_MODE, "Receiver trained and ready to receive data ^_^\n"); + e_info(hw, "Receiver ready.\n"); + read_phy_lane_txeq(0, adapter); + return status; + } + } + + kr_dbg(KR_MODE, "ERROR: Check Clause 72 KR Training Complete Timeout!!!\n"); + + return status; +} + +static int txgbe_cl72_trainning(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0, rdata1 = 0; + bool lpld_all_rd = false; + int ret = 0; + + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0); + + ret |= en_cl72_krtr(3, adapter); + kr_dbg(KR_MODE, "\nCheck the Clause 72 KR Training status ...\n"); + ret |= chk_cl72_krtr_status(adapter); + + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x8000), 1000, + 200000, false, hw, 0x10099); + if (!ret) { + rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000; + if (rdata1 == 0x8000) + lpld_all_rd = true; + } + + if (lpld_all_rd) { + rdata = rd32_ephy(hw, 0x100E); + rdata1 = rd32_ephy(hw, 0x100F); + e_dev_info("Lp and Ld all Ready, FFE : %d-%d-%d.\n", + (rdata >> 6) & 0x3F, rdata1 & 0x3F, (rdata1 >> 6) & 0x3F); + if (!hw->dac_sfp) + if ((((rdata >> 6) & 0x3F) == 27) && + ((rdata1 & 0x3F) == 8) && + (((rdata1 >> 6) & 0x3F)) == 44) + return -1; + /* clear an pacv int */ + txgbe_wr32_epcs(hw, 0x78002, 0x0000); + ret = read_poll_timeout(txgbe_rd32_epcs, rdata, (rdata & 0x1000), 1000, + 100000, false, hw, 0x30020); + if (!ret) + e_dev_info("INT_AN_INT_CMPLT =1, AN73 Done Success.\n"); + return 0; 
+	}
+	/* clear an pacv int */
+	txgbe_wr32_epcs(hw, 0x78002, 0x0000);
+
+	return -1;
+}
+
+int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter)
+{
+	struct bkpan73ability tbkp_an73_ability, tlpbkp_an73_ability;
+	struct txgbe_hw *hw = &adapter->hw;
+	bool fec_en = false;
+	u32 fec_ability = 0;
+	int ret = 0;
+
+	tbkp_an73_ability.cu_linkmode = bp_link_mode;
+
+	kr_dbg(KR_MODE, "HandleBkpAn73Flow().\n");
+	kr_dbg(KR_MODE, "---------------------------------\n");
+
+	/* 1. Get the local AN73 Base Page Ability */
+	kr_dbg(KR_MODE, "<1>. Get the local AN73 Base Page Ability ...\n");
+	get_bkp_an73_ability(&tbkp_an73_ability, 0, adapter);
+	/* 2. Check the AN73 Interrupt Status */
+	kr_dbg(KR_MODE, "<2>. Check the AN73 Interrupt Status ...\n");
+
+	/* 3.1. Get the link partner AN73 Base Page Ability */
+	kr_dbg(KR_MODE, "<3.1>. Get the link partner AN73 Base Page Ability ...\n");
+	get_bkp_an73_ability(&tlpbkp_an73_ability, 1, adapter);
+
+	/* 3.2. Check the AN73 Link Ability with Link Partner */
+	kr_dbg(KR_MODE, "<3.2>. Check the AN73 Link Ability with Link Partner ...\n");
+	kr_dbg(KR_MODE, "Local Link Ability: 0x%x\n", tbkp_an73_ability.link_ability);
+	kr_dbg(KR_MODE, "Link Partner Link Ability: 0x%x\n", tlpbkp_an73_ability.link_ability);
+
+	chk_bkp_an73_ability(tbkp_an73_ability, tlpbkp_an73_ability, adapter);
+
+	/* Check the FEC and KR Training for KR mode */
+	kr_dbg(KR_MODE, "<3.3>. Check the FEC for KR mode ...\n");
+	fec_ability = tbkp_an73_ability.fec_ability & tlpbkp_an73_ability.fec_ability;
+	fec_en = fec_ability >= 0x1 ? true : false;
+	adapter->cur_fec_link = fec_en ?
+				TXGBE_PHY_FEC_BASER : TXGBE_PHY_FEC_OFF;
+	/* SR_PMA_KR_FEC_CTRL bit0 */
+	txgbe_wr32_epcs(hw, 0x100ab, fec_en);
+	e_dev_info("KR FEC is %s.\n", fec_en ? "enabled" : "disabled");
+	kr_dbg(KR_MODE, "\n<3.4>. Check the CL72 KR Training for KR mode ...\n");
+
+	ret = txgbe_cl72_trainning(adapter);
+	if (ret)
+		kr_dbg(KR_MODE, "Training failure\n");
+	return ret;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h
new file mode 100644
index 000000000000..9395ffbb6fa9
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_bp.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_BP_H_
+#define _TXGBE_BP_H_
+
+#include "txgbe.h"
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+
+/* Backplane AN73 Base Page Ability struct */
+struct bkpan73ability {
+	unsigned int next_page;    /* Next Page (bit0) */
+	unsigned int link_ability; /* Link Ability (bit[7:0]) */
+	unsigned int fec_ability;  /* FEC Request (bit1), FEC Enable (bit0) */
+	unsigned int cu_linkmode;  /* current link mode for local device */
+};
+
+enum ability_filed_encding {
+	ABILITY_1000BASE_KX,
+	ABILITY_10GBASE_KX4,
+	ABILITY_10GBASE_KR,
+	ABILITY_40GBASE_KR4,
+	ABILITY_40GBASE_CR4,
+	ABILITY_100GBASE_CR10,
+	ABILITY_100GBASE_KP4,
+	ABILITY_100GBASE_KR4,
+	ABILITY_100GBASE_CR4,
+	ABILITY_25GBASE_KRCR_S,
+	ABILITY_25GBASE_KRCR,
+	ABILITY_MAX,
+};
+
+#define KR_MODE 0
+
+#define kr_dbg(KR_MODE, fmt, arg...)
\ + do { \ + if (KR_MODE) \ + e_dev_info(fmt, ##arg); \ + } while (0) + +void txgbe_bp_down_event(struct txgbe_adapter *adapter); +void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter); +int txgbe_bp_mode_setting(struct txgbe_adapter *adapter); +void txgbe_bp_close_protect(struct txgbe_adapter *adapter); +int handle_bkp_an73_flow(unsigned char bp_link_mode, struct txgbe_adapter *adapter); +int get_bkp_an73_ability(struct bkpan73ability *pt_bkp_an73_ability, + unsigned char by_link_partner, + struct txgbe_adapter *adapter); +int chk_bkp_an73_ability(struct bkpan73ability tbkp_an73_ability, + struct bkpan73ability tlpbkp_an73_ability, + struct txgbe_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c new file mode 100644 index 000000000000..aff3ff47505a --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe_type.h" +#include "txgbe_dcb.h" +#include "txgbe.h" + +s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) / + TXGBE_DCB_CREDIT_QUANTUM; + + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, TXGBE_DCB_MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = (u16)val; + + max[i] = (u16)(bw[i] ? (bw[i] * TXGBE_DCB_MAX_CREDIT) / 100 : min_credit); + } + + return 0; +} + +/** + * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits + * @txgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * txgbe_dcb_check_config_cee(). + */ +s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw __always_unused *hw, + struct txgbe_dcb_config *dcb_config, + u32 max_frame_size, u8 direction) +{ + struct txgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = 0; + /* Initialization values default for Tx settings */ + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (!dcb_config) { + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) / + TXGBE_DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. 
It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. + */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + (u32)TXGBE_DCB_MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100; + + /* Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max < min_credit) + credit_max = min_credit; + + if (direction == TXGBE_DCB_TX_CONFIG) { + /* Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. + */ + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + +out: + return ret_val; +} + +/** + * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. + */ +void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; + + /* If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. 
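+ *
+ * Example with illustrative values: for map = {0, 0, 1, 1, 2, 2, 3, 3}
+ * and PFC enabled only on TC1, user priorities 2 and 3 map to TC1, so
+ * *pfc_up ends up as 0x0c.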
+ */ + for (*pfc_up = 0, up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; + } +} + +void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *tsa) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < TXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; +} + +u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up) +{ + struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; + + /* Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } +out: + return tc; +} + +void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *map) +{ + u8 up; + + for (up = 0; up < TXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up); +} + +/** + * txgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 txgbe_dcb_config_tc_stats(struct txgbe_hw __always_unused *hw, + struct txgbe_dcb_config __always_unused *dcb_config) +{ + return 0; +} + +/** + * txgbe_dcb_hw_config_cee - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
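+ *
+ * Flow, as implemented below: unpack the CEE refill/max/bwgid/tsa/map
+ * containers, program them through txgbe_dcb_config() and
+ * txgbe_dcb_hw_config(), then derive the PFC mask and configure PFC
+ * when pfc_mode_enable is set.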
+ */
+s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw,
+			    struct txgbe_dcb_config *dcb_config)
+{
+	s32 ret = TXGBE_NOT_IMPLEMENTED;
+	u8 pfc_en;
+	u8 tsa[TXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[TXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 map[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	u16 refill[TXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[TXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	/* Unpack CEE standard containers */
+	txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+	txgbe_dcb_unpack_max_cee(dcb_config, max);
+	txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+	txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);
+
+	txgbe_dcb_config(hw, dcb_config);
+	ret = txgbe_dcb_hw_config(hw,
+				  refill, max, bwgid,
+				  tsa, map);
+
+	txgbe_dcb_config_tc_stats(hw, dcb_config);
+
+	if (!ret && dcb_config->pfc_mode_enable) {
+		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	int ret = TXGBE_ERR_PARAM;
+
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+
+	/* Enable Transmit Priority Flow Control */
+	wr32(hw, TXGBE_RDB_RFCC, TXGBE_RDB_RFCC_RFCE_PRIORITY);
+
+	/* Enable Receive Priority Flow Control */
+	reg = 0;
+
+	if (pfc_en)
+		reg |= (TXGBE_MAC_RX_FLOW_CTRL_PFCE | 0x1);
+
+	wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, reg);
+
+	for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) {
+		if (map[i] > max_tc)
+			max_tc = map[i];
+	}
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < TXGBE_DCB_MAX_USER_PRIORITY; j++) {
+			if (map[j] == i && pfc_en & (1 << j)) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 10) |
+			      TXGBE_RDB_RFCH_XOFFE;
+			fcrtl = (hw->fc.low_water[i] << 10) |
+				TXGBE_RDB_RFCL_XONE;
+			wr32(hw, TXGBE_RDB_RFCL(i), fcrtl);
+		} else {
+			/* In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the Rx packet buffer size - 24KB. This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
+			 */
+			reg = rd32(hw, TXGBE_RDB_PB_SZ(i));
+			wr32(hw, TXGBE_RDB_RFCL(i), 0);
+		}
+
+		wr32(hw, TXGBE_RDB_RFCH(i), reg);
+	}
+
+	for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		wr32(hw, TXGBE_RDB_RFCL(i), 0);
+		wr32(hw, TXGBE_RDB_RFCH(i), 0);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		wr32(hw, TXGBE_RDB_RFCV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2);
+
+	return ret;
+}
+
+s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max,
+			u8 *bwg_id, u8 *tsa, u8 *map)
+{
+	txgbe_dcb_config_rx_arbiter(hw, refill, max, bwg_id,
+				    tsa, map);
+	txgbe_dcb_config_tx_desc_arbiter(hw, refill, max,
+					 bwg_id, tsa);
+	txgbe_dcb_config_tx_data_arbiter(hw, refill, max,
+					 bwg_id, tsa, map);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_rx_arbiter - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
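+ *
+ * Example with illustrative values: refill[i] = 130, max[i] = 400 and
+ * bwg_id[i] = 2 are packed into TXGBE_RDM_ARB_CFG(i) as
+ *   130 | (400 << TXGBE_RDM_ARB_CFG_MCL_SHIFT) |
+ *   (2 << TXGBE_RDM_ARB_CFG_BWG_SHIFT),
+ * plus TXGBE_RDM_ARB_CFG_LSP when tsa[i] is txgbe_dcb_tsa_strict.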
+ */
+s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *hw, u16 *refill,
+				u16 *max, u8 *bwg_id, u8 *tsa,
+				u8 *map)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8 i = 0;
+
+	/* Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC |
+	      TXGBE_RDM_ARB_CTL_ARBDIS;
+	wr32(hw, TXGBE_RDM_ARB_CTL, reg);
+
+	/* Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++)
+		reg |= (map[i] << (i * TXGBE_RDB_UP2TC_UP_SHIFT));
+
+	wr32(hw, TXGBE_RDB_UP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+		reg = credit_refill |
+		      (credit_max << TXGBE_RDM_ARB_CFG_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << TXGBE_RDM_ARB_CFG_BWG_SHIFT;
+
+		if (tsa[i] == txgbe_dcb_tsa_strict)
+			reg |= TXGBE_RDM_ARB_CFG_LSP;
+
+		wr32(hw, TXGBE_RDM_ARB_CFG(i), reg);
+	}
+
+	/* Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_RDM_ARB_CTL_RRM | TXGBE_RDM_ARB_CTL_RAC;
+	wr32(hw, TXGBE_RDM_ARB_CTL, reg);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_desc_arbiter - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *hw, u16 *refill,
+				     u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8 i;
+
+	/* Clear the per-Tx queue credits; we use per-TC instead */
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_TDM_VM_CREDIT(i), 0);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << TXGBE_TDM_PBWARB_CFG_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << TXGBE_TDM_PBWARB_CFG_BWG_SHIFT;
+
+		if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+			reg |= TXGBE_TDM_PBWARB_CFG_GSP;
+
+		if (tsa[i] == txgbe_dcb_tsa_strict)
+			reg |= TXGBE_TDM_PBWARB_CFG_LSP;
+
+		wr32(hw, TXGBE_TDM_PBWARB_CFG(i), reg);
+	}
+
+	/* Configure Tx descriptor plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_TDM_PBWARB_CTL_TDPAC | TXGBE_TDM_PBWARB_CTL_TDRM;
+	wr32(hw, TXGBE_TDM_PBWARB_CTL, reg);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_data_arbiter - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *hw, u16 *refill,
+				     u16 *max, u8 *bwg_id, u8 *tsa,
+				     u8 *map)
+{
+	u32 reg;
+	u8 i;
+
+	/* Disable the arbiter before changing parameters
+	 * (always enable recycle mode; SP; arb delay)
+	 */
+	reg = TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM |
+	      TXGBE_RTTPCS_ARBDIS;
+	wr32(hw, TXGBE_TDB_PBRARB_CTL, reg);
+
+	/* Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
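+	 * With map = {0, 0, 1, 1, 2, 2, 3, 3}, for example, the loop below
+	 * writes 0x33221100 to TXGBE_TDB_UP2TC (one 4-bit field per user
+	 * priority).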
+ */ + reg = 0; + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * TXGBE_TDB_UP2TC_UP_SHIFT)); + + wr32(hw, TXGBE_TDB_UP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << TXGBE_TDB_PBRARB_CFG_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << TXGBE_TDB_PBRARB_CFG_BWG_SHIFT; + + if (tsa[i] == txgbe_dcb_tsa_group_strict_cee) + reg |= TXGBE_TDB_PBRARB_CFG_GSP; + + if (tsa[i] == txgbe_dcb_tsa_strict) + reg |= TXGBE_TDB_PBRARB_CFG_LSP; + + wr32(hw, TXGBE_TDB_PBRARB_CFG(i), reg); + } + + /* Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = TXGBE_TDB_PBRARB_CTL_TPPAC | TXGBE_TDB_PBRARB_CTL_TPRM; + wr32(hw, TXGBE_TDB_PBRARB_CTL, reg); + + return 0; +} + +/** + * txgbe_dcb_config - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to txgbe_dcb_config structure + * + * Configure general DCB parameters. + */ +s32 txgbe_dcb_config(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config) +{ + u32 n, value; + + struct txgbe_adapter *adapter = hw->back; + + if (dcb_config->vt_mode) + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; + else + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (dcb_config->num_tcs.pg_tcs == 8) + /* 8 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_NUM_VT_16 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (dcb_config->num_tcs.pg_tcs == 4) + /* 4 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_NUM_VT_32 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } else { + if (dcb_config->num_tcs.pg_tcs == 8) + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (dcb_config->num_tcs.pg_tcs == 4) + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else + value = 0; + } + + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); + + /* Disable drop for all queues */ + for (n = 0; n < 4; n++) + wr32(hw, TXGBE_RDM_PF_QDE(n), 0x0); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h new file mode 100644 index 000000000000..999432b08280 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_DCB_H_ +#define _TXGBE_DCB_H_ + +#include "txgbe_type.h" + +/* DCB defines */ +/* DCB credit calculation defines */ +#define TXGBE_DCB_CREDIT_QUANTUM 64 +#define TXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define TXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define TXGBE_DCB_MAX_CREDIT (2 * TXGBE_DCB_MAX_CREDIT_REFILL) + +/* 513 for 32KB TSO packet */ +#define TXGBE_DCB_MIN_TSO_CREDIT \ + ((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1) + +/* DCB configuration defines */ +#define TXGBE_DCB_MAX_USER_PRIORITY 8 +#define TXGBE_DCB_MAX_BW_GROUP 8 +#define TXGBE_DCB_BW_PERCENT 100 + +#define TXGBE_DCB_TX_CONFIG 0 +#define TXGBE_DCB_RX_CONFIG 1 + +/* DCB capability defines */ +#define TXGBE_DCB_PG_SUPPORT 0x00000001 +#define TXGBE_DCB_PFC_SUPPORT 0x00000002 +#define TXGBE_DCB_BCN_SUPPORT 0x00000004 +#define TXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define TXGBE_DCB_GSP_SUPPORT 0x00000010 + +/* DCB register definitions */ +#define TXGBE_TDM_PBWARB_CTL_TDPAC 0x00000001 +#define TXGBE_TDM_PBWARB_CTL_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define TXGBE_TDM_PBWARB_CTL_ARBDIS 0x00000040 /* DCB arbiter disable */ + +/* Receive UP2TC mapping */ +#define TXGBE_RDB_UP2TC_UP_SHIFT 4 +#define TXGBE_RDB_UP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define TXGBE_TDB_UP2TC_UP_SHIFT 4 + +#define TXGBE_RDM_ARB_CFG_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define TXGBE_RDM_ARB_CFG_BWG_SHIFT 9 /* Offset to BWG index */ +#define TXGBE_RDM_ARB_CFG_GSP 0x40000000 /* GSP enable bit */ +#define TXGBE_RDM_ARB_CFG_LSP 0x80000000 /* LSP enable bit */ + +/* RTRPCS Bit Masks */ +#define TXGBE_RDM_ARB_CTL_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define TXGBE_RDM_ARB_CTL_RAC 0x00000004 +#define TXGBE_RDM_ARB_CTL_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define TXGBE_TDM_PBWARB_CFG_MCL_SHIFT 12 +#define TXGBE_TDM_PBWARB_CFG_BWG_SHIFT 9 +#define TXGBE_TDM_PBWARB_CFG_GSP 0x40000000 +#define TXGBE_TDM_PBWARB_CFG_LSP 0x80000000 + +#define TXGBE_TDB_PBRARB_CFG_MCL_SHIFT 12 +#define TXGBE_TDB_PBRARB_CFG_BWG_SHIFT 9 +#define TXGBE_TDB_PBRARB_CFG_GSP 0x40000000 +#define TXGBE_TDB_PBRARB_CFG_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define TXGBE_TDB_PBRARB_CTL_TPPAC 0x00000020 +#define TXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define TXGBE_TDB_PBRARB_CTL_TPRM 0x00000100 /* Transmit Recycle Mode enable*/ + +#define TXGBE_TDM_PB_THRE_DCB 0xA /* THRESH value for DCB mode */ + +struct txgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. 
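+	 * An adapter limited to four traffic classes would report 0x08
+	 * (bit 3) here, for example.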
+ */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum txgbe_dcb_tsa { + txgbe_dcb_tsa_ets = 0, + txgbe_dcb_tsa_group_strict_cee, + txgbe_dcb_tsa_strict +}; + +/* Traffic class bandwidth allocation per direction */ +struct txgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer */ + enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ +}; + +enum txgbe_dcb_pfc { + txgbe_dcb_pfc_disabled = 0, + txgbe_dcb_pfc_enabled, + txgbe_dcb_pfc_enabled_txonly, + txgbe_dcb_pfc_enabled_rxonly +}; + +/* Traffic class configuration */ +struct txgbe_dcb_tc_config { + struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum txgbe_dcb_pfc pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +enum txgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct txgbe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct txgbe_dcb_config { + struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct txgbe_dcb_support support; + struct txgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][TXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; + + enum txgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? */ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; +}; + +/* DCB driver APIs */ + +/* DCB credits calculation */ +s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, int max_frame_size); +s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw __always_unused *hw, + struct txgbe_dcb_config *dcb_config, + u32 max_frame_size, u8 direction); + +/* DCB PFC */ +s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map); + +/* DCB stats */ +s32 txgbe_dcb_config_tc_stats(struct txgbe_hw __always_unused *hw, + struct txgbe_dcb_config __always_unused *dcb_config); + +/* DCB config arbiters */ +s32 txgbe_dcb_config_tx_desc_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa); +s32 txgbe_dcb_config_tx_data_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map); +s32 txgbe_dcb_config_rx_arbiter(struct txgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map); + +/* DCB unpack routines */ +void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up); +void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction, + u16 *refill); +void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max); +void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *bwgid); +void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *tsa); +void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction, + u8 *map); +s32 txgbe_dcb_config_tc_stats(struct txgbe_hw __always_unused *hw, + struct txgbe_dcb_config __always_unused *dcb_config); +u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up); + +/* DCB initialization */ +s32 txgbe_dcb_config(struct 
txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config); +s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map); +s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw, + struct txgbe_dcb_config *dcb_config); +#endif /* _TXGBE_DCB_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c new file mode 100644 index 000000000000..0e6b86a26856 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb_nl.c @@ -0,0 +1,798 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe.h" + +#if IS_ENABLED(CONFIG_DCB) +#include +#include "txgbe_dcb.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE 0x01 +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 +#define BIT_RESETLINK 0x40 +#define BIT_LINKSPEED 0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max) +{ + struct txgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct txgbe_dcb_config *dcfg = &adapter->dcb_cfg; + struct txgbe_dcb_tc_config *src = NULL; + struct txgbe_dcb_tc_config *dst = NULL; + int i, j; + int tx = TXGBE_DCB_TX_CONFIG; + int rx = TXGBE_DCB_RX_CONFIG; + int changes = 0; + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->fcoe.up_set != adapter->fcoe.up) + changes |= BIT_APP_UPCHG; +#endif /* CONFIG_FCOE */ + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { + src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; + + if (dst->path[tx].tsa != src->path[tx].tsa) { + dst->path[tx].tsa = src->path[tx].tsa; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { + dst->path[tx].bwg_id = src->path[tx].bwg_id; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { + dst->path[tx].bwg_percent = src->path[tx].bwg_percent; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].up_to_tc_bitmap != + src->path[tx].up_to_tc_bitmap) { + dst->path[tx].up_to_tc_bitmap = + src->path[tx].up_to_tc_bitmap; + changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + + if (dst->path[rx].tsa != src->path[rx].tsa) { + dst->path[rx].tsa = src->path[rx].tsa; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { + dst->path[rx].bwg_id = src->path[rx].bwg_id; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { + dst->path[rx].bwg_percent = src->path[rx].bwg_percent; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].up_to_tc_bitmap != + src->path[rx].up_to_tc_bitmap) { + dst->path[rx].up_to_tc_bitmap = + src->path[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); + } + } + + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + j = i - DCB_PG_ATTR_BW_ID_0; + + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { + dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; + changes |= BIT_PG_TX; + } + if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { + dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; + changes |= BIT_PG_RX; + } + } + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + j = i - 
DCB_PFC_UP_ATTR_0; + if (dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { + dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; + changes |= BIT_PFC; + } + } + + if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { + dcfg->pfc_mode_enable = scfg->pfc_mode_enable; + changes |= BIT_PFC; + } + + return changes; +} + +static u8 txgbe_dcbnl_get_state(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & TXGBE_FLAG_DCB_ENABLED); +} + +static u8 txgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & TXGBE_FLAG_DCB_ENABLED)) + goto out; + + err = txgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +out: + return !!err; +} + +static void txgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; +} + +static void txgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = + up_map; +} + +static void txgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; +} + +static void txgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = + up_map; +} + +static void txgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; +} + +static void txgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void txgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, 
int bwg_id, + u8 *bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void txgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void txgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void txgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = txgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); + + adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; + if (adapter->temp_dcb_cfg.tc_config[tc].pfc != + adapter->dcb_cfg.tc_config[tc].pfc) + adapter->temp_dcb_cfg.pfc_mode_enable = true; +} + +static void txgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u8 tc = txgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); + *pfc = adapter->dcb_cfg.tc_config[tc].pfc; +} + +static void txgbe_dcbnl_devreset(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); + + clear_bit(__TXGBE_RESETTING, &adapter->state); +} + +static u8 txgbe_dcbnl_set_all(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct txgbe_hw *hw = &adapter->hw; + int ret = DCB_NO_HW_CHG; + u8 prio_tc[TXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return ret; + + adapter->dcb_set_bitmap |= txgbe_copy_dcb_cfg(adapter, + TXGBE_DCB_MAX_TRAFFIC_CLASS); + if (!adapter->dcb_set_bitmap) + return ret; + + txgbe_dcb_unpack_map_cee(dcb_cfg, TXGBE_DCB_TX_CONFIG, prio_tc); + + if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { + /* Priority to TC mapping in CEE case default to 1:1 */ + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + TXGBE_DCB_TX_CONFIG); + + txgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, + TXGBE_DCB_RX_CONFIG); + + txgbe_dcb_hw_config_cee(hw, dcb_cfg); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); + + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; + + txgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); + txgbe_dcb_config_pfc(hw, pfc_en, prio_tc); + } else { + hw->mac.ops.fc_enable(hw); + } + txgbe_set_rx_drop_en(adapter); + if (ret != DCB_HW_CHG_RST) + ret = DCB_HW_CHG; + } + +#if 
IS_ENABLED(CONFIG_FCOE)
+	/* Reprogram FCoE hardware offloads when the traffic class
+	 * FCoE is using changes. This happens if the APP info
+	 * changes or the up2tc mapping is updated.
+	 */
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		adapter->fcoe.up_set = adapter->fcoe.up;
+		txgbe_dcbnl_devreset(netdev);
+		ret = DCB_HW_CHG_RST;
+	}
+#endif /* CONFIG_FCOE */
+
+	adapter->dcb_set_bitmap = 0x00;
+	return ret;
+}
+
+static u8 txgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	switch (capid) {
+	case DCB_CAP_ATTR_PG:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_PFC:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_UP2TC:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_PG_TCS:
+		*cap = 0x80;
+		break;
+	case DCB_CAP_ATTR_PFC_TCS:
+		*cap = 0x80;
+		break;
+	case DCB_CAP_ATTR_GSP:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_BCN:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_DCBX:
+		*cap = adapter->dcbx_cap;
+		break;
+	default:
+		*cap = false;
+		break;
+	}
+
+	return 0;
+}
+
+static int txgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int rval = 0;
+
+	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) {
+		switch (tcid) {
+		case DCB_NUMTCS_ATTR_PG:
+			*num = adapter->dcb_cfg.num_tcs.pg_tcs;
+			break;
+		case DCB_NUMTCS_ATTR_PFC:
+			*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else {
+		rval = -EINVAL;
+	}
+
+	return rval;
+}
+
+static int txgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int rval = 0;
+
+	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) {
+		switch (tcid) {
+		case DCB_NUMTCS_ATTR_PG:
+			adapter->dcb_cfg.num_tcs.pg_tcs = num;
+			break;
+		case DCB_NUMTCS_ATTR_PFC:
+			adapter->dcb_cfg.num_tcs.pfc_tcs = num;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else {
+		rval = -EINVAL;
+	}
+
+	return rval;
+}
+
+static u8 txgbe_dcbnl_getpfcstate(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->dcb_cfg.pfc_mode_enable;
+}
+
+static void txgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	adapter->temp_dcb_cfg.pfc_mode_enable = state;
+}
+
+/**
+ * txgbe_dcbnl_getapp - retrieve the DCBX application user priority
+ * @netdev : the corresponding netdev
+ * @idtype : identifies the id as ether type or TCP/UDP port number
+ * @id: id is either ether type or TCP/UDP port number
+ *
+ * Returns : on success, returns a non-zero 802.1p user priority bitmap
+ * otherwise returns 0 as the invalid user priority bitmap to indicate an
+ * error.
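+ *
+ * For example, querying idtype DCB_APP_IDTYPE_ETHTYPE with id 0x8906
+ * (ETH_P_FCOE) returns the user priority bitmap assigned to FCoE, such
+ * as 0x08 when FCoE runs on user priority 3.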
+ */ +static int txgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + u8 rval = 0; + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + rval = dcb_getapp(netdev, &app); + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) + rval = txgbe_fcoe_getapp(netdev); +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return rval; +} + +/** + * txgbe_dcbnl_setapp - set the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * @up: the 802.1p user priority bitmap + * + * Returns : 0 on success or 1 on error + */ +static int txgbe_dcbnl_setapp(struct net_device *netdev, + u8 idtype, u16 id, u8 up) +{ + int err = 0; + struct dcb_app app; + + app.selector = idtype; + app.protocol = id; + app.priority = up; + err = dcb_setapp(netdev, &app); + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: +#if IS_ENABLED(CONFIG_FCOE) + if (id == ETH_P_FCOE) { + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->fcoe.up = up ? ffs(up) - 1 : TXGBE_FCOE_DEFUP; + } +#endif + break; + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + break; + } + + return err; +} + +static int txgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->txgbe_ieee_ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int txgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct txgbe_hw *hw = &adapter->hw; + int i, err = 0; + __u8 max_tc = 0; + __u8 map_chg = 0; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->txgbe_ieee_ets) { + adapter->txgbe_ieee_ets = kmalloc(sizeof(*adapter->txgbe_ieee_ets), + GFP_KERNEL); + if (!adapter->txgbe_ieee_ets) + return -ENOMEM; + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->txgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + hw->mac.ops.get_rtrup2tc(hw, + adapter->txgbe_ieee_ets->prio_tc); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->txgbe_ieee_ets->prio_tc[i]) + map_chg = 1; + } + + memcpy(adapter->txgbe_ieee_ets, ets, sizeof(*adapter->txgbe_ieee_ets)); + + if (max_tc) + max_tc++; + + if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (max_tc != netdev_get_num_tc(dev)) + err = txgbe_setup_tc(dev, max_tc); + else if (map_chg) + txgbe_dcbnl_devreset(dev); + + if (err) + goto err_out; + + err = txgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +err_out: + return err; +} + +static int txgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = 
adapter->txgbe_ieee_pfc; + int i; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return -EINVAL; + + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int txgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u8 *prio_tc; + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->txgbe_ieee_pfc) { + adapter->txgbe_ieee_pfc = kmalloc(sizeof(*adapter->txgbe_ieee_pfc), + GFP_KERNEL); + if (!adapter->txgbe_ieee_pfc) + return -ENOMEM; + } + + prio_tc = adapter->txgbe_ieee_ets->prio_tc; + memcpy(adapter->txgbe_ieee_pfc, pfc, sizeof(*adapter->txgbe_ieee_pfc)); + + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) + err = txgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); + else + err = hw->mac.ops.fc_enable(hw); + + txgbe_set_rx_drop_en(adapter); + + return err; +} + +static int txgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int err = -EINVAL; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return err; + + err = dcb_ieee_setapp(dev, app); + +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app->priority; + adapter->fcoe.up_set = adapter->fcoe.up; + txgbe_dcbnl_devreset(dev); + } +#endif + return 0; +} + +static int txgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_delapp(dev, app); + +#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app_mask ? 
+ ffs(app_mask) - 1 : TXGBE_FCOE_DEFUP; + txgbe_dcbnl_devreset(dev); + } +#endif + return err; +} + +static u8 txgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + return adapter->dcbx_cap; +} + +static u8 txgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = { .ets_cap = 0 }; + struct ieee_pfc pfc = { .pfc_en = 0 }; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + txgbe_dcbnl_ieee_setets(dev, &ets); + txgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG); + + adapter->dcb_set_bitmap |= mask; + txgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + txgbe_dcbnl_ieee_setets(dev, &ets); + txgbe_dcbnl_ieee_setpfc(dev, &pfc); + txgbe_setup_tc(dev, 0); + } + + return 0; +} + +struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = txgbe_dcbnl_ieee_getets, + .ieee_setets = txgbe_dcbnl_ieee_setets, + .ieee_getpfc = txgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = txgbe_dcbnl_ieee_setpfc, + .ieee_setapp = txgbe_dcbnl_ieee_setapp, + .ieee_delapp = txgbe_dcbnl_ieee_delapp, + .getstate = txgbe_dcbnl_get_state, + .setstate = txgbe_dcbnl_set_state, + .getpermhwaddr = txgbe_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = txgbe_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = txgbe_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = txgbe_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = txgbe_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = txgbe_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = txgbe_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = txgbe_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = txgbe_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = txgbe_dcbnl_set_pfc_cfg, + .getpfccfg = txgbe_dcbnl_get_pfc_cfg, + .setall = txgbe_dcbnl_set_all, + .getcap = txgbe_dcbnl_getcap, + .getnumtcs = txgbe_dcbnl_getnumtcs, + .setnumtcs = txgbe_dcbnl_setnumtcs, + .getpfcstate = txgbe_dcbnl_getpfcstate, + .setpfcstate = txgbe_dcbnl_setpfcstate, + .getapp = txgbe_dcbnl_getapp, + .setapp = txgbe_dcbnl_setapp, + .getdcbx = txgbe_dcbnl_getdcbx, + .setdcbx = txgbe_dcbnl_setdcbx, +}; +#endif /* CONFIG_DCB */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c new file mode 100644 index 000000000000..d24c2b57d487 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_debugfs.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include "txgbe.h" +#include +#include + +static struct dentry *txgbe_dbg_root; +static int txgbe_data_mode; + +#define TXGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF) +#define TXGBE_DATA_ARGS(dm) ((dm) & 0xFFFF) +enum txgbe_data_func { + TXGBE_FUNC_NONE = (0 << 16), + TXGBE_FUNC_DUMP_BAR = (1 << 16), + TXGBE_FUNC_DUMP_RDESC = (2 << 16), + TXGBE_FUNC_DUMP_TDESC = (3 << 16), + TXGBE_FUNC_FLASH_READ = (4 << 16), + TXGBE_FUNC_FLASH_WRITE = (5 << 16), +}; + +/** + * data operation + **/ +static ssize_t +txgbe_simple_read_from_pcibar(struct txgbe_adapter *adapter, int res, + void __user *buf, size_t size, loff_t *ppos) +{ + loff_t pos = *ppos; + u32 miss, len, limit = pci_resource_len(adapter->pdev, res); + + if (pos < 0) + return 0; + + limit = (pos + size <= limit ? pos + size : limit); + for (miss = 0; pos < limit && !miss; buf += len, pos += len) { + u32 val = 0, reg = round_down(pos, 4); + u32 off = pos - reg; + + len = (reg + 4 <= limit ? 4 - off : 4 - off - (limit - reg - 4)); + val = txgbe_rd32(adapter->io_addr + reg); + miss = copy_to_user(buf, &val + off, len); + } + + size = pos - *ppos - miss; + *ppos += size; + + return size; +} + +static ssize_t +txgbe_simple_read_from_flash(struct txgbe_adapter *adapter, + void __user *buf, size_t size, loff_t *ppos) +{ + struct txgbe_hw *hw = &adapter->hw; + loff_t pos = *ppos; + size_t ret = 0; + loff_t rpos, rtail; + void __user *to = buf; + size_t available = adapter->hw.flash.dword_size << 2; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !size) + return 0; + if (size > available - pos) + size = available - pos; + + rpos = round_up(pos, 4); + rtail = round_down(pos + size, 4); + if (rtail < rpos) + return 0; + + to += rpos - pos; + while (rpos <= rtail) { + u32 value = txgbe_rd32(adapter->io_addr + rpos); + + if (hw->flash.ops.write_buffer(hw, rpos >> 2, 1, &value)) { + ret = size; + break; + } + if (copy_to_user(to, &value, 4) == 4) { + ret = size; + break; + } + to += 4; + rpos += 4; + } + + if (ret == size) + return -EFAULT; + size -= ret; + *ppos = pos + size; + return size; +} + +static ssize_t +txgbe_simple_write_to_flash(struct txgbe_adapter *adapter, + const void __user *from, size_t size, loff_t *ppos, size_t available) +{ + return size; +} + +static ssize_t +txgbe_dbg_data_ops_read(struct file *filp, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + u32 func = TXGBE_DATA_FUNC(txgbe_data_mode); + + /* rmb for debugfs */ + rmb(); + + switch (func) { + case TXGBE_FUNC_DUMP_BAR: { + u32 bar = TXGBE_DATA_ARGS(txgbe_data_mode); + + return txgbe_simple_read_from_pcibar(adapter, bar, buffer, size, + ppos); + } + case TXGBE_FUNC_FLASH_READ: { + return txgbe_simple_read_from_flash(adapter, buffer, size, ppos); + } + case TXGBE_FUNC_DUMP_RDESC: { + struct txgbe_ring *ring; + u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (queue >= adapter->num_rx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + case TXGBE_FUNC_DUMP_TDESC: { + struct txgbe_ring *ring; + u32 queue = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (queue >= adapter->num_tx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + default: + break; + } + + return 0; +} + +static ssize_t +txgbe_dbg_data_ops_write(struct file *filp, + const char 
__user *buffer, + size_t size, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + u32 func = TXGBE_DATA_FUNC(txgbe_data_mode); + + /* rmb for debugfs */ + rmb(); + + switch (func) { + case TXGBE_FUNC_FLASH_WRITE: { + u32 size = TXGBE_DATA_ARGS(txgbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + + return txgbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + } + default: + break; + } + + return size; +} + +static const struct file_operations txgbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = txgbe_dbg_data_ops_read, + .write = txgbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char txgbe_dbg_reg_ops_buf[256] = ""; +static ssize_t +txgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, txgbe_data_mode, + txgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +txgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct txgbe_adapter *adapter = filp->private_data; + char *pc = txgbe_dbg_reg_ops_buf; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(txgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(txgbe_dbg_reg_ops_buf, + sizeof(txgbe_dbg_reg_ops_buf) - 1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = TXGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = TXGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = TXGBE_FUNC_DUMP_TDESC; + } else { + txgbe_dump(adapter); + } + + if (mode && 1 == kstrtou16(pc, 16, &args)) + mode |= args; + + txgbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = TXGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = TXGBE_FUNC_FLASH_WRITE; + } + + if (mode && 1 == kstrtou16(pc, 16, &args)) + mode |= args; + + txgbe_data_mode = mode; + } else if (strncmp(txgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + + cnt = kstrtou32(&txgbe_dbg_reg_ops_buf[5], 16, ®); + cnt += kstrtou32(&txgbe_dbg_reg_ops_buf[5] + 4, 16, &value); + + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(txgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + + cnt = kstrtou32(&txgbe_dbg_reg_ops_buf[4], 16, ®); + if (cnt == 1) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", txgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + 
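/* Accepted command grammar, matching the parser above
+	 * (numeric args in hex):
+	 *   dump [bar|rdesc|tdesc] <args>
+	 *   flash [read|write] <args>
+	 *   read <reg>
+	 *   write <reg> <value>
+	 */
+	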
return count;
+}
+
+static const struct file_operations txgbe_dbg_reg_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = txgbe_dbg_reg_ops_read,
+	.write = txgbe_dbg_reg_ops_write,
+};
+
+/**
+ * netdev_ops operation
+ **/
+static char txgbe_dbg_netdev_ops_buf[256] = "";
+static ssize_t
+txgbe_dbg_netdev_ops_read(struct file *filp,
+			  char __user *buffer,
+			  size_t count, loff_t *ppos)
+{
+	struct txgbe_adapter *adapter = filp->private_data;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n",
+			adapter->netdev->name, txgbe_data_mode,
+			txgbe_dbg_netdev_ops_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+static ssize_t
+txgbe_dbg_netdev_ops_write(struct file *filp,
+			   const char __user *buffer,
+			   size_t count, loff_t *ppos)
+{
+	struct txgbe_adapter *adapter = filp->private_data;
+	int len;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(txgbe_dbg_netdev_ops_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(txgbe_dbg_netdev_ops_buf,
+				     sizeof(txgbe_dbg_netdev_ops_buf) - 1,
+				     ppos,
+				     buffer,
+				     count);
+	if (len < 0)
+		return len;
+
+	txgbe_dbg_netdev_ops_buf[len] = '\0';
+
+	if (strncmp(txgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, 0);
+		e_dev_info("tx_timeout called\n");
+	} else {
+		e_dev_info("Unknown command: %s\n", txgbe_dbg_netdev_ops_buf);
+		e_dev_info("Available commands:\n");
+		e_dev_info(" tx_timeout\n");
+	}
+	return count;
+}
+
+static const struct file_operations txgbe_dbg_netdev_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = txgbe_dbg_netdev_ops_read,
+	.write = txgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * txgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void txgbe_dbg_adapter_init(struct txgbe_adapter *adapter)
+{
+	const char *name = pci_name(adapter->pdev);
+	struct dentry *pfile;
+
+	adapter->txgbe_dbg_adapter = debugfs_create_dir(name, txgbe_dbg_root);
+	if (!adapter->txgbe_dbg_adapter) {
+		e_dev_err("debugfs entry for %s failed\n", name);
+		return;
+	}
+
+	pfile = debugfs_create_file("data", 0600,
+				    adapter->txgbe_dbg_adapter, adapter,
+				    &txgbe_dbg_data_ops_fops);
+	if (!pfile)
+		e_dev_err("debugfs data for %s failed\n", name);
+
+	pfile = debugfs_create_file("reg_ops", 0600,
+				    adapter->txgbe_dbg_adapter, adapter,
+				    &txgbe_dbg_reg_ops_fops);
+	if (!pfile)
+		e_dev_err("debugfs reg_ops for %s failed\n", name);
+
+	pfile = debugfs_create_file("netdev_ops", 0600,
+				    adapter->txgbe_dbg_adapter, adapter,
+				    &txgbe_dbg_netdev_ops_fops);
+	if (!pfile)
+		e_dev_err("debugfs netdev_ops for %s failed\n", name);
+}
+
+/**
+ * txgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter that is stopping
+ **/
+void txgbe_dbg_adapter_exit(struct txgbe_adapter *adapter)
+{
+	debugfs_remove_recursive(adapter->txgbe_dbg_adapter);
+	adapter->txgbe_dbg_adapter = NULL;
+}
+
+/**
+ * txgbe_dbg_init - start up debugfs for the driver
+ **/
+void txgbe_dbg_init(void)
+{
+	txgbe_dbg_root = debugfs_create_dir(txgbe_driver_name, NULL);
+	if (!txgbe_dbg_root)
+		pr_err("init of debugfs failed\n");
+}
+
+/**
+ * txgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void txgbe_dbg_exit(void)
+{
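+	/* removing the root also removes any per-adapter directories under it */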
+ debugfs_remove_recursive(txgbe_dbg_root); +} + +/** + * txgbe_dump - Print registers, tx-rings and rx-rings + **/ +void txgbe_dump(struct txgbe_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "skip dump\n"); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c new file mode 100644 index 000000000000..e9f65652273a --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -0,0 +1,4049 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe_e56.h" +#include "linux/delay.h" +#include "txgbe.h" +#include "txgbe_hw.h" + +#include + +#define FIELD_PREP_M(_mask, _val) \ + ({ ((typeof(_mask))(_val) << __bf_shf_m(_mask)) & (_mask); }) + +void txgbe_field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue) +{ + *psrcdata &= ~GENMASK(bithigh, bitlow); + *psrcdata |= FIELD_PREP_M(GENMASK(bithigh, bitlow), setvalue); +} + +s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + u32 rdata = 0; + u32 links_reg = 0; + + /* must read it twice because the state may + * not be correct the first time you read it + */ + rdata = txgbe_rd32_epcs(hw, 0x30001); + rdata = txgbe_rd32_epcs(hw, 0x30001); + + if (rdata & TXGBE_E56_PHY_LINK_UP) + *link_up = true; + else + *link_up = false; + + if (!adapter->link_valid) + *link_up = false; + + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (*link_up) { + if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) == + TXGBE_CFG_PORT_ST_AML_LINK_40G) + *speed = TXGBE_LINK_SPEED_40GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) == + TXGBE_CFG_PORT_ST_AML_LINK_25G) + *speed = TXGBE_LINK_SPEED_25GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) == + TXGBE_CFG_PORT_ST_AML_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +static u32 txgbe_e56_phy_tx_ffe_cfg(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 addr; + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + } + } else if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + adapter->aml_txeq.main = S10G_TX_FFE_CFG_MAIN; + adapter->aml_txeq.pre1 = S10G_TX_FFE_CFG_PRE1; + adapter->aml_txeq.pre2 = S10G_TX_FFE_CFG_PRE2; + adapter->aml_txeq.post = S10G_TX_FFE_CFG_POST; + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) { + adapter->aml_txeq.main = 0x2b2b2b2b; + adapter->aml_txeq.pre1 = 0x03030303; + adapter->aml_txeq.pre2 = 0; + adapter->aml_txeq.post = 0x11111111; + } + } else { + return 0; + } + + addr = 0x141c; + 
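/*
+	 * 0x141c/0x1420/0x1424/0x1428 are presumably the packed per-lane
+	 * TX FFE tap registers (main, pre1, pre2, post); the 0x2b2b2b2b
+	 * style values above suggest one byte per lane.
+	 */
+	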
txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.main); + + addr = 0x1420; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre1); + + addr = 0x1424; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.pre2); + + addr = 0x1428; + txgbe_wr32_ephy(hw, addr, adapter->aml_txeq.post); + + return 0; +} + +int txgbe_e56_get_temp(struct txgbe_hw *hw, int *temp) +{ + int data_code, temp_data, temp_fraction; + u32 rdata; + u32 timer = 0; + + while (1) { + rdata = rd32(hw, 0x1033c); + if (((rdata >> 12) & 0x1) != 0) + break; + + if (timer++ > PHYINIT_TIMEOUT) + return -ETIMEDOUT; + } + + data_code = rdata & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + *temp = temp_data / 10000; + temp_fraction = temp_data - (*temp * 10000); + if (temp_fraction >= 5000) + *temp += 1; + + return 0; +} + +u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + int i; + + //CMS Config Master + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrdval7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrden1 *)&rdata) + ->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrden1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + //TXS Config Master + for (i = 0; i < 4; i++) { + addr = E56PHY_TXS_TXS_CFG_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, + 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + //Setting TX FFE + txgbe_e56_phy_tx_ffe_cfg(hw, TXGBE_LINK_SPEED_40GB_FULL); + + //RXS Config master + for (i = 0; i < 4; i++) { + addr = E56PHY_RXS_RXS_CFG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, + 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + 
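/* view the raw word through its register-layout union to set named fields */
+		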
((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->prediv0 = 0xfa0; + ((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->target_cnt0 = 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_range_sel0 = 0x2; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->vco_code_init = 0x7ff; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_current_boost_en0 = + 0x1; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->bbcdr_current_boost0 = + 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, + 0xf); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, + 0xf); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, + 0xc); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, + 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + txgbe_field_set(&rdata, + E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config0 *)&rdata)->adc_intl2slice_delay0 = + 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config2 *)&rdata)->interleaver_hbw_disable0 = + 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, + 0x56); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, + 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + txgbe_field_set(&rdata, + E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, + 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 
0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, + 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, + 0x9); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, + 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, + 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, + S10G_PHY_RX_CTLE_TAP_FRACP1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, + S10G_PHY_RX_CTLE_TAP_FRACP2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, + S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, + 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS3_ANA_OVRDVAL_11_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G__RXS3_ANA_OVRDVAL_11 *)&rdata)->ana_test_adc_clkgen_i = + 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + ((E56G__RXS0_ANA_OVRDEN_2 *)&rdata) + ->ovrd_en_ana_test_adc_clkgen_i = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 
E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 4, 0, 0x6); + txgbe_field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, + 0x1); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, + 0x1); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, + 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 9, 4, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + } + + // PDIG Config master + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + 
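/* the _X4096 suffix suggests these wait counts tick every 4096 cycles */
+	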
txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, + 0x49); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, + 0x37); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, + 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, + 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_phy_tx_ffe_cfg(hw, TXGBE_LINK_SPEED_25GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, + 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 
E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, + 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, + 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, + 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = 
rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, + S25G_PHY_RX_CTLE_TAP_FRACP1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, + S25G_PHY_RX_CTLE_TAP_FRACP2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, + S25G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, + S25G_PHY_RX_CTLE_TAPWT_WEIGHT1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, + S25G_PHY_RX_CTLE_TAPWT_WEIGHT2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, + S25G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, + 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, + ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 4, 0, 0x0); + txgbe_field_set(&rdata, 14, 13, 
0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, + 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, + 0x49); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, + 0x37); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, + 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, + 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} 
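+
+/* Note: the per-speed cfg helpers (txgbe_e56_cfg_25g() above and
+ * txgbe_e56_cfg_10g() below) share one read-modify-write pattern: read a
+ * PHY register over the ephy bus with rd32_ephy(), update selected bit
+ * fields with txgbe_field_set() or a register union overlay, then write
+ * the result back with txgbe_wr32_ephy(). Only the per-rate field values
+ * (CDR dividers, CTLE/VGA init codes, training thresholds) differ.
+ */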
+ +u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + + addr = E56G_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrdval7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrden1 *)&rdata) + ->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_cms_ana_ovrden1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Setting TX FFE + txgbe_e56_phy_tx_ffe_cfg(hw, TXGBE_LINK_SPEED_10GB_FULL); + + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, + 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->prediv0 = 0xfa0; + ((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->target_cnt0 = 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_range_sel0 = 0x2; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->vco_code_init = 0x7ff; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_current_boost_en0 = 0x1; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, + 0xc); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, 
addr, rdata); + + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, + 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, + S10G_PHY_RX_CTLE_TAP_FRACP1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, + S10G_PHY_RX_CTLE_TAP_FRACP2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, + S10G_PHY_RX_CTLE_TAP_FRACP3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT1); + txgbe_field_set(&rdata, 
E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT2); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, + S10G_PHY_RX_CTLE_TAPWT_WEIGHT3); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, + 0x0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, + ovrd_en_ana_test_adc_clkgen_i, 0x0); + + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 4, 0, 0x6); + txgbe_field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, + 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 9, 4, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, 
addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, + 0x49); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, + 0x37); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, + 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, + 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int txgbe_e56_rxs_oscinit_temp_track(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int addr, rdata, timer; + int T = 40; + int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0; + int OFFSET_CENTRE_RANGE_H, OFFSET_CENTRE_RANGE_L, RANGE_FINAL; + int osc_freq_err_occur; + int i = 0; + int lane_num = 1; + struct txgbe_adapter *adapter = hw->back; + //1. Read the temperature T just before RXS is enabled. + txgbe_e56_get_temp(hw, &T); + + if (T < -5) + RX_COARSE_MID_TD = 10; + else if (T < 30) + RX_COARSE_MID_TD = 9; + else if (T < 65) + RX_COARSE_MID_TD = 8; + else if (T < 100) + RX_COARSE_MID_TD = 7; + else + RX_COARSE_MID_TD = 6; + + //Set CMVAR_RANGE_H/L based on the link speed mode + if (speed == TXGBE_LINK_SPEED_10GB_FULL || + speed == TXGBE_LINK_SPEED_40GB_FULL) { //10G mode + CMVAR_RANGE_H = S10G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S10G_CMVAR_RANGE_L; + } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) { //25G mode + CMVAR_RANGE_H = S25G_CMVAR_RANGE_H; + CMVAR_RANGE_L = S25G_CMVAR_RANGE_L; + } + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) + lane_num = 4; + // TBD select all lane + //3. 
Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_H + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + for (i = 0; i < lane_num; i++) { + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_H); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //Do SEQ::RX_ENABLE to enable RXS + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm its value is RX_SAMP_CAL_ST + // poll CTRL_FSM_RX_ST + rdata = 0; + timer = 0; + osc_freq_err_occur = 0; + while ((rdata >> (i * 8) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + usleep_range(500, 1000); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? + if (rdata & (0x100 << i)) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + + if (timer++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + //5/6.Define software variable as OFFSET_CENTRE_RANGE_H = ALIAS::RXS::COARSE + //- RX_COARSE_MID_TD. Clear the INTR. + rdata = 0; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_H = (rdata >> 4) & 0xf; + if (OFFSET_CENTRE_RANGE_H > RX_COARSE_MID_TD) { + OFFSET_CENTRE_RANGE_H = + OFFSET_CENTRE_RANGE_H - RX_COARSE_MID_TD; + } else { + OFFSET_CENTRE_RANGE_H = + RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_H; + } + + //7. Do SEQ::RX_DISABLE to disable RXS. 
Poll ALIAS::PDIG::CTRL_FSM_RX_ST and confirm + //its value is POWERDN_ST + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + timer = 0; + while (1) { + usleep_range(500, 1000); + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (((rdata >> (i * 8)) & 0x3f) == 0x21) + break; + + if (timer++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + usleep_range(500, 1000); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + + usleep_range(500, 1000); + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, rdata); + + usleep_range(500, 1000); + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // next round + + //9. Program ALIAS::RXS::RANGE_SEL = CMVAR::RANGE_L + // RXS0_ANA_OVRDVAL[5] + // ana_bbcdr_osc_range_sel_i[1:0] + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I, + CMVAR_RANGE_L); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_ANA_OVRDEN[0] + // [29] ovrd_en_ana_bbcdr_osc_range_sel_i + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //10. Do SEQ::RX_ENABLE to enable RXS, and let it stop after oscillator calibration. + // RXS0_OVRDVAL[0] + // [22] rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + // RXS0_OVRDEN[0] + // [27] ovrd_en_rxs0_rx0_samp_cal_done_o + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, (0x1 << i)); + txgbe_wr32_ephy(hw, addr, rdata); + + // poll CTRL_FSM_RX_ST + timer = 0; + osc_freq_err_occur = 0; + while (((rdata >> (i * 8)) & 0x3f) != 0x9) { //Bit[5:0]!= 0x9 + usleep_range(500, 1000); + // INTR[0] + // [11:8] intr_rx_osc_freq_err + rdata = 0; + addr = E56PHY_INTR_0_ADDR; + rdata = rd32_ephy(hw, addr); + // TBD is always osc_freq_err occur? + if ((rdata & 0x100) == 0x100) { + osc_freq_err_occur = 1; + break; + } + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if (timer++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + //11/12.Define software variable as OFFSET_CENTRE_RANGE_L = ALIAS::RXS::COARSE - + //RX_COARSE_MID_TD. Clear the INTR. 
+ rdata = 0;
+ addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ OFFSET_CENTRE_RANGE_L = (rdata >> 4) & 0xf;
+ if (OFFSET_CENTRE_RANGE_L > RX_COARSE_MID_TD) {
+ OFFSET_CENTRE_RANGE_L =
+ OFFSET_CENTRE_RANGE_L - RX_COARSE_MID_TD;
+ } else {
+ OFFSET_CENTRE_RANGE_L =
+ RX_COARSE_MID_TD - OFFSET_CENTRE_RANGE_L;
+ }
+
+ if (OFFSET_CENTRE_RANGE_L < OFFSET_CENTRE_RANGE_H)
+ RANGE_FINAL = CMVAR_RANGE_L;
+ else
+ RANGE_FINAL = CMVAR_RANGE_H;
+
+ //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST
+ //and confirm its value is POWERDN_ST
+ rdata = 0;
+ addr = E56PHY_PMD_CFG_0_ADDR;
+ rdata = rd32_ephy(hw, addr);
+ txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0);
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ timer = 0;
+ while (1) {
+ usleep_range(500, 1000);
+ rdata = 0;
+ addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR;
+ rdata = rd32_ephy(hw, addr);
+ if (((rdata >> (i * 8)) & 0x3f) == 0x21)
+ break;
+
+ if (timer++ > PHYINIT_TIMEOUT) {
+ e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n");
+ break;
+ }
+ }
+
+ //15. Since RX power-up fsm is stopped in RX_SAMP_CAL_ST,
+ //it is possible the timeout interrupt is set. Clear the same by clearing
+ //ALIAS::PDIG::INTR_CTRL_FSM_RX_ERR. Also clear ALIAS::PDIG::INTR_RX_OSC_FREQ_ERR
+ //which could also be set.
+ usleep_range(500, 1000);
+ rdata = 0;
+ addr = E56PHY_INTR_0_ADDR;
+ rdata = rd32_ephy(hw, addr);
+ usleep_range(500, 1000);
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ usleep_range(500, 1000);
+ rdata = 0;
+ addr = E56PHY_INTR_0_ADDR;
+ rdata = rd32_ephy(hw, addr);
+
+ //16. Program ALIAS::RXS::RANGE_SEL = RANGE_FINAL
+ rdata = 0x0000;
+ addr = E56PHY_RXS_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ txgbe_field_set(&rdata,
+ E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I,
+ RANGE_FINAL);
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ rdata = 0x0000;
+ addr = E56PHY_RXS0_OVRDEN_0_ADDR + (E56PHY_PMD_RX_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ txgbe_field_set(&rdata,
+ E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O,
+ 0x0);
+ txgbe_wr32_ephy(hw, addr, rdata);
+ }
+ //Do SEQ::RX_ENABLE
+ rdata = 0;
+ addr = E56PHY_PMD_CFG_0_ADDR;
+ rdata = rd32_ephy(hw, addr);
+ if (speed == TXGBE_LINK_SPEED_40GB_FULL)
+ txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0xf);
+ else
+ txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x1);
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ return status;
+}
+
+static int txgbe_e56_set_rx_ufine_lemax40(struct txgbe_hw *hw, u32 speed)
+{
+ int status = 0;
+ unsigned int rdata;
+ unsigned int ULTRAFINE_CODE;
+ int i = 0;
+ unsigned int CMVAR_UFINE_MAX = 0;
+ u32 addr;
+
+ for (i = 0; i < 4; i++) {
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL ||
+ speed == TXGBE_LINK_SPEED_40GB_FULL)
+ CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+ else if (speed == TXGBE_LINK_SPEED_25GB_FULL)
+ CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
+
+ //a. Assign software defined variables as below:
+ //ii. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE
+ addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_ultrafine_i);
+
+ //b. Perform the below logic sequence:
+ while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) {
+ ULTRAFINE_CODE = ULTRAFINE_CODE - 1;
+ addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
+ (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_ultrafine_i) = ULTRAFINE_CODE;
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ //Set ovrd_en=1 to override ASIC value
+ addr = E56G__RXS0_ANA_OVRDEN_1_ADDR +
+ (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_ultrafine_i) = 1;
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ // Wait 1 millisecond or longer
+ usleep_range(10000, 20000);
+ }
+ }
+ return status;
+}
+
+static int txgbe_e56_set_rxs_ufine_lemax(struct txgbe_hw *hw, u32 speed)
+{
+ int status = 0;
+ unsigned int rdata;
+ unsigned int ULTRAFINE_CODE;
+
+ unsigned int CMVAR_UFINE_MAX = 0;
+
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL)
+ CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+ else if (speed == TXGBE_LINK_SPEED_25GB_FULL)
+ CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
+
+ //a. Assign software defined variables as below:
+ //ii. ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE
+ EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5);
+ ULTRAFINE_CODE =
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i);
+
+ //b. Perform the below logic sequence:
+ while (ULTRAFINE_CODE > CMVAR_UFINE_MAX) {
+ ULTRAFINE_CODE = ULTRAFINE_CODE - 1;
+ txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_ultrafine_i, ULTRAFINE_CODE);
+ //Set ovrd_en=1 to override ASIC value
+ txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_ultrafine_i, 1);
+ // Wait 1 millisecond or longer
+ usleep_range(10000, 20000);
+ }
+
+ return status;
+}
+
+//--------------------------------------------------------------
+//compare function for sort()
+//--------------------------------------------------------------
+static int compare(const void *a, const void *b)
+{
+ const int *num1 = (const int *)a;
+ const int *num2 = (const int *)b;
+
+ if (*num1 < *num2)
+ return -1;
+ else if (*num1 > *num2)
+ return 1;
+ else
+ return 0;
+}
+
+static int txgbe_e56_set_rxrd_sec_code_40g(struct txgbe_hw *hw, int *SECOND_CODE,
+ int lane)
+{
+ int status = 0, i, N, median;
+ unsigned int rdata;
+ u32 addr;
+ int array_size, RXS_BBCDR_SECOND_ORDER_ST[5];
+
+ //Set ovrd_en=0 to read ASIC value
+ addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (lane * E56PHY_RXS_OFFSET);
+ rdata = rd32_ephy(hw, addr);
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_int_cstm_i) = 0;
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ N = 5;
+ for (i = 0; i < N; i = i + 1) {
+ //set RXS_BBCDR_SECOND_ORDER_ST[i] = RXS::ANA_OVRDVAL[5]::ana_bbcdr_int_cstm_i[4:0]
+ addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
+ (lane * E56PHY_RXS_OFFSET);
+ rdata = rd32_ephy(hw, addr);
+ RXS_BBCDR_SECOND_ORDER_ST[i] = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_int_cstm_i);
+ usleep_range(100, 200);
+ }
+
+ //sort array RXS_BBCDR_SECOND_ORDER_ST[i]
+ array_size = sizeof(RXS_BBCDR_SECOND_ORDER_ST) /
+ sizeof(RXS_BBCDR_SECOND_ORDER_ST[0]);
+ sort(RXS_BBCDR_SECOND_ORDER_ST, array_size,
+ sizeof(int), compare, NULL);
+
+ median = ((N + 1) / 2) - 1;
+ *SECOND_CODE = RXS_BBCDR_SECOND_ORDER_ST[median];
+
+ return status;
+}
+
+int txgbe_e56_rxrd_sec_code(struct txgbe_hw *hw, int *SECOND_CODE)
+{
+ int status = 0, i, N, median;
+ unsigned int rdata;
+ int array_size, RXS_BBCDR_SECOND_ORDER_ST[5];
+
+ //Set ovrd_en=0 to read ASIC value
+ txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_int_cstm_i, 0);
+
+ N = 5;
+ for (i = 0; i < N; i = i + 1) {
+ //set RXS_BBCDR_SECOND_ORDER_ST[i] = RXS::ANA_OVRDVAL[5]::ana_bbcdr_int_cstm_i[4:0]
+ EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5);
+ RXS_BBCDR_SECOND_ORDER_ST[i] = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_int_cstm_i);
+ usleep_range(100, 200);
+ }
+
+ //sort array RXS_BBCDR_SECOND_ORDER_ST[i]
+ array_size = sizeof(RXS_BBCDR_SECOND_ORDER_ST) /
+ sizeof(RXS_BBCDR_SECOND_ORDER_ST[0]);
+ sort(RXS_BBCDR_SECOND_ORDER_ST, array_size,
+ sizeof(int), compare, NULL);
+
+ median = ((N + 1) / 2) - 1;
+ *SECOND_CODE = RXS_BBCDR_SECOND_ORDER_ST[median];
+
+ return status;
+}
+
+int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed)
+{
+ int status = 0;
+ unsigned int rdata;
+ int SECOND_CODE;
+ int COARSE_CODE;
+ int FINE_CODE;
+ int ULTRAFINE_CODE;
+
+ int CMVAR_SEC_LOW_TH;
+ int CMVAR_UFINE_MAX = 0;
+ int CMVAR_FINE_MAX;
+ int CMVAR_UFINE_UMAX_WRAP = 0;
+ int CMVAR_COARSE_MAX;
+ int CMVAR_UFINE_FMAX_WRAP = 0;
+ int CMVAR_FINE_FMAX_WRAP = 0;
+ int CMVAR_SEC_HIGH_TH;
+ int CMVAR_UFINE_MIN;
+ int CMVAR_FINE_MIN;
+ int CMVAR_UFINE_UMIN_WRAP;
+ int CMVAR_COARSE_MIN;
+ int CMVAR_UFINE_FMIN_WRAP;
+ int CMVAR_FINE_FMIN_WRAP;
+ int i;
+ u32 addr;
+ int temp;
+ struct txgbe_adapter *adapter = hw->back;
+
+ for (i = 0; i < 4; i++) {
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL ||
+ speed == TXGBE_LINK_SPEED_40GB_FULL) {
+ CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH;
+ CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+ CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX;
+ CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP;
+ CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX;
+ CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP;
+ CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP;
+ CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH;
+ CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN;
+ CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN;
+ CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP;
+ CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN;
+ CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP;
+ CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP;
+ } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) {
+ CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH;
+ CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
+ CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX;
+ CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP;
+ CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX;
+ CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP;
+ CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP;
+ CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH;
+ CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN;
+ CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN;
+ CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP;
+ CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN;
+ CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP;
+ CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP;
+ } else {
+ e_info(drv, "Invalid speed\n");
+ return 0;
+ }
+
+ status = txgbe_e56_get_temp(hw, &temp);
+ if (status)
+ return 0;
+
+ adapter->amlite_temp = temp;
+
+ //Assign software defined variables as below:
+ //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER
+ status |= txgbe_e56_set_rxrd_sec_code_40g(hw, &SECOND_CODE, i);
+
+ //b. COARSE_CODE = ALIAS::RXS::COARSE
+ //c. FINE_CODE = ALIAS::RXS::FINE
+ //d.
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + COARSE_CODE = + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + ULTRAFINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to override ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_fine_i) = FINE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_fine_i) = + CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_coarse_i) = COARSE_CODE + 1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else { + e_info(drv, "ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + ULTRAFINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //Set ovrd_en=1 to override ASIC value + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_fine_i) = FINE_CODE - 1; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + + (E56PHY_RXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + 
EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_ultrafine_i) = 1;
+ txgbe_wr32_ephy(hw, addr, rdata);
+ } else if (COARSE_CODE > CMVAR_COARSE_MIN) {
+ addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
+ (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_ultrafine_i) =
+ CMVAR_UFINE_FMIN_WRAP;
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_fine_i) =
+ CMVAR_FINE_FMIN_WRAP;
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5,
+ ana_bbcdr_coarse_i) = COARSE_CODE - 1;
+ txgbe_wr32_ephy(hw, addr, rdata);
+
+ addr = E56G__RXS0_ANA_OVRDEN_1_ADDR +
+ (E56PHY_RXS_OFFSET * i);
+ rdata = rd32_ephy(hw, addr);
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_coarse_i) = 1;
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_fine_i) = 1;
+ EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1,
+ ovrd_en_ana_bbcdr_ultrafine_i) = 1;
+ txgbe_wr32_ephy(hw, addr, rdata);
+ } else {
+ e_info(drv, "ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n");
+ }
+ }
+ }
+ return status;
+}
+
+int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed)
+{
+ struct txgbe_adapter *adapter = hw->back;
+ int status = 0;
+ unsigned int rdata;
+ int SECOND_CODE;
+ int COARSE_CODE;
+ int FINE_CODE;
+ int ULTRAFINE_CODE;
+
+ int CMVAR_SEC_LOW_TH;
+ int CMVAR_UFINE_MAX = 0;
+ int CMVAR_FINE_MAX;
+ int CMVAR_UFINE_UMAX_WRAP = 0;
+ int CMVAR_COARSE_MAX;
+ int CMVAR_UFINE_FMAX_WRAP = 0;
+ int CMVAR_FINE_FMAX_WRAP = 0;
+ int CMVAR_SEC_HIGH_TH;
+ int CMVAR_UFINE_MIN;
+ int CMVAR_FINE_MIN;
+ int CMVAR_UFINE_UMIN_WRAP;
+ int CMVAR_COARSE_MIN;
+ int CMVAR_UFINE_FMIN_WRAP;
+ int CMVAR_FINE_FMIN_WRAP;
+ int temp;
+
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
+ CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH;
+ CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+ CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX;
+ CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP;
+ CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX;
+ CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP;
+ CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP;
+ CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH;
+ CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN;
+ CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN;
+ CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP;
+ CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN;
+ CMVAR_UFINE_FMIN_WRAP = S10G_CMVAR_UFINE_FMIN_WRAP;
+ CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP;
+ } else if (speed == TXGBE_LINK_SPEED_25GB_FULL) {
+ CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH;
+ CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
+ CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX;
+ CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP;
+ CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX;
+ CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP;
+ CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP;
+ CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH;
+ CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN;
+ CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN;
+ CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP;
+ CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN;
+ CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP;
+ CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP;
+ } else {
+ e_info(drv, "Invalid speed\n");
+ return 0;
+ }
+
+ status = txgbe_e56_get_temp(hw, &temp);
+ if (status)
+ return 0;
+
+ adapter->amlite_temp = temp;
+
+ //Assign software defined variables as below:
+ //a. SECOND_CODE = ALIAS::RXS::SECOND_ORDER
+ status |= txgbe_e56_rxrd_sec_code(hw, &SECOND_CODE);
+
+ //b. COARSE_CODE = ALIAS::RXS::COARSE
+ //c. FINE_CODE = ALIAS::RXS::FINE
+ //d.
ULTRAFINE_CODE = ALIAS::RXS::ULTRAFINE + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i, + ULTRAFINE_CODE + 1); + //Set ovrd_en=1 to override ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + e_info(drv, "ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i, + ULTRAFINE_CODE - 1); + //Set ovrd_en=1 to override ASIC value + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + 
e_info(drv, "ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature\n"); + } + } + + return status; +} + +static int txgbe_e56_ctle_bypass_seq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1��b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1��b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1��b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1��b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = + 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = + 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1��b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1��b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. 
+ +static int txgbe_e56_ctle_bypass_seq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + unsigned int rdata; + + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1'b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1'b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1'b1 + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1'b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1'b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1'b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1'b1 + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = + 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = + 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + //1. Program the following RXS registers as mentioned below. + //RXS::ANA_OVRDVAL[0]::ana_ctle_bypass_i = 1'b1 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_bypass_i = 1'b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_0, + ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + + //RXS::ANA_OVRDVAL[3]::ana_ctle_cz_cstm_i[4:0] = 0 + //RXS::ANA_OVRDEN[0]::ovrd_en_ana_ctle_cz_cstm_i = 1'b1 + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS1_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS2_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_3, + ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + + //2. Program the following PDIG registers as mentioned below. + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_en_i = 1'b0 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_en_i = 1'b1 + // + //PDIG::RXS_OVRDVAL[1]::rxs_rx0_ctle_train_done_o = 1'b1 + //PDIG::RXS_OVRDEN[1]::ovrd_en_rxs_rx0_ctle_train_done_o = 1'b1 + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_ctle_train_en_i) = + 0; + EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, + rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_ctle_train_en_i) = + 0; + EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, + rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDVAL_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_ctle_train_en_i) = + 0; + EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, + rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, + ovrd_en_rxs1_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS1_OVRDEN_1, + ovrd_en_rxs1_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS1_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, + ovrd_en_rxs2_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS2_OVRDEN_1, + ovrd_en_rxs2_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS2_OVRDEN_1); + EPHY_RREG(E56G__PMD_RXS3_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, + ovrd_en_rxs3_rx0_ctle_train_en_i) = 1; + EPHY_XFLD(E56G__PMD_RXS3_OVRDEN_1, + ovrd_en_rxs3_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS3_OVRDEN_1); + } + return status; +} + +static int txgbe_e56_rxs_calib_adapt_seq40(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i, j; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypass_ctle = true; + + for (i = 0; i < 4; i++) { + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr);
+ txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + + if (bypass_ctle == 1) + txgbe_e56_ctle_bypass_seq(hw, speed); + txgbe_e56_rxs_oscinit_temp_track(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while (EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != + E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != + E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != + E56PHY_RX_RDY_ST || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != + E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) { + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //txgbe_e56_rxs_adc_adapt_seq + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS1_OVRDVAL_1, rxs1_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS1_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS2_OVRDVAL_1, rxs2_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS2_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS3_OVRDVAL_1, rxs3_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS3_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + for (i = 0; i < 4; i++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. 
Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while (((rdata >> + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & + 1) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(1000, 2000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + for (j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_adc_gain_adapt_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + addr = E56PHY_RXS0_OVRDVAL_1_ADDR + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + usleep_range(10000, 20000); + + //c. 
ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = + 1; + if (bypass_ctle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_en_i) = 1; + } + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_vga_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_vga_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + if (bypass_ctle == 0) { + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = E56G__PMD_RXS0_OVRDVAL_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } //while + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + + (E56PHY_PMD_RX_OFFSET * i); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. 
Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if (bypass_ctle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int txgbe_e56_rxs_calib_adapt_seq(struct txgbe_hw *hw, u32 speed) +{ + int status = 0, i; + u32 addr, timer; + u32 rdata = 0x0; + u32 bypass_ctle = true; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) + bypass_ctle = false; + else + bypass_ctle = true; + + if (hw->mac.type == txgbe_mac_aml) { + msleep(350); + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) + return TXGBE_ERR_PHY_INIT_NOT_DONE; + } + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + if (bypass_ctle == 1) + txgbe_e56_ctle_bypass_seq(hw, speed); + + txgbe_e56_rxs_oscinit_temp_track(hw, speed); + + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + timer = 0; + rdata = 0; + while (EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != + E56PHY_RX_RDY_ST) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + EPHY_RREG(E56G__PMD_CTRL_FSM_RX_STAT_0); + if (timer++ > PHYINIT_TIMEOUT) 
{ + //Do SEQ::RX_DISABLE + rdata = 0; + addr = E56PHY_PMD_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + return TXGBE_ERR_TIMEOUT; + } + } + + //RXS ADC adaptation sequence + //txgbe_e56_rxs_adc_adapt_seq + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_cdr_rdy_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + return TXGBE_ERR_TIMEOUT; + } + + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + timer = 0; + while (((rdata >> + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB) & + 1) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(1000, 2000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + for (i = 0; i < 16; i++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, + 0); + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_adc_ofst_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. 
Wait for 1ms or greater + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o, + 0); + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_adc_gain_adapt_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + + addr = E56PHY_RXS0_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + usleep_range(10000, 20000); + + //c. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_intl_adapt_en_i, 0); + + addr = E56PHY_RXS0_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_en_i) = 1; + if (bypass_ctle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = + 1; + } + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_vga_train_done_o, 0); + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_vga_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + + if (bypass_ctle == 0) { + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_done_o, 0); + rdata = 0; + timer = 0; + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_done_o) != 1) { + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } //while + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //b. 
Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if (bypass_ctle == 0) { + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + } + ////Remove the OVERRIDE on ALIAS::RXS::FFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_ffe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ffe_train_en_i) = 0; + ////Remove the OVERRIDE on ALIAS::RXS::DFE_TRAIN_EN + //printf("Setting RXS0_OVRDEN[1]::ovrd_en_rxs0_rx0_dfe_train_en_i to 0\n"); + //EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_dfe_train_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw) +{ + u32 status; + u32 value; + int temp; + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + temp = DEFAULT_TEMP; + + if (temp < DEFAULT_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + txgbe_field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + txgbe_field_set(&value, 20, 16, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + txgbe_field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + txgbe_field_set(&value, 8, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else if (temp > HIGH_TEMP) { + value = rd32_ephy(hw, CMS_ANA_OVRDEN0); + txgbe_field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN0, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL2); + txgbe_field_set(&value, 20, 16, 0x3); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL2, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + txgbe_field_set(&value, 12, 12, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL7); + txgbe_field_set(&value, 8, 4, 0x3); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL7, value); + } else { + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + txgbe_field_set(&value, 4, 4, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL4); + txgbe_field_set(&value, 24, 24, 0x1); + txgbe_field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL4, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL5); + txgbe_field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDEN1); + txgbe_field_set(&value, 23, 23, 0x1); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDEN1, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL9); + txgbe_field_set(&value, 24, 24, 0x1); + txgbe_field_set(&value, 31, 29, 0x4); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL9, value); + + value = rd32_ephy(hw, CMS_ANA_OVRDVAL10); + txgbe_field_set(&value, 1, 0, 0x0); + txgbe_wr32_ephy(hw, CMS_ANA_OVRDVAL10, value); + } + + return 0; +}
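txgbe_e56_cfg_temp() above patches the CMS analog override registers with raw (msb, lsb) bit spans such as (20, 16). txgbe_field_set() itself is defined elsewhere in this patch set; a plausible standalone model of its contract, shown only for illustration, is:

/* Assumed semantics of txgbe_field_set(): overwrite bits [msb:lsb] of *reg
 * with val, leaving the other bits untouched. Illustrative, not driver code. */
static void demo_field_set(u32 *reg, unsigned int msb, unsigned int lsb, u32 val)
{
	u32 mask = (msb - lsb == 31) ? ~0U :
		   (((1U << (msb - lsb + 1)) - 1) << lsb);

	*reg = (*reg & ~mask) | ((val << lsb) & mask);
}

/* e.g. demo_field_set(&value, 20, 16, 0x3) clears bits 20:16 and writes 0x3
 * there, matching the txgbe_field_set(&value, 20, 16, 0x3) call in the
 * high-temperature branch above. */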
+ +int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + s32 status; + + status = txgbe_e56_rxs_calib_adapt_seq40(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + txgbe_e56_set_rx_ufine_lemax40(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq_40g(hw, speed); + + adapter->link_valid = true; + return 0; +} + +static int txgbe_e56_config_rx(struct txgbe_hw *hw, u32 speed) +{ + s32 status; + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + txgbe_e56_config_rx_40G(hw, speed); + } else { + status = txgbe_e56_rxs_calib_adapt_seq(hw, speed); + if (status) + return status; + + //Step 2 of 2.3.4 + txgbe_e56_set_rxs_ufine_lemax(hw, speed); + + //2.3.4 RXS post CDR lock temperature tracking sequence + txgbe_temp_track_seq(hw, speed); + } + return 0; +}
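Both receive paths above lean on the EPHY_RREG()/EPHY_XFLD()/EPHY_WREG() macros declared in txgbe_e56.h later in this patch: each E56G__* register name doubles as a type whose bitfields mirror the hardware layout, so a read-modify-write can assign fields by name inside a u32 scratch word. A minimal model of the idiom, with an invented register type and field names:

/* Illustration only; the real types are generated per register in txgbe_e56.h. */
typedef struct {
	u32 train_en : 1;	/* bit 0 */
	u32 train_done : 1;	/* bit 1 */
	u32 rsvd : 30;
} DEMO_OVRD;

#define DEMO_OVRD_ADDR 0x0	/* invented address */

static void demo_rmw(struct txgbe_hw *hw)
{
	u32 rdata;

	rdata = rd32_ephy(hw, DEMO_OVRD_ADDR);		/* EPHY_RREG(DEMO_OVRD) */
	((DEMO_OVRD *)&rdata)->train_en = 1;		/* EPHY_XFLD(DEMO_OVRD, train_en) = 1 */
	txgbe_wr32_ephy(hw, DEMO_OVRD_ADDR, rdata);	/* EPHY_WREG(DEMO_OVRD) */
}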
+ +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. +//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST +//-------------------------------------------------------------- +static int txgbe_e56_disable_rx40G(struct txgbe_hw *hw) +{ + int status = 0; + unsigned int rdata, timer; + unsigned int addr, temp; + int i; + struct txgbe_adapter *adapter = hw->back; + + for (i = 0; i < 4; i++) { + //1. Disable OVERRIDE on below aliases + //a. ALIAS::RXS::RANGE_SEL + rdata = 0x0000; + addr = E56G__RXS0_ANA_OVRDEN_0_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_bbcdr_osc_range_sel_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + //b. ALIAS::RXS::COARSE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = + 0; + //c. ALIAS::RXS::FINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = + 0; + //d. ALIAS::RXS::ULTRAFINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //e. ALIAS::RXS::SAMP_CAL_DONE + addr = E56G__PMD_RXS0_OVRDEN_0_ADDR + + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_0, + ovrd_en_rxs0_rx0_samp_cal_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_2_ADDR + + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //f. ALIAS::RXS::ADC_OFST_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0; + //g. ALIAS::RXS::ADC_GAIN_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0; + //j. ALIAS::RXS::ADC_INTL_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_1_ADDR + + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //h. ALIAS::RXS::ADC_INTL_CAL_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = 0; + //i. ALIAS::RXS::ADC_INTL_CAL_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0; + //k. ALIAS::RXS::CDR_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = + 0; + //l. ALIAS::RXS::VGA_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //m. ALIAS::RXS::CTLE_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + //p. ALIAS::RXS::RX_FETX_TRAIN_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_train_done_o) = 0; + //r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0; + //s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__PMD_RXS0_OVRDEN_3_ADDR + + (i * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + //n. ALIAS::RXS::RX_FETX_MOD_TYPE + //o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE + temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, + ovrd_en_rxs0_rx0_spareout_o); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, + ovrd_en_rxs0_rx0_spareout_o) = temp & 0x8F; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_DIG_OVRDEN_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + //q. ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //2. Disable pattern checker - + addr = E56G__RXS0_DFT_1_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_DFT_1, ber_en) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //3. Disable internal serial loopback mode - + addr = E56G__RXS0_ANA_OVRDEN_3_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + addr = E56G__RXS0_ANA_OVRDEN_2_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_en_adccal_lpbk_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //4. Enable bypass of clock gates in RXS - + addr = E56G__RXS0_RXS_CFG_0_ADDR + (i * E56PHY_RXS_OFFSET); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en) = + 0x1FFF; + txgbe_wr32_ephy(hw, addr, rdata); + } + + //5. Disable KR training mode - + //a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0 + addr = E56G__PMD_BASER_PMD_CONTROL_ADDR; + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln1) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln2) = 0; + EPHY_XFLD(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln3) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + //6. Disable RX to TX parallel loopback - + //a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0 + addr = E56G__PMD_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en) = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + //The FSM to disable RXS is present in PDIG. The FSM disables the RXS when - + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + + while (EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx0_st) != + 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx1_st) != + 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx2_st) != + 0x21 || + EPHY_XFLD(E56G__PMD_CTRL_FSM_RX_STAT_0, ctrl_fsm_rx3_st) != + 0x21) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + usleep_range(100, 200); + if (timer++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +}
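Nearly every wait in these sequences repeats one idiom: re-read a status register, sleep, and give up once PHYINIT_TIMEOUT iterations pass. A hypothetical helper that captures the pattern is sketched below; rd32_ephy(), usleep_range() and TXGBE_ERR_TIMEOUT come from the driver, while the helper name and the mask/want encoding are invented here:

/* Poll until (reg & mask) == want, or time out; illustrative only. */
static int demo_poll_field(struct txgbe_hw *hw, u32 addr, u32 mask, u32 want)
{
	u32 timer = 0;

	while ((rd32_ephy(hw, addr) & mask) != want) {
		usleep_range(500, 1000);
		if (timer++ > PHYINIT_TIMEOUT)
			return TXGBE_ERR_TIMEOUT;
	}

	return 0;
}

/* The POWERDN_ST wait above could then read:
 * demo_poll_field(hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR, 0x3f, 0x21). */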
+ +//-------------------------------------------------------------- +//2.2.10 SEQ::RX_DISABLE +//Use PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 to powerdown specific RXS lanes. +//Completion of RXS powerdown can be confirmed by observing ALIAS::PDIG::CTRL_FSM_RX_ST = POWERDN_ST +//-------------------------------------------------------------- +static int txgbe_e56_disable_rx(struct txgbe_hw *hw) +{ + int status = 0; + unsigned int rdata, timer; + unsigned int addr, temp; + struct txgbe_adapter *adapter = hw->back; + + //1. Disable OVERRIDE on below aliases + //a. ALIAS::RXS::RANGE_SEL + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_bbcdr_osc_range_sel_i, 0); + + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + //b. ALIAS::RXS::COARSE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_coarse_i) = 0; + //c. ALIAS::RXS::FINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_fine_i) = 0; + //d. ALIAS::RXS::ULTRAFINE + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, ovrd_en_ana_bbcdr_ultrafine_i) = 0; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + + //e. ALIAS::RXS::SAMP_CAL_DONE + txgbe_e56_ephy_config(E56G__PMD_RXS0_OVRDEN_0, + ovrd_en_rxs0_rx0_samp_cal_done_o, 0); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_2); + //f. ALIAS::RXS::ADC_OFST_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i) = 0; + //g. ALIAS::RXS::ADC_GAIN_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_gain_adapt_en_i) = 0; + //j. ALIAS::RXS::ADC_INTL_ADAPT_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_2, + ovrd_en_rxs0_rx0_adc_intl_adapt_en_i) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_2); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + //h. ALIAS::RXS::ADC_INTL_CAL_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_adc_intl_cal_en_i) = + 0; + //i. ALIAS::RXS::ADC_INTL_CAL_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_adc_intl_cal_done_o) = 0; + //k. ALIAS::RXS::CDR_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_cdr_en_i) = 0; + //l. ALIAS::RXS::VGA_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_vga_train_en_i) = 0; + //m. ALIAS::RXS::CTLE_TRAIN_EN + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = + 0; + //p. ALIAS::RXS::RX_FETX_TRAIN_DONE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_train_done_o) = 0; + //r. ALIAS::RXS::RX_TXFFE_COEFF_CHANGE + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_coeff_change_o) = 0; + //s. ALIAS::RXS::RX_TXFFE_TRAIN_ENACK + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_txffe_train_enack_o) = 0; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_3); + //n. ALIAS::RXS::RX_FETX_MOD_TYPE + //o. ALIAS::RXS::RX_FETX_MOD_TYPE_UPDATE + temp = EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_3, ovrd_en_rxs0_rx0_spareout_o) = temp & + 0x8F; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_3); + + //q. ALIAS::RXS::SLICER_THRESHOLD_OVRD_EN + EPHY_RREG(E56G__RXS0_DIG_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, top_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, mid_comp_th_ovrd_en) = 0; + EPHY_XFLD(E56G__RXS0_DIG_OVRDEN_1, bot_comp_th_ovrd_en) = 0; + EPHY_WREG(E56G__RXS0_DIG_OVRDEN_1); + + //2. Disable pattern checker - + txgbe_e56_ephy_config(E56G__RXS0_DFT_1, ber_en, 0); + + //3. Disable internal serial loopback mode - + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_3, ovrd_en_ana_sel_lpbk_i, + 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, + ovrd_en_ana_en_adccal_lpbk_i, 0); + + //4. Enable bypass of clock gates in RXS - + txgbe_e56_ephy_config(E56G__RXS0_RXS_CFG_0, train_clk_gate_bypass_en, + 0x1FFF); + + //5. Disable KR training mode - + //a. ALIAS::PDIG::KR_TRAINING_MODE = 0b0 + txgbe_e56_ephy_config(E56G__PMD_BASER_PMD_CONTROL, training_enable_ln0, + 0); + + //6. Disable RX to TX parallel loopback - + //a. ALIAS::PDIG::RX_TO_TX_LPBK_EN = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_5, rx_to_tx_lpbk_en, 0); + + //The FSM to disable RXS is present in PDIG.
The FSM disables the RXS when - + //PDIG::PMD_CFG[0]::rx_en_cfg[] = 0b0 + txgbe_e56_ephy_config(E56G__PMD_PMD_CFG_0, rx_en_cfg, 0); + + //Wait RX FSM to be POWERDN_ST + timer = 0; + while (1) { + rdata = 0; + addr = E56PHY_CTRL_FSM_RX_STAT_0_ADDR; + rdata = rd32_ephy(hw, addr); + if ((rdata & 0x3f) == 0x21) + break; + + usleep_range(100, 200); + if (timer++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait E56PHY_CTRL_FSM_RX_STAT_0_ADDR Timeout!!!\n"); + break; + } + } + + return status; +} + +int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed) +{ + int status = 0; + u32 rdata; + u32 addr; + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); + + if (hw->mac.type == txgbe_mac_aml) { + rdata = rd32(hw, TXGBE_GPIO_EXT); + if (rdata & (TXGBE_SFP1_MOD_ABS_LS | TXGBE_SFP1_RX_LOS_LS)) + return TXGBE_ERR_TIMEOUT; + } + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + + if (hw->mac.type == txgbe_mac_aml40) { + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx40G(hw); + status = txgbe_e56_config_rx_40G(hw, speed); + } else { + //14. Do SEQ::RX_DISABLE to disable RXS. Poll ALIAS::PDIG::CTRL_FSM_RX_ST + //and confirm its value is POWERDN_ST + txgbe_e56_disable_rx(hw); + status = txgbe_e56_config_rx(hw, speed); + } + + addr = E56PHY_INTR_0_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_0_IDLE_ENTRY1); + + addr = E56PHY_INTR_1_ADDR; + txgbe_wr32_ephy(hw, addr, E56PHY_INTR_1_IDLE_EXIT1); + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, + E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, + E56PHY_INTR_1_IDLE_EXIT1); + + hw->mac.ops.enable_sec_tx_path(hw); + + return status; +} + +//Reference setting code for SFP mode +int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value = 0; + u32 ppl_lock = false; + int status = 0; + u32 reset = 0; + + if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK) == + TXGBE_EPHY_STAT_PPL_LOCK) { + ppl_lock = true; + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + } + + if (hw->bus.lan_id == 0) + reset = TXGBE_MIS_RST_LAN0_EPHY_RST; + else + reset = TXGBE_MIS_RST_LAN1_EPHY_RST; + + wr32(hw, TXGBE_MIS_RST, reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + /////////////////////////// XLGPCS REGS Start + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + value |= 0x8000; + txgbe_wr32_epcs(hw, VR_PCS_DIG_CTRL1, value); + + usleep_range(1000, 2000); + value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); + if (value & 0x8000) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + value = txgbe_rd32_epcs(hw, SR_AN_CTRL); + txgbe_field_set(&value, 12, 12, 0); + txgbe_wr32_epcs(hw, SR_AN_CTRL, value); + + if (speed == TXGBE_LINK_SPEED_40GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + txgbe_field_set(&value, 5, 2, 0x3); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + txgbe_field_set(&value, 3, 0, 0x4); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + txgbe_field_set(&value, 29, 29, 0x1); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); +
txgbe_field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + txgbe_field_set(&value, 30, 30, 0x1); + txgbe_field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + txgbe_field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + + value = rd32_ephy(hw, PLL1_CFG2); + txgbe_field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value = rd32_ephy(hw, PLL0_DIV_CFG0); + txgbe_field_set(&value, 18, 8, 0x294); + txgbe_field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + txgbe_field_set(&value, 30, 28, 0x7); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 18, 16, 0x5); + txgbe_field_set(&value, 14, 12, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_field_set(&value, 18, 16, 0x5); + txgbe_field_set(&value, 2, 0, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + txgbe_field_set(&value, 4, 0, 0x2); + txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_40g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + txgbe_field_set(&value, 21, 20, 0x3); + txgbe_field_set(&value, 19, 12, 0xf); //TX_EN set + txgbe_field_set(&value, 8, 8, 0x0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + txgbe_field_set(&value, 5, 2, 5); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + txgbe_field_set(&value, 3, 0, 7); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2); + txgbe_field_set(&value, 6, 0, 0x39); + txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + txgbe_field_set(&value, 29, 29, 0x1); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); + //Update to 0 from SNPS for PIN CLKP/N: Enable the termination of the input buffer + txgbe_field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + txgbe_field_set(&value, 30, 30, 0x1); + txgbe_field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + txgbe_field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + 
+ value = rd32_ephy(hw, PLL1_CFG2); + txgbe_field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value = rd32_ephy(hw, PLL0_DIV_CFG0); + txgbe_field_set(&value, 18, 8, 0x294); + txgbe_field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + txgbe_field_set(&value, 30, 28, 0x7); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 18, 16, 0x3); + txgbe_field_set(&value, 14, 12, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_field_set(&value, 18, 16, 0x3); + txgbe_field_set(&value, 2, 0, 0x3); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + txgbe_field_set(&value, 4, 0, 0x9); + txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_25g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + txgbe_field_set(&value, 21, 20, 0x3); + txgbe_field_set(&value, 19, 12, 0x1); //TX_EN set + txgbe_field_set(&value, 8, 8, 0x0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL1); + txgbe_field_set(&value, 5, 2, 0); + txgbe_wr32_epcs(hw, SR_PCS_CTRL1, value); + + value = txgbe_rd32_epcs(hw, SR_PCS_CTRL2); + txgbe_field_set(&value, 3, 0, 0); + txgbe_wr32_epcs(hw, SR_PCS_CTRL2, value); + + value = txgbe_rd32_epcs(hw, SR_PMA_CTRL2); + txgbe_field_set(&value, 6, 0, 0xb); + txgbe_wr32_epcs(hw, SR_PMA_CTRL2, value); + + value = rd32_ephy(hw, ANA_OVRDVAL0); + txgbe_field_set(&value, 29, 29, 0x1); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, value); + + value = rd32_ephy(hw, ANA_OVRDVAL5); + txgbe_field_set(&value, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, value); + + value = rd32_ephy(hw, ANA_OVRDEN0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, value); + + value = rd32_ephy(hw, ANA_OVRDEN1); + txgbe_field_set(&value, 30, 30, 0x1); + txgbe_field_set(&value, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, value); + + value = rd32_ephy(hw, PLL0_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, value); + + value = rd32_ephy(hw, PLL0_CFG2); + txgbe_field_set(&value, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, value); + + value = rd32_ephy(hw, PLL1_CFG0); + txgbe_field_set(&value, 25, 24, 0x1); + txgbe_field_set(&value, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, value); + + value = rd32_ephy(hw, PLL1_CFG2); + txgbe_field_set(&value, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, value); + + value = rd32_ephy(hw, PLL0_DIV_CFG0); + txgbe_field_set(&value, 18, 8, 0x294); + txgbe_field_set(&value, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG0); + txgbe_field_set(&value, 30, 28, 0x7); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 18, 16, 0x5); + txgbe_field_set(&value, 14, 12, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, value); + + value = rd32_ephy(hw, DATAPATH_CFG1); + txgbe_field_set(&value, 26, 24, 0x5); + txgbe_field_set(&value, 10, 8, 0x5); + txgbe_field_set(&value, 18, 16, 0x5); + txgbe_field_set(&value, 2, 0, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG1, value); + + value = rd32_ephy(hw, AN_CFG1); + txgbe_field_set(&value, 4, 0, 0x2); + 
txgbe_wr32_ephy(hw, AN_CFG1, value); + + txgbe_e56_cfg_temp(hw); + txgbe_e56_cfg_10g(hw); + + value = rd32_ephy(hw, PMD_CFG0); + txgbe_field_set(&value, 21, 20, 0x3); + txgbe_field_set(&value, 19, 12, 0x1); //TX_EN set + txgbe_field_set(&value, 8, 8, 0x0); + txgbe_field_set(&value, 1, 1, 0x1); + txgbe_wr32_ephy(hw, PMD_CFG0, value); + } + + hw->mac.ops.enable_tx_laser(hw); + + status = txgbe_e56_config_rx(hw, speed); + + value = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR); + txgbe_field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28); + txgbe_field_set(&value, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa); + txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, value); + + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, + E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, + E56PHY_INTR_1_IDLE_EXIT1); + + if (adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO) { + adapter->cur_fec_link = adapter->fec_link_mode; + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + } + +out: + if (ppl_lock) + hw->mac.ops.enable_sec_tx_path(hw); + + return status; +} + +int txgbe_get_cur_fec_mode(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value = 0; + + mutex_lock(&adapter->e56_lock); + value = txgbe_rd32_epcs(hw, SR_PMA_RS_FEC_CTRL); + mutex_unlock(&adapter->e56_lock); + + if (value & 0x4) + return TXGBE_PHY_FEC_RS; + + mutex_lock(&adapter->e56_lock); + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + mutex_unlock(&adapter->e56_lock); + + if (value & 0x1) + return TXGBE_PHY_FEC_BASER; + + return TXGBE_PHY_FEC_OFF; +} + +int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode) +{ + u32 value = 0; + + if (fec_mode & TXGBE_PHY_FEC_RS) { + //disable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + txgbe_field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + + //enable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x68c1); + txgbe_wr32_epcs(hw, 0x180a4, 0x3321); + txgbe_wr32_epcs(hw, 0x180a5, 0x973e); + txgbe_wr32_epcs(hw, 0x180a6, 0xccde); + + txgbe_wr32_epcs(hw, 0x38018, 1024); + value = txgbe_rd32_epcs(hw, 0x100c8); + txgbe_field_set(&value, 2, 2, 1); + txgbe_wr32_epcs(hw, 0x100c8, value); + } else if (fec_mode & TXGBE_PHY_FEC_BASER) { + //disable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x7690); + txgbe_wr32_epcs(hw, 0x180a4, 0x3347); + txgbe_wr32_epcs(hw, 0x180a5, 0x896f); + txgbe_wr32_epcs(hw, 0x180a6, 0xccb8); + txgbe_wr32_epcs(hw, 0x38018, 0x3fff); + value = txgbe_rd32_epcs(hw, 0x100c8); + txgbe_field_set(&value, 2, 2, 0); + txgbe_wr32_epcs(hw, 0x100c8, value); + + //enable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + txgbe_field_set(&value, 0, 0, 1); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + } else { + //disable RS FEC + txgbe_wr32_epcs(hw, 0x180a3, 0x7690); + txgbe_wr32_epcs(hw, 0x180a4, 0x3347); + txgbe_wr32_epcs(hw, 0x180a5, 0x896f); + txgbe_wr32_epcs(hw, 0x180a6, 0xccb8); + txgbe_wr32_epcs(hw, 0x38018, 0x3fff); + value = txgbe_rd32_epcs(hw, 0x100c8); + txgbe_field_set(&value, 2, 2, 0); + txgbe_wr32_epcs(hw, 0x100c8, value); + + //disable BASER FEC + value = txgbe_rd32_epcs(hw, SR_PMA_KR_FEC_CTRL); + txgbe_field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, SR_PMA_KR_FEC_CTRL, value); + } + + return 0; +}
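txgbe_e56_set_fec_mode() above consumes the TXGBE_PHY_FEC_* bits used throughout this file. As one illustration of how a caller might derive that argument from the kernel's ethtool FEC flags, a hypothetical mapping could look like the sketch below (the ETHTOOL_FEC_* flags come from linux/ethtool.h; this glue function is not part of the patch):

/* Hypothetical glue, not driver code: translate ethtool FEC flags into the
 * TXGBE_PHY_FEC_* bits accepted by txgbe_e56_set_fec_mode(). */
static u8 demo_fec_flags_to_phy(u32 ethtool_fec)
{
	u8 mode = 0;

	if (ethtool_fec & ETHTOOL_FEC_RS)
		mode |= TXGBE_PHY_FEC_RS;
	if (ethtool_fec & ETHTOOL_FEC_BASER)
		mode |= TXGBE_PHY_FEC_BASER;
	if (ethtool_fec & ETHTOOL_FEC_AUTO)
		mode |= TXGBE_PHY_FEC_AUTO;

	return mode;	/* 0 selects the FEC-off branch above */
}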
+ +int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) +{ + struct txgbe_adapter *adapter = hw->back; + int i = 0, j = 0; + u32 speed; + + do { + if (!(adapter->fec_link_mode & BIT(j))) { + j += 1; + continue; + } + + adapter->cur_fec_link = adapter->fec_link_mode & BIT(j); + + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + mutex_unlock(&adapter->e56_lock); + + for (i = 0; i < 4; i++) { + msleep(250); + txgbe_e56_check_phy_link(hw, &speed, link_up); + if (*link_up) + return 0; + } + + j += 1; + } while (j < 3); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h new file mode 100644 index 000000000000..381565570eb0 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h @@ -0,0 +1,1846 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_E56_H_ +#define _TXGBE_E56_H_ + +#include "txgbe_type.h" +#include "txgbe.h" + +#define FORMAT_NOPARENTHERSES(...) __VA_ARGS__ + +#define EPHY_RREG(REG) \ + do { \ + rdata = 0; \ + rdata = rd32_ephy(hw, REG##_ADDR); \ + } while (0) + +#define EPHY_WREG(REG) txgbe_wr32_ephy(hw, REG##_ADDR, rdata) + +#define EPCS_RREG(REG) \ + do { \ + rdata = 0; \ + rdata = txgbe_rd32_epcs(hw, REG##_ADDR); \ + } while (0) + +#define EPCS_WREG(REG) txgbe_wr32_epcs(hw, REG##_ADDR, rdata) + +#define txgbe_e56_ephy_config(reg, field, val) \ + do { \ + EPHY_RREG(reg); \ + EPHY_XFLD(reg, field) = (val); \ + EPHY_WREG(reg); \ + } while (0) + +#define txgbe_e56_epcs_config(reg, field, val) \ + do { \ + EPCS_RREG(reg); \ + EPCS_XFLD(reg, field) = (val); \ + EPCS_WREG(reg); \ + } while (0) + +//-------------------------------- +//LAN GPIO define for SFP+ module +//-------------------------------- +//-- Fields +#define SFP1_RS0 FORMAT_NOPARENTHERSES(5, 5) +#define SFP1_RS1 FORMAT_NOPARENTHERSES(4, 4) +#define SFP1_RX_LOS FORMAT_NOPARENTHERSES(3, 3) +#define SFP1_MOD_ABS FORMAT_NOPARENTHERSES(2, 2) +#define SFP1_TX_DISABLE FORMAT_NOPARENTHERSES(1, 1) +#define SFP1_TX_FAULT FORMAT_NOPARENTHERSES(0, 0) +#define EPHY_XFLD(REG, FLD) (((REG *)&rdata)->FLD) +#define EPCS_XFLD(REG, FLD) (((REG *)&rdata)->FLD) + +union txgbe_e56_cms_ana_ovrdval0 { + struct { + u32 ana_refclk_buf_daisy_en_i : 1; + u32 ana_refclk_buf_pad_en_i : 1; + u32 ana_vddinoff_dcore_dig_o : 1; + u32 ana_lcpll_en_clkout_hf_left_top_i : 1; + u32 ana_lcpll_en_clkout_hf_right_top_i : 1; + u32 ana_lcpll_en_clkout_hf_left_bot_i : 1; + u32 ana_lcpll_en_clkout_hf_right_bot_i : 1; + u32 ana_lcpll_en_clkout_lf_left_top_i : 1; + u32 ana_lcpll_en_clkout_lf_right_top_i : 1; + u32 ana_lcpll_en_clkout_lf_left_bot_i : 1; + u32 ana_lcpll_en_clkout_lf_right_bot_i : 1; + u32 ana_bg_en_i : 1; + u32 ana_en_rescal_i : 1; + u32 ana_rescal_comp_o : 1; + u32 ana_en_ldo_core_i : 1; + u32 ana_lcpll_hf_en_bias_i : 1; + u32 ana_lcpll_hf_en_loop_i : 1; + u32 ana_lcpll_hf_en_cp_i : 1; + u32 ana_lcpll_hf_set_lpf_i : 1; + u32 ana_lcpll_hf_en_vco_i : 1; + u32 ana_lcpll_hf_vco_amp_status_o : 1; + u32 ana_lcpll_hf_en_odiv_i : 1; + u32 ana_lcpll_lf_en_bias_i : 1; + u32 ana_lcpll_lf_en_loop_i : 1; + u32 ana_lcpll_lf_en_cp_i : 1; + u32 ana_lcpll_lf_set_lpf_i : 1; + u32 ana_lcpll_lf_en_vco_i : 1; + u32 ana_lcpll_lf_vco_amp_status_o : 1; + u32 ana_lcpll_lf_en_odiv_i : 1; + u32 ana_lcpll_hf_refclk_select_i : 1; + u32 ana_lcpll_lf_refclk_select_i : 1; + u32 rsvd0 : 1; + }; + u32 reg; +}; + +#define E56G_CMS_ANA_OVRDVAL_0_ADDR 0xcb0 +/* AMLITE ETH PHY Registers */ +#define SR_PMA_KR_FEC_CTRL 0x100ab +#define SR_AN_CTRL 0x70000 +#define VR_PCS_DIG_CTRL1
0x38000 +#define SR_PCS_CTRL1 0x30000 +#define SR_PCS_CTRL2 0x30007 +#define SR_PMA_CTRL2 0x10007 +#define VR_PCS_DIG_CTRL3 0x38003 +#define VR_PMA_CTRL3 0x180a8 +#define VR_PMA_CTRL4 0x180a9 +#define SR_PMA_RS_FEC_CTRL 0x100c8 +#define CMS_ANA_OVRDEN0 0xca4 +#define ANA_OVRDEN1 0xca8 +#define ANA_OVRDVAL0 0xcb0 +#define ANA_OVRDVAL5 0xcc4 +#define OSC_CAL_N_CDR4 0x14 +#define PLL0_CFG0 0xc10 +#define PLL0_CFG2 0xc18 +#define PLL0_DIV_CFG0 0xc1c +#define PLL1_CFG0 0xc48 +#define PLL1_CFG2 0xc50 +#define CMS_PIN_OVRDEN0 0xc8c +#define CMS_PIN_OVRDVAL0 0xc94 +#define DATAPATH_CFG0 0x142c +#define DATAPATH_CFG1 0x1430 +#define AN_CFG1 0x1438 +#define SPARE52 0x16fc +#define RXS_CFG0 0x000 +#define PMD_CFG0 0x1400 +#define SR_PCS_STS1 0x30001 +#define PMD_CTRL_FSM_TX_STAT0 0x14dc +#define CMS_ANA_OVRDEN0 0xca4 +#define CMS_ANA_OVRDEN1 0xca8 +#define CMS_ANA_OVRDVAL2 0xcb8 +#define CMS_ANA_OVRDVAL4 0xcc0 +#define CMS_ANA_OVRDVAL5 0xcc4 +#define CMS_ANA_OVRDVAL7 0xccc +#define CMS_ANA_OVRDVAL9 0xcd4 +#define CMS_ANA_OVRDVAL10 0xcd8 + +#define TXS_TXS_CFG1 0x804 +#define TXS_WKUP_CNT 0x808 +#define TXS_PIN_OVRDEN0 0x80c +#define TXS_PIN_OVRDVAL6 0x82c +#define TXS_ANA_OVRDVAL1 0x854 + +#define E56PHY_CMS_BASE_ADDR 0x0C00 + +#define E56PHY_CMS_PIN_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR + 0x8C) +#define E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I \ + FORMAT_NOPARENTHERSES(12, 12) + +#define E56PHY_CMS_PIN_OVRDVAL_0_ADDR (E56PHY_CMS_BASE_ADDR + 0x94) +#define E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I \ + FORMAT_NOPARENTHERSES(10, 10) + +#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR + 0xA4) + +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I \ + FORMAT_NOPARENTHERSES(29, 29) + +#define E56PHY_CMS_ANA_OVRDEN_1_ADDR (E56PHY_CMS_BASE_ADDR + 0xA8) +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I \ + FORMAT_NOPARENTHERSES(4, 4) + +#define E56PHY_CMS_ANA_OVRDVAL_2_ADDR (E56PHY_CMS_BASE_ADDR + 0xB8) + +#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I \ + FORMAT_NOPARENTHERSES(31, 28) + +#define E56PHY_CMS_ANA_OVRDVAL_4_ADDR (E56PHY_CMS_BASE_ADDR + 0xC0) + +#define E56PHY_TXS_BASE_ADDR 0x0800 +#define E56PHY_TXS1_BASE_ADDR 0x0900 +#define E56PHY_TXS2_BASE_ADDR 0x0A00 +#define E56PHY_TXS3_BASE_ADDR 0x0B00 +#define E56PHY_TXS_OFFSET 0x0100 + +#define E56PHY_PMD_RX_OFFSET 0x02C + +#define E56PHY_TXS_TXS_CFG_1_ADDR (E56PHY_TXS_BASE_ADDR + 0x04) +#define E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256 \ + FORMAT_NOPARENTHERSES(7, 4) +#define E56PHY_TXS_WKUP_CNT_ADDR (E56PHY_TXS_BASE_ADDR + 0x08) +#define E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32 FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32 FORMAT_NOPARENTHERSES(15, 8) + +#define E56PHY_TXS_PIN_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR + 0x0C) +#define E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I \ + FORMAT_NOPARENTHERSES(28, 28) + +#define E56PHY_TXS_PIN_OVRDVAL_6_ADDR (E56PHY_TXS_BASE_ADDR + 0x2C) + +#define E56PHY_TXS_ANA_OVRDVAL_1_ADDR (E56PHY_TXS_BASE_ADDR + 0x54) +#define E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I FORMAT_NOPARENTHERSES(23, 8) + +#define E56PHY_TXS_ANA_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR + 0x44) +#define E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I \ + FORMAT_NOPARENTHERSES(13, 13) + +#define E56PHY_RXS_BASE_ADDR 0x0000 +#define E56PHY_RXS1_BASE_ADDR 0x0200 +#define E56PHY_RXS2_BASE_ADDR 0x0400 +#define E56PHY_RXS3_BASE_ADDR 0x0600 +#define E56PHY_RXS_OFFSET 0x0200 + +#define E56PHY_RXS_RXS_CFG_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x000) +#define 
E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL FORMAT_NOPARENTHERSES(1, 1) +#define E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN \ + FORMAT_NOPARENTHERSES(17, 4) + +#define E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x008) +#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1 FORMAT_NOPARENTHERSES(15, 0) +#define E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1_LSB 0 +#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1 FORMAT_NOPARENTHERSES(31, 16) +#define E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1_LSB 16 + +#define E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR (E56PHY_RXS_BASE_ADDR + 0x014) +#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1 FORMAT_NOPARENTHERSES(3, 2) +#define E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT FORMAT_NOPARENTHERSES(18, 8) +#define E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1 \ + FORMAT_NOPARENTHERSES(21, 21) +#define E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1 \ + FORMAT_NOPARENTHERSES(27, 26) + +#define E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR (E56PHY_RXS_BASE_ADDR + 0x018) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH FORMAT_NOPARENTHERSES(3, 2) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK \ + FORMAT_NOPARENTHERSES(15, 12) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK \ + FORMAT_NOPARENTHERSES(19, 16) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK \ + FORMAT_NOPARENTHERSES(23, 20) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK \ + FORMAT_NOPARENTHERSES(27, 24) +#define E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT FORMAT_NOPARENTHERSES(30, 28) + +#define E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR (E56PHY_RXS_BASE_ADDR + 0x01C) +#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK \ + FORMAT_NOPARENTHERSES(3, 0) +#define E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK \ + FORMAT_NOPARENTHERSES(7, 4) + +#define E56PHY_RXS_INTL_CONFIG_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x020) +#define E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1 \ + FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_RXS_INTL_CONFIG_2_ADDR (E56PHY_RXS_BASE_ADDR + 0x028) +#define E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1 \ + FORMAT_NOPARENTHERSES(1, 1) + +#define E56PHY_RXS_TXFFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x02C) +#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH \ + FORMAT_NOPARENTHERSES(18, 12) +#define E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH \ + FORMAT_NOPARENTHERSES(26, 20) + +#define E56PHY_RXS_TXFFE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x030) +#define E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH FORMAT_NOPARENTHERSES(8, 0) +#define E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH FORMAT_NOPARENTHERSES(20, 12) + +#define E56PHY_RXS_TXFFE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR + 0x034) +#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH FORMAT_NOPARENTHERSES(8, 0) +#define E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH FORMAT_NOPARENTHERSES(20, 12) + +#define E56PHY_RXS_TXFFE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR + 0x038) +#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH FORMAT_NOPARENTHERSES(8, 0) +#define E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH FORMAT_NOPARENTHERSES(20, 12) +#define E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE \ + FORMAT_NOPARENTHERSES(26, 21) + +#define E56PHY_RXS_VGA_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x04C) +#define E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET FORMAT_NOPARENTHERSES(18, 12) + +#define E56PHY_RXS_VGA_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x050) +#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0 FORMAT_NOPARENTHERSES(4, 0) +#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0 FORMAT_NOPARENTHERSES(12, 8) +#define E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123 \ + 
FORMAT_NOPARENTHERSES(20, 16) +#define E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123 \ + FORMAT_NOPARENTHERSES(28, 24) + +#define E56PHY_RXS_CTLE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x054) +#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0 FORMAT_NOPARENTHERSES(24, 20) +#define E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123 \ + FORMAT_NOPARENTHERSES(31, 27) + +#define E56PHY_RXS_CTLE_TRAINING_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x058) +#define E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT FORMAT_NOPARENTHERSES(24, 0) + +#define E56PHY_RXS_CTLE_TRAINING_2_ADDR (E56PHY_RXS_BASE_ADDR + 0x05C) +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1 FORMAT_NOPARENTHERSES(5, 0) +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2 FORMAT_NOPARENTHERSES(13, 8) +#define E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3 FORMAT_NOPARENTHERSES(21, 16) + +#define E56PHY_RXS_CTLE_TRAINING_3_ADDR (E56PHY_RXS_BASE_ADDR + 0x060) +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1 FORMAT_NOPARENTHERSES(9, 8) +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2 FORMAT_NOPARENTHERSES(11, 10) +#define E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3 FORMAT_NOPARENTHERSES(13, 12) + +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x064) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT \ + FORMAT_NOPARENTHERSES(5, 4) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT \ + FORMAT_NOPARENTHERSES(9, 8) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8 \ + FORMAT_NOPARENTHERSES(31, 28) + +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x068) +#define E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG \ + FORMAT_NOPARENTHERSES(31, 28) + +#define E56PHY_RXS_FFE_TRAINING_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x070) +#define E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN FORMAT_NOPARENTHERSES(23, 8) + +#define E56PHY_RXS_IDLE_DETECT_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x088) +#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX \ + FORMAT_NOPARENTHERSES(22, 16) +#define E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN \ + FORMAT_NOPARENTHERSES(30, 24) + +#define E56PHY_RXS_ANA_OVRDEN_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x08C) +#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I \ + FORMAT_NOPARENTHERSES(0, 0) +#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_TRIM_RTERM_I \ + FORMAT_NOPARENTHERSES(1, 1) +#define E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_BBCDR_OSC_RANGE_SEL_I \ + FORMAT_NOPARENTHERSES(29, 29) + +#define E56PHY_RXS_ANA_OVRDEN_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x090) +#define E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I \ + FORMAT_NOPARENTHERSES(0, 0) +#define E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I \ + FORMAT_NOPARENTHERSES(9, 9) + +#define E56PHY_RXS_ANA_OVRDEN_3_ADDR (E56PHY_RXS_BASE_ADDR + 0x098) +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I \ + FORMAT_NOPARENTHERSES(15, 15) +#define E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I \ + FORMAT_NOPARENTHERSES(25, 25) + +#define E56PHY_RXS_ANA_OVRDEN_4_ADDR (E56PHY_RXS_BASE_ADDR + 0x09C) +#define E56PHY_RXS_ANA_OVRDVAL_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x0A0) +#define E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I FORMAT_NOPARENTHERSES(0, 0) + +#define E56PHY_RXS_ANA_OVRDVAL_6_ADDR (E56PHY_RXS_BASE_ADDR + 0x0B8) +#define E56PHY_RXS_ANA_OVRDVAL_14_ADDR (E56PHY_RXS_BASE_ADDR + 0x0D8) +#define E56PHY_RXS_ANA_OVRDVAL_15_ADDR (E56PHY_RXS_BASE_ADDR + 0x0DC) +#define E56PHY_RXS_ANA_OVRDVAL_17_ADDR (E56PHY_RXS_BASE_ADDR + 0x0E4) +#define E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I \ + FORMAT_NOPARENTHERSES(18, 16) + +#define 
E56PHY_RXS_EYE_SCAN_1_ADDR (E56PHY_RXS_BASE_ADDR + 0x1A4) +#define E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER FORMAT_NOPARENTHERSES(31, 0) + +#define E56PHY_RXS_ANA_OVRDVAL_5_ADDR (E56PHY_RXS_BASE_ADDR + 0x0B4) +#define E56PHY_RXS_ANA_OVRDVAL_5_ANA_BBCDR_OSC_RANGE_SEL_I \ + FORMAT_NOPARENTHERSES(1, 0) + +#define E56PHY_RXS_RINGO_0_ADDR (E56PHY_RXS_BASE_ADDR + 0x1FC) + +#define E56PHY_PMD_BASE_ADDR 0x1400 +#define E56PHY_PMD_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x000) +#define E56PHY_PMD_CFG_0_RX_EN_CFG FORMAT_NOPARENTHERSES(19, 16) + +#define E56PHY_PMD_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR + 0x00C) +#define E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K FORMAT_NOPARENTHERSES(31, 24) +#define E56PHY_PMD_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR + 0x010) +#define E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_PMD_CFG_5_ADDR (E56PHY_PMD_BASE_ADDR + 0x014) +#define E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET FORMAT_NOPARENTHERSES(12, 12) +#define E56PHY_CTRL_FSM_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x040) +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_OFST_CAL_ERR \ + FORMAT_NOPARENTHERSES(4, 4) +#define E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR \ + FORMAT_NOPARENTHERSES(5, 5) +#define E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL FORMAT_NOPARENTHERSES(9, 8) +#define E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN FORMAT_NOPARENTHERSES(31, 24) + +#define E56PHY_CTRL_FSM_CFG_1_ADDR (E56PHY_PMD_BASE_ADDR + 0x044) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(23, 16) +#define E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(31, 24) + +#define E56PHY_CTRL_FSM_CFG_2_ADDR (E56PHY_PMD_BASE_ADDR + 0x048) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(23, 16) +#define E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(31, 24) + +#define E56PHY_CTRL_FSM_CFG_3_ADDR (E56PHY_PMD_BASE_ADDR + 0x04C) +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(7, 0) + +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(23, 16) +#define E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(31, 24) + +#define E56PHY_CTRL_FSM_CFG_4_ADDR (E56PHY_PMD_BASE_ADDR + 0x050) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(23, 16) +#define E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096 \ + FORMAT_NOPARENTHERSES(31, 24) + +#define E56PHY_CTRL_FSM_CFG_7_ADDR (E56PHY_PMD_BASE_ADDR + 0x05C) +#define E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN FORMAT_NOPARENTHERSES(15, 0) +#define E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_8_ADDR (E56PHY_PMD_BASE_ADDR + 0x060) +#define E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_12_ADDR 
(E56PHY_PMD_BASE_ADDR + 0x070) +#define E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_13_ADDR (E56PHY_PMD_BASE_ADDR + 0x074) +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN FORMAT_NOPARENTHERSES(15, 0) +#define E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_14_ADDR (E56PHY_PMD_BASE_ADDR + 0x078) +#define E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_15_ADDR (E56PHY_PMD_BASE_ADDR + 0x07C) +#define E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN FORMAT_NOPARENTHERSES(15, 0) + +#define E56PHY_CTRL_FSM_CFG_17_ADDR (E56PHY_PMD_BASE_ADDR + 0x084) +#define E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN FORMAT_NOPARENTHERSES(15, 0) + +#define E56PHY_CTRL_FSM_CFG_18_ADDR (E56PHY_PMD_BASE_ADDR + 0x088) +#define E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN FORMAT_NOPARENTHERSES(15, 0) + +#define E56PHY_CTRL_FSM_CFG_29_ADDR (E56PHY_PMD_BASE_ADDR + 0x0B4) +#define E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_33_ADDR (E56PHY_PMD_BASE_ADDR + 0x0C4) +#define E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL FORMAT_NOPARENTHERSES(15, 0) +#define E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_CFG_34_ADDR (E56PHY_PMD_BASE_ADDR + 0x0C8) +#define E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL FORMAT_NOPARENTHERSES(15, 0) +#define E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL FORMAT_NOPARENTHERSES(31, 16) + +#define E56PHY_CTRL_FSM_RX_STAT_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x0FC) +#define E56PHY_RXS0_OVRDEN_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x130) +#define E56PHY_RXS0_OVRDEN_0_OVRD_EN_RXS0_RX0_SAMP_CAL_DONE_O \ + FORMAT_NOPARENTHERSES(27, 27) + +#define E56PHY_RXS0_OVRDEN_1_ADDR (E56PHY_PMD_BASE_ADDR + 0x134) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_VGA_TRAIN_EN_I \ + FORMAT_NOPARENTHERSES(14, 14) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CTLE_TRAIN_EN_I \ + FORMAT_NOPARENTHERSES(16, 16) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_CDR_EN_I \ + FORMAT_NOPARENTHERSES(18, 18) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_EN_I \ + FORMAT_NOPARENTHERSES(23, 23) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O \ + FORMAT_NOPARENTHERSES(24, 24) +#define E56PHY_RXS0_OVRDEN_1_OVRD_EN_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 24 + +#define E56PHY_RXS0_OVRDEN_2_ADDR (E56PHY_PMD_BASE_ADDR + 0x138) +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_OFST_ADAPT_EN_I \ + FORMAT_NOPARENTHERSES(0, 0) +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_GAIN_ADAPT_EN_I \ + FORMAT_NOPARENTHERSES(3, 3) +#define E56PHY_RXS0_OVRDEN_2_OVRD_EN_RXS0_RX0_ADC_INTL_ADAPT_EN_I \ + FORMAT_NOPARENTHERSES(6, 6) + +#define E56PHY_RXS0_OVRDVAL_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x140) +#define E56PHY_RXS0_OVRDVAL_0_RXS0_RX0_SAMP_CAL_DONE_O \ + FORMAT_NOPARENTHERSES(22, 22) + +#define E56PHY_RXS0_OVRDVAL_1_ADDR (E56PHY_PMD_BASE_ADDR + 0x144) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_VGA_TRAIN_EN_I \ + FORMAT_NOPARENTHERSES(7, 7) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CTLE_TRAIN_EN_I \ + FORMAT_NOPARENTHERSES(9, 9) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_CDR_EN_I FORMAT_NOPARENTHERSES(11, 11) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_EN_I \ + FORMAT_NOPARENTHERSES(16, 16) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O \ + FORMAT_NOPARENTHERSES(17, 17) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_CAL_DONE_O_LSB 17 +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_OFST_ADAPT_EN_I 
\ + FORMAT_NOPARENTHERSES(25, 25) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_GAIN_ADAPT_EN_I \ + FORMAT_NOPARENTHERSES(28, 28) +#define E56PHY_RXS0_OVRDVAL_1_RXS0_RX0_ADC_INTL_ADAPT_EN_I \ + FORMAT_NOPARENTHERSES(31, 31) + +#define E56PHY_INTR_0_IDLE_ENTRY1 0x10000000 +#define E56PHY_INTR_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x1EC) +#define E56PHY_INTR_0_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR + 0x1E0) + +#define E56PHY_INTR_1_IDLE_EXIT1 0x1 +#define E56PHY_INTR_1_ADDR (E56PHY_PMD_BASE_ADDR + 0x1F0) +#define E56PHY_INTR_1_ENABLE_ADDR (E56PHY_PMD_BASE_ADDR + 0x1E4) + +#define E56PHY_KRT_TFSM_CFG_ADDR (E56PHY_PMD_BASE_ADDR + 0x2B8) +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K \ + FORMAT_NOPARENTHERSES(7, 0) +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K \ + FORMAT_NOPARENTHERSES(15, 8) +#define E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K \ + FORMAT_NOPARENTHERSES(23, 16) + +#define E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR (E56PHY_PMD_BASE_ADDR + 0x2BC) +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2 \ + FORMAT_NOPARENTHERSES(9, 8) +#define E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_3 \ + FORMAT_NOPARENTHERSES(13, 12) + +#define PHYINIT_TIMEOUT 1000 //PHY initialization timeout value in 0.5ms unit + +#define E56G__BASEADDR 0x0 + +union txgbe_e56_cms_ana_ovrdval7 { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +}; + +#define E56G_CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR + 0xccc) + +union txgbe_e56_cms_ana_ovrden1 { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +}; + +#define E56G_CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR + 0xca8) + +union txgbe_e56_cms_ana_ovrdval9 { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +}; + +#define E56G_CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR + 0xcd4) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_vcofilt_byp_i : 1; + u32 ovrd_en_ana_bbcdr_coarse_i : 1; + u32 ovrd_en_ana_bbcdr_fine_i : 1; + u32 
ovrd_en_ana_bbcdr_ultrafine_i : 1; + u32 ovrd_en_ana_en_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_divctrl_i : 1; + u32 ovrd_en_ana_bbcdr_int_cstm_i : 1; + u32 ovrd_en_ana_bbcdr_prop_step_i : 1; + u32 ovrd_en_ana_en_bbcdr_clk_i : 1; + u32 ovrd_en_ana_test_bbcdr_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ovrd_en_ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_rdout_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ovrd_en_ana_bbcdr_elv_cnt_pong_270_o : 1; + u32 ovrd_en_ana_en_bbcdr_samp_dac_i : 1; + u32 ovrd_en_ana_bbcdr_dac0_i : 1; + u32 ovrd_en_ana_bbcdr_dac90_i : 1; + u32 ovrd_en_ana_vga2_cload_in_cstm_i : 1; + u32 ovrd_en_ana_intlvr_cut_bw_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_1; + +#define E56G__RXS0_ANA_OVRDEN_1_ADDR (E56G__BASEADDR + 0x90) + +//-----Access structure typedef for Register:E56G__RXS0_OSC_CAL_N_CDR_0 +union txgbe_e56_rxs0_osc_cal_n_cdr0 { + struct { + u32 prediv0 : 16; + u32 target_cnt0 : 16; + }; + u32 reg; +}; + +#define E56G_RXS0_OSC_CAL_N_CDR_0_ADDR (E56G__BASEADDR + 0x4) + +union txgbe_e56_rxs0_osc_cal_n_cdr4 { + struct { + u32 osc_range_sel0 : 2; + u32 osc_range_sel1 : 2; + u32 osc_range_sel2 : 2; + u32 osc_range_sel3 : 2; + u32 vco_code_init : 11; + u32 calibrate_range_sel : 1; + u32 osc_current_boost_en0 : 1; + u32 osc_current_boost_en1 : 1; + u32 osc_current_boost_en2 : 1; + u32 osc_current_boost_en3 : 1; + u32 bbcdr_current_boost0 : 2; + u32 bbcdr_current_boost1 : 2; + u32 bbcdr_current_boost2 : 2; + u32 bbcdr_current_boost3 : 2; + }; + u32 reg; +}; + +#define E56G_RXS0_OSC_CAL_N_CDR_4_ADDR (E56G__BASEADDR + 0x14) + +union txgbe_e56_rxs0_intl_config0 { + struct { + u32 adc_intl2slice_delay0 : 16; + u32 adc_intl2slice_delay1 : 16; + }; + u32 reg; +}; + +#define E56G_RXS0_INTL_CONFIG_0_ADDR (E56G__BASEADDR + 0x20) + +union txgbe_e56_rxs0_intl_config2 { + struct { + u32 interleaver_hbw_disable0 : 1; + u32 interleaver_hbw_disable1 : 1; + u32 interleaver_hbw_disable2 : 1; + u32 interleaver_hbw_disable3 : 1; + u32 rsvd0 : 28; + }; + u32 reg; +}; + +#define E56G_RXS0_INTL_CONFIG_2_ADDR (E56G__BASEADDR + 0x28) + +typedef union { + struct { + u32 ovrd_en_ana_bbcdr_dac180_i : 1; + u32 ovrd_en_ana_bbcdr_dac270_i : 1; + u32 ovrd_en_ana_bbcdr_en_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_clrz_samp_cal_cnt_i : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_0_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_90_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_180_o : 1; + u32 ovrd_en_ana_bbcdr_samp_cnt_270_o : 1; + u32 ovrd_en_ana_en_adcbuf1_i : 1; + u32 ovrd_en_ana_test_adcbuf1_i : 1; + u32 ovrd_en_ana_en_adc_clk4ui_i : 1; + u32 ovrd_en_ana_adc_clk_skew0_i : 1; + u32 ovrd_en_ana_adc_clk_skew90_i : 1; + u32 ovrd_en_ana_adc_clk_skew180_i : 1; + u32 ovrd_en_ana_adc_clk_skew270_i : 1; + u32 ovrd_en_ana_adc_update_skew_i : 1; + u32 ovrd_en_ana_en_adc_pi_i : 1; + u32 ovrd_en_ana_adc_pictrl_quad_i : 1; + u32 ovrd_en_ana_adc_pctrl_code_i : 1; + u32 ovrd_en_ana_adc_clkdiv_i : 1; + u32 
ovrd_en_ana_test_adc_clkgen_i : 1; + u32 ovrd_en_ana_en_adc_i : 1; + u32 ovrd_en_ana_en_adc_vref_i : 1; + u32 ovrd_en_ana_vref_cnfg_i : 1; + u32 ovrd_en_ana_adc_data_cstm_o : 1; + u32 ovrd_en_ana_en_adccal_lpbk_i : 1; + u32 ovrd_en_ana_sel_adcoffset_cal_i : 1; + u32 ovrd_en_ana_sel_adcgain_cal_i : 1; + u32 ovrd_en_ana_adcgain_cal_swing_ctrl_i : 1; + u32 ovrd_en_ana_adc_gain_i : 1; + u32 ovrd_en_ana_vga_cload_out_cstm_i : 1; + u32 ovrd_en_ana_vga2_cload_out_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_2; + +#define E56G__RXS0_ANA_OVRDEN_2_ADDR (E56G__BASEADDR + 0x94) + +typedef union { + struct { + u32 ovrd_en_ana_adc_offset_i : 1; + u32 ovrd_en_ana_adc_slice_addr_i : 1; + u32 ovrd_en_ana_slice_wr_i : 1; + u32 ovrd_en_ana_test_adc_i : 1; + u32 ovrd_en_ana_test_adc_o : 1; + u32 ovrd_en_ana_spare_o : 8; + u32 ovrd_en_ana_sel_lpbk_i : 1; + u32 ovrd_en_ana_ana_debug_sel_i : 1; + u32 ovrd_en_ana_anabs_config_i : 1; + u32 ovrd_en_ana_en_anabs_i : 1; + u32 ovrd_en_ana_anabs_rxn_o : 1; + u32 ovrd_en_ana_anabs_rxp_o : 1; + u32 ovrd_en_ana_dser_clk_en_i : 1; + u32 ovrd_en_ana_dser_clk_config_i : 1; + u32 ovrd_en_ana_en_mmcdr_clk_obs_i : 1; + u32 ovrd_en_ana_skew_coarse0_fine1_i : 1; + u32 ovrd_en_ana_vddinoff_acore_dig_o : 1; + u32 ovrd_en_ana_vddinoff_dcore_dig_o : 1; + u32 ovrd_en_ana_vga2_boost_cstm_i : 1; + u32 ovrd_en_ana_adc_sel_vbgr_bias_i : 1; + u32 ovrd_en_ana_adc_nbuf_cnfg_i : 1; + u32 ovrd_en_ana_adc_pbuf_cnfg_i : 1; + u32 rsvd0 : 3; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_3; + +#define E56G__RXS0_ANA_OVRDEN_3_NUM 1 +#define E56G__RXS0_ANA_OVRDEN_3_ADDR (E56G__BASEADDR + 0x98) + +typedef union { + struct { + u32 pam4_ab_swap_en : 1; + u32 dser_data_sel : 1; + u32 signal_type : 1; + u32 precode_en : 1; + u32 train_clk_gate_bypass_en : 14; + u32 rsvd0 : 14; + }; + u32 reg; +} E56G__RXS0_RXS_CFG_0; + +#define E56G__RXS0_RXS_CFG_0_NUM 1 +#define E56G__RXS0_RXS_CFG_0_ADDR (E56G__BASEADDR + 0x0) + +typedef union { + struct { + u32 restart_training_ln0 : 1; + u32 training_enable_ln0 : 1; + u32 restart_training_ln1 : 1; + u32 training_enable_ln1 : 1; + u32 restart_training_ln2 : 1; + u32 training_enable_ln2 : 1; + u32 restart_training_ln3 : 1; + u32 training_enable_ln3 : 1; + u32 rsvd0 : 24; + }; + u32 reg; +} E56G__PMD_BASER_PMD_CONTROL; + +#define E56G__PMD_BASER_PMD_CONTROL_NUM 1 +#define E56G__PMD_BASER_PMD_CONTROL_ADDR (E56G__BASEADDR + 0x1640) + +typedef union { + struct { + u32 rx_to_tx_lpbk_en : 4; + u32 sel_wp_pmt_out : 4; + u32 sel_wp_pmt_clkout : 4; + u32 use_recent_marker_offset : 1; + u32 interrupt_debug_mode : 1; + u32 rsvd0 : 2; + u32 tx_ffe_coeff_update : 4; + u32 rsvd1 : 12; + }; + u32 reg; +} E56G__PMD_PMD_CFG_5; + +#define E56G__PMD_PMD_CFG_5_NUM 1 +#define E56G__PMD_PMD_CFG_5_ADDR (E56G__BASEADDR + 0x1414) + +typedef union { + struct { + u32 soft_reset : 1; + u32 pmd_en : 1; + u32 rsvd0 : 2; + u32 pll_refclk_sel : 2; + u32 rsvd1 : 2; + u32 pmd_mode : 1; + u32 rsvd2 : 3; + u32 tx_en_cfg : 4; + u32 rx_en_cfg : 4; + u32 pll_en_cfg : 2; + u32 rsvd3 : 2; + u32 pam4_precode_no_krt_en : 4; + u32 rsvd4 : 4; + }; + u32 reg; +} E56G__PMD_PMD_CFG_0; + +#define E56G__PMD_PMD_CFG_0_NUM 1 +#define E56G__PMD_PMD_CFG_0_ADDR (E56G__BASEADDR + 0x1400) + +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 
ovrd_en_rxs0_rx0_adc_intl_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_adapt_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_th_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_efuse_bits_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_in_i : 1; + u32 ovrd_en_rxs0_rx0_wp_pmt_out_o : 1; + u32 rsvd0 : 15; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_2; + +#define E56G__PMD_RXS0_OVRDEN_2_ADDR (E56G__BASEADDR + 0x1538) + +typedef union { + struct { + u32 ana_bbcdr_osc_range_sel_i : 2; + u32 rsvd0 : 2; + u32 ana_bbcdr_coarse_i : 4; + u32 ana_bbcdr_fine_i : 3; + u32 rsvd1 : 1; + u32 ana_bbcdr_ultrafine_i : 3; + u32 rsvd2 : 1; + u32 ana_bbcdr_divctrl_i : 2; + u32 rsvd3 : 2; + u32 ana_bbcdr_int_cstm_i : 5; + u32 rsvd4 : 3; + u32 ana_bbcdr_prop_step_i : 4; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_5; + +#define E56G__RXS0_ANA_OVRDVAL_5_ADDR (E56G__BASEADDR + 0xb4) + +typedef union { + struct { + u32 ana_adc_pictrl_quad_i : 2; + u32 rsvd0 : 2; + u32 ana_adc_clkdiv_i : 2; + u32 rsvd1 : 2; + u32 ana_test_adc_clkgen_i : 4; + u32 ana_vref_cnfg_i : 4; + u32 ana_adcgain_cal_swing_ctrl_i : 4; + u32 ana_adc_gain_i : 4; + u32 ana_adc_offset_i : 4; + u32 ana_ana_debug_sel_i : 4; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_11; + +#define E56G__RXS3_ANA_OVRDVAL_11_ADDR (E56G__BASEADDR + 0x6cc) + +typedef union { + struct { + u32 rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 rxs0_rx0_fom_en_i : 1; + u32 rxs0_rx0_idle_detect_en_i : 1; + u32 rxs0_rx0_idle_o : 1; + u32 rxs0_rx0_txffe_train_en_i : 1; + u32 rxs0_rx0_txffe_train_enack_o : 1; + u32 rxs0_rx0_txffe_train_done_o : 1; + u32 rxs0_rx0_vga_train_en_i : 1; + u32 rxs0_rx0_vga_train_done_o : 1; + u32 rxs0_rx0_ctle_train_en_i : 1; + u32 rxs0_rx0_ctle_train_done_o : 1; + u32 rxs0_rx0_cdr_en_i : 1; + u32 rxs0_rx0_cdr_rdy_o : 1; + u32 rxs0_rx0_ffe_train_en_i : 1; + u32 rxs0_rx0_ffe_train_done_o : 1; + u32 rxs0_rx0_mmpd_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_en_i : 1; + u32 rxs0_rx0_adc_intl_cal_done_o : 1; + u32 rxs0_rx0_adc_intl_cal_error_o : 1; + u32 rxs0_rx0_dfe_train_en_i : 1; + u32 rxs0_rx0_dfe_train_done_o : 1; + u32 rxs0_rx0_vga_adapt_en_i : 1; + u32 rxs0_rx0_vga_adapt_done_o : 1; + u32 rxs0_rx0_ctle_adapt_en_i : 1; + u32 rxs0_rx0_ctle_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs0_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs0_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs0_rx0_adc_gain_adapt_en_i : 1; + u32 rxs0_rx0_adc_gain_adapt_done_o : 1; + u32 rxs0_rx0_adc_gain_adapt_error_o : 1; + u32 rxs0_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDVAL_1; +#define E56G__PMD_RXS0_OVRDVAL_1_ADDR (E56G__BASEADDR + 0x1544) + +typedef union { + struct { + u32 rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 rxs1_rx0_fom_en_i : 1; + u32 rxs1_rx0_idle_detect_en_i : 1; + u32 rxs1_rx0_idle_o : 1; + u32 rxs1_rx0_txffe_train_en_i : 1; + u32 rxs1_rx0_txffe_train_enack_o : 1; + u32 rxs1_rx0_txffe_train_done_o : 1; + u32 rxs1_rx0_vga_train_en_i : 1; + u32 rxs1_rx0_vga_train_done_o : 1; + u32 rxs1_rx0_ctle_train_en_i : 1; + u32 rxs1_rx0_ctle_train_done_o : 1; + u32 rxs1_rx0_cdr_en_i : 1; + u32 rxs1_rx0_cdr_rdy_o : 1; + u32 rxs1_rx0_ffe_train_en_i : 1; + u32 rxs1_rx0_ffe_train_done_o : 1; + u32 rxs1_rx0_mmpd_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_en_i : 1; + u32 rxs1_rx0_adc_intl_cal_done_o : 1; + u32 rxs1_rx0_adc_intl_cal_error_o : 1; + u32 
rxs1_rx0_dfe_train_en_i : 1; + u32 rxs1_rx0_dfe_train_done_o : 1; + u32 rxs1_rx0_vga_adapt_en_i : 1; + u32 rxs1_rx0_vga_adapt_done_o : 1; + u32 rxs1_rx0_ctle_adapt_en_i : 1; + u32 rxs1_rx0_ctle_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs1_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs1_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs1_rx0_adc_gain_adapt_en_i : 1; + u32 rxs1_rx0_adc_gain_adapt_done_o : 1; + u32 rxs1_rx0_adc_gain_adapt_error_o : 1; + u32 rxs1_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDVAL_1; + +#define E56G__PMD_RXS1_OVRDVAL_1_ADDR (E56G__BASEADDR + 0x1570) + +typedef union { + struct { + u32 rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 rxs2_rx0_fom_en_i : 1; + u32 rxs2_rx0_idle_detect_en_i : 1; + u32 rxs2_rx0_idle_o : 1; + u32 rxs2_rx0_txffe_train_en_i : 1; + u32 rxs2_rx0_txffe_train_enack_o : 1; + u32 rxs2_rx0_txffe_train_done_o : 1; + u32 rxs2_rx0_vga_train_en_i : 1; + u32 rxs2_rx0_vga_train_done_o : 1; + u32 rxs2_rx0_ctle_train_en_i : 1; + u32 rxs2_rx0_ctle_train_done_o : 1; + u32 rxs2_rx0_cdr_en_i : 1; + u32 rxs2_rx0_cdr_rdy_o : 1; + u32 rxs2_rx0_ffe_train_en_i : 1; + u32 rxs2_rx0_ffe_train_done_o : 1; + u32 rxs2_rx0_mmpd_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_en_i : 1; + u32 rxs2_rx0_adc_intl_cal_done_o : 1; + u32 rxs2_rx0_adc_intl_cal_error_o : 1; + u32 rxs2_rx0_dfe_train_en_i : 1; + u32 rxs2_rx0_dfe_train_done_o : 1; + u32 rxs2_rx0_vga_adapt_en_i : 1; + u32 rxs2_rx0_vga_adapt_done_o : 1; + u32 rxs2_rx0_ctle_adapt_en_i : 1; + u32 rxs2_rx0_ctle_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs2_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs2_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs2_rx0_adc_gain_adapt_en_i : 1; + u32 rxs2_rx0_adc_gain_adapt_done_o : 1; + u32 rxs2_rx0_adc_gain_adapt_error_o : 1; + u32 rxs2_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDVAL_1; + +#define E56G__PMD_RXS2_OVRDVAL_1_ADDR (E56G__BASEADDR + 0x159c) + +typedef union { + struct { + u32 rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 rxs3_rx0_fom_en_i : 1; + u32 rxs3_rx0_idle_detect_en_i : 1; + u32 rxs3_rx0_idle_o : 1; + u32 rxs3_rx0_txffe_train_en_i : 1; + u32 rxs3_rx0_txffe_train_enack_o : 1; + u32 rxs3_rx0_txffe_train_done_o : 1; + u32 rxs3_rx0_vga_train_en_i : 1; + u32 rxs3_rx0_vga_train_done_o : 1; + u32 rxs3_rx0_ctle_train_en_i : 1; + u32 rxs3_rx0_ctle_train_done_o : 1; + u32 rxs3_rx0_cdr_en_i : 1; + u32 rxs3_rx0_cdr_rdy_o : 1; + u32 rxs3_rx0_ffe_train_en_i : 1; + u32 rxs3_rx0_ffe_train_done_o : 1; + u32 rxs3_rx0_mmpd_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_en_i : 1; + u32 rxs3_rx0_adc_intl_cal_done_o : 1; + u32 rxs3_rx0_adc_intl_cal_error_o : 1; + u32 rxs3_rx0_dfe_train_en_i : 1; + u32 rxs3_rx0_dfe_train_done_o : 1; + u32 rxs3_rx0_vga_adapt_en_i : 1; + u32 rxs3_rx0_vga_adapt_done_o : 1; + u32 rxs3_rx0_ctle_adapt_en_i : 1; + u32 rxs3_rx0_ctle_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_en_i : 1; + u32 rxs3_rx0_adc_ofst_adapt_done_o : 1; + u32 rxs3_rx0_adc_ofst_adapt_error_o : 1; + u32 rxs3_rx0_adc_gain_adapt_en_i : 1; + u32 rxs3_rx0_adc_gain_adapt_done_o : 1; + u32 rxs3_rx0_adc_gain_adapt_error_o : 1; + u32 rxs3_rx0_adc_intl_adapt_en_i : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDVAL_1; + +#define E56G__PMD_RXS3_OVRDVAL_1_ADDR (E56G__BASEADDR + 0x15c8) + +typedef union { + struct { + u32 ctrl_fsm_rx0_st : 6; + u32 rsvd0 : 2; + u32 ctrl_fsm_rx1_st : 6; + u32 rsvd1 : 2; + u32 ctrl_fsm_rx2_st : 6; + u32 rsvd2 : 2; + u32 ctrl_fsm_rx3_st : 6; + u32 rsvd3 : 2; + }; + u32 reg; +} E56G__PMD_CTRL_FSM_RX_STAT_0; + +#define 
E56G__PMD_CTRL_FSM_RX_STAT_0_ADDR (E56G__BASEADDR + 0x14fc) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_0; +#define E56G__RXS0_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR + 0xa0) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_0; + +#define E56G__RXS1_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR + 0x2a0) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} 
E56G__RXS2_ANA_OVRDVAL_0; + +#define E56G__RXS2_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR + 0x4a0) + +typedef union { + struct { + u32 ana_en_rterm_i : 1; + u32 ana_en_bias_i : 1; + u32 ana_en_ldo_i : 1; + u32 ana_rstn_i : 1; + u32 ana_en_blwc_i : 1; + u32 ana_en_acc_amp_i : 1; + u32 ana_en_acc_dac_i : 1; + u32 ana_en_afe_offset_cal_i : 1; + u32 ana_clk_offsetcal_i : 1; + u32 ana_acc_os_comp_o : 1; + u32 ana_en_ctle_i : 1; + u32 ana_ctle_bypass_i : 1; + u32 ana_en_ctlecdr_i : 1; + u32 ana_cdr_ctle_boost_i : 1; + u32 ana_en_vga_i : 1; + u32 ana_en_bbcdr_vco_i : 1; + u32 ana_bbcdr_vcofilt_byp_i : 1; + u32 ana_en_bbcdr_i : 1; + u32 ana_en_bbcdr_clk_i : 1; + u32 ana_bbcdr_en_elv_cnt_ping0_pong1_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_ping_i : 1; + u32 ana_bbcdr_clrz_elv_cnt_pong_i : 1; + u32 ana_bbcdr_clrz_cnt_sync_i : 1; + u32 ana_bbcdr_en_elv_cnt_rd_i : 1; + u32 ana_bbcdr_elv_cnt_ping_0_o : 1; + u32 ana_bbcdr_elv_cnt_ping_90_o : 1; + u32 ana_bbcdr_elv_cnt_ping_180_o : 1; + u32 ana_bbcdr_elv_cnt_ping_270_o : 1; + u32 ana_bbcdr_elv_cnt_pong_0_o : 1; + u32 ana_bbcdr_elv_cnt_pong_90_o : 1; + u32 ana_bbcdr_elv_cnt_pong_180_o : 1; + u32 ana_bbcdr_elv_cnt_pong_270_o : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_0; + +#define E56G__RXS3_ANA_OVRDVAL_0_ADDR (E56G__BASEADDR + 0x6a0) + +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDEN_0; + +#define E56G__RXS0_ANA_OVRDEN_0_ADDR (E56G__BASEADDR + 0x8c) + +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 
ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDEN_0; + +#define E56G__RXS1_ANA_OVRDEN_0_ADDR (E56G__BASEADDR + 0x28c) + +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDEN_0; + +#define E56G__RXS2_ANA_OVRDEN_0_ADDR (E56G__BASEADDR + 0x48c) + +typedef union { + struct { + u32 ovrd_en_ana_en_rterm_i : 1; + u32 ovrd_en_ana_trim_rterm_i : 1; + u32 ovrd_en_ana_en_bias_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_en_ldo_i : 1; + u32 ovrd_en_ana_test_ldo_i : 1; + u32 ovrd_en_ana_rstn_i : 1; + u32 ovrd_en_ana_en_blwc_i : 1; + u32 ovrd_en_ana_en_acc_amp_i : 1; + u32 ovrd_en_ana_en_acc_dac_i : 1; + u32 ovrd_en_ana_en_afe_offset_cal_i : 1; + u32 ovrd_en_ana_clk_offsetcal_i : 1; + u32 ovrd_en_ana_acc_os_code_i : 1; + u32 ovrd_en_ana_acc_os_comp_o : 1; + u32 ovrd_en_ana_test_acc_i : 1; + u32 ovrd_en_ana_en_ctle_i : 1; + u32 ovrd_en_ana_ctle_bypass_i : 1; + u32 ovrd_en_ana_ctle_cz_cstm_i : 1; + u32 ovrd_en_ana_ctle_cload_cstm_i : 1; + u32 ovrd_en_ana_test_ctle_i : 1; + u32 ovrd_en_ana_lfeq_ctrl_cstm_i : 1; + u32 ovrd_en_ana_en_ctlecdr_i : 1; + u32 ovrd_en_ana_cdr_ctle_boost_i : 1; + u32 ovrd_en_ana_test_ctlecdr_i : 1; + u32 ovrd_en_ana_en_vga_i : 1; + u32 ovrd_en_ana_vga_gain_cstm_i : 1; + u32 ovrd_en_ana_vga_cload_in_cstm_i : 1; + u32 ovrd_en_ana_test_vga_i : 1; + u32 ovrd_en_ana_en_bbcdr_vco_i : 1; + u32 ovrd_en_ana_bbcdr_osc_range_sel_i : 1; + u32 ovrd_en_ana_sel_vga_gain_byp_i : 1; + u32 ovrd_en_ana_vga2_gain_cstm_i : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDEN_0; + +#define E56G__RXS3_ANA_OVRDEN_0_NUM 1 +#define E56G__RXS3_ANA_OVRDEN_0_ADDR (E56G__BASEADDR + 0x68c) + +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS0_ANA_OVRDVAL_3; + +#define E56G__RXS0_ANA_OVRDVAL_3_NUM 1 +#define E56G__RXS0_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR + 0xac) + +typedef union { + struct { + u32 
ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS1_ANA_OVRDVAL_3; + +#define E56G__RXS1_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR + 0x2ac) + +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS2_ANA_OVRDVAL_3; + +#define E56G__RXS2_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR + 0x4ac) + +typedef union { + struct { + u32 ana_ctle_cz_cstm_i : 5; + u32 rsvd0 : 3; + u32 ana_ctle_cload_cstm_i : 5; + u32 rsvd1 : 3; + u32 ana_test_ctle_i : 2; + u32 rsvd2 : 2; + u32 ana_lfeq_ctrl_cstm_i : 4; + u32 ana_test_ctlecdr_i : 2; + u32 rsvd3 : 2; + u32 ana_vga_cload_in_cstm_i : 3; + u32 rsvd4 : 1; + }; + u32 reg; +} E56G__RXS3_ANA_OVRDVAL_3; + +#define E56G__RXS3_ANA_OVRDVAL_3_ADDR (E56G__BASEADDR + 0x6ac) + +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_rstn_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_divctrl_i : 1; + u32 ovrd_en_rxs0_rx0_bitclk_rate_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_width_i : 1; + u32 ovrd_en_rxs0_rx0_symdata_o : 1; + u32 ovrd_en_rxs0_rx0_precode_en_i : 1; + u32 ovrd_en_rxs0_rx0_signal_type_i : 1; + u32 ovrd_en_rxs0_rx0_sync_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_sync_o : 1; + u32 ovrd_en_rxs0_rx0_rate_select_i : 1; + u32 ovrd_en_rxs0_rx0_rterm_en_i : 1; + u32 ovrd_en_rxs0_rx0_bias_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_en_i : 1; + u32 ovrd_en_rxs0_rx0_ldo_rdy_i : 1; + u32 ovrd_en_rxs0_rx0_blwc_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_sel_i : 1; + u32 ovrd_en_rxs0_rx0_osc_en_i : 1; + u32 ovrd_en_rxs0_rx0_clkgencdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctlecdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_osc_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_osc_freq_error_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_samp_cal_err_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_ofst_cal_error_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_0; + +#define E56G__PMD_RXS0_OVRDEN_0_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_0_ADDR (E56G__BASEADDR + 0x1530) + +typedef union { + struct { + u32 ber_en : 1; + u32 rsvd0 : 3; + u32 read_mode_en : 1; + u32 rsvd1 : 3; + u32 err_cnt_mode_all0_one1 : 1; + u32 rsvd2 : 3; + u32 init_lfsr_mode_continue0_restart1 : 1; + u32 rsvd3 : 3; + u32 pattern_sel : 4; + u32 rsvd4 : 12; + }; + u32 reg; +} E56G__RXS0_DFT_1; + +#define E56G__RXS0_DFT_1_NUM 1 +#define E56G__RXS0_DFT_1_ADDR (E56G__BASEADDR + 0xec) + +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_fom_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs0_rx0_idle_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_en_i : 1; + 
u32 ovrd_en_rxs0_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs0_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs0_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs0_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs0_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs0_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs0_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_1; + +#define E56G__PMD_RXS0_OVRDEN_1_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_1_ADDR (E56G__BASEADDR + 0x1534) + +typedef union { + struct { + u32 ovrd_en_rxs0_rx0_sparein_i : 8; + u32 ovrd_en_rxs0_rx0_spareout_o : 8; + u32 rsvd0 : 16; + }; + u32 reg; +} E56G__PMD_RXS0_OVRDEN_3; + +#define E56G__PMD_RXS0_OVRDEN_3_NUM 1 +#define E56G__PMD_RXS0_OVRDEN_3_ADDR (E56G__BASEADDR + 0x153c) + +typedef union { + struct { + u32 vco_code_cont_adj_done_ovrd_en : 1; + u32 dfe_coeffl_ovrd_en : 1; + u32 dfe_coeffh_ovrd_en : 1; + u32 rsvd0 : 1; + u32 top_comp_th_ovrd_en : 1; + u32 mid_comp_th_ovrd_en : 1; + u32 bot_comp_th_ovrd_en : 1; + u32 rsvd1 : 1; + u32 level_target_ovrd_en : 4; + u32 ffe_coeff_c0to3_ovrd_en : 4; + u32 ffe_coeff_c4to7_ovrd_en : 4; + u32 ffe_coeff_c8to11_ovrd_en : 4; + u32 ffe_coeff_c12to15_ovrd_en : 4; + u32 ffe_coeff_update_ovrd_en : 1; + u32 rsvd2 : 3; + }; + u32 reg; +} E56G__RXS0_DIG_OVRDEN_1; + +#define E56G__RXS0_DIG_OVRDEN_1_NUM 1 +#define E56G__RXS0_DIG_OVRDEN_1_ADDR (E56G__BASEADDR + 0x160) + +typedef union { + struct { + u32 ovrd_en_rxs1_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_fom_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs1_rx0_idle_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs1_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs1_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs1_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs1_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs1_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS1_OVRDEN_1; + +#define 
E56G__PMD_RXS1_OVRDEN_1_ADDR (E56G__BASEADDR + 0x1560) + +typedef union { + struct { + u32 ovrd_en_rxs2_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_fom_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs2_rx0_idle_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs2_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs2_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs2_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs2_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs2_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS2_OVRDEN_1; + +#define E56G__PMD_RXS2_OVRDEN_1_ADDR (E56G__BASEADDR + 0x158c) + +typedef union { + struct { + u32 ovrd_en_rxs3_rx0_adc_gain_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_gain_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_fe_ofst_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_fom_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_detect_en_i : 1; + u32 ovrd_en_rxs3_rx0_idle_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_txffe_coeff_rst_i : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_enack_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_txffe_coeff_change_o : 1; + u32 ovrd_en_rxs3_rx0_vga_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_cdr_en_i : 1; + u32 ovrd_en_rxs3_rx0_cdr_rdy_o : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_ffe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_mmpd_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_en_i : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_done_o : 1; + u32 ovrd_en_rxs3_rx0_adc_intl_cal_error_o : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_en_i : 1; + u32 ovrd_en_rxs3_rx0_dfe_train_done_o : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_vga_adapt_done_o : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_en_i : 1; + u32 ovrd_en_rxs3_rx0_ctle_adapt_done_o : 1; + }; + u32 reg; +} E56G__PMD_RXS3_OVRDEN_1; + +#define E56G__PMD_RXS3_OVRDEN_1_ADDR (E56G__BASEADDR + 0x15b8) + +#define E56G__RXS0_FOM_18__ADDR (E56G__BASEADDR + 0x1f8) +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB 11 +#define E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB 0 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB 23 +#define E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB 12 +#define E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB 25 +#define 
E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB 25
+
+#define DEFAULT_TEMP 40
+#define HIGH_TEMP 70
+
+#define E56PHY_RX_RDY_ST 0x1B
+
+#define S25G_CMVAR_RANGE_H 0x1
+#define S25G_CMVAR_RANGE_L 0x0
+#define S25G_CMVAR_SEC_LOW_TH 0x1A
+#define S25G_CMVAR_SEC_HIGH_TH 0x1D
+#define S25G_CMVAR_UFINE_MAX 0x2
+#define S25G_CMVAR_FINE_MAX 0x7
+#define S25G_CMVAR_COARSE_MAX 0xF
+#define S25G_CMVAR_UFINE_UMAX_WRAP 0x0
+#define S25G_CMVAR_UFINE_FMAX_WRAP 0x0
+#define S25G_CMVAR_FINE_FMAX_WRAP 0x2
+#define S25G_CMVAR_UFINE_MIN 0x0
+#define S25G_CMVAR_FINE_MIN 0x0
+#define S25G_CMVAR_COARSE_MIN 0x1
+#define S25G_CMVAR_UFINE_UMIN_WRAP 0x2
+#define S25G_CMVAR_UFINE_FMIN_WRAP 0x2
+#define S25G_CMVAR_FINE_FMIN_WRAP 0x5
+
+#define S10G_CMVAR_RANGE_H 0x3
+#define S10G_CMVAR_RANGE_L 0x2
+#define S10G_CMVAR_SEC_LOW_TH 0x1A
+#define S10G_CMVAR_SEC_HIGH_TH 0x1D
+#define S10G_CMVAR_UFINE_MAX 0x7
+#define S10G_CMVAR_FINE_MAX 0x7
+#define S10G_CMVAR_COARSE_MAX 0xF
+#define S10G_CMVAR_UFINE_UMAX_WRAP 0x6
+#define S10G_CMVAR_UFINE_FMAX_WRAP 0x7
+#define S10G_CMVAR_FINE_FMAX_WRAP 0x1
+#define S10G_CMVAR_UFINE_MIN 0x0
+#define S10G_CMVAR_FINE_MIN 0x0
+#define S10G_CMVAR_COARSE_MIN 0x1
+#define S10G_CMVAR_UFINE_UMIN_WRAP 0x2
+#define S10G_CMVAR_UFINE_FMIN_WRAP 0x2
+#define S10G_CMVAR_FINE_FMIN_WRAP 0x5
+
+#define S10G_TX_FFE_CFG_MAIN 0x2c2c2c2c
+#define S10G_TX_FFE_CFG_PRE1 0x0
+#define S10G_TX_FFE_CFG_PRE2 0x0
+#define S10G_TX_FFE_CFG_POST 0x6060606
+#define S25G_TX_FFE_CFG_MAIN 49
+#define S25G_TX_FFE_CFG_PRE1 4
+#define S25G_TX_FFE_CFG_PRE2 1
+#define S25G_TX_FFE_CFG_POST 9
+
+/* for DAC test */
+#define S25G_TX_FFE_CFG_DAC_MAIN 0x2a
+#define S25G_TX_FFE_CFG_DAC_PRE1 0x3
+#define S25G_TX_FFE_CFG_DAC_PRE2 0x0
+#define S25G_TX_FFE_CFG_DAC_POST 0x11
+
+#define BYPASS_CTLE_TAG 0x0
+
+#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1
+#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0
+#define S10G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0
+#define S10G_PHY_RX_CTLE_TAP_FRACP1 0x18
+#define S10G_PHY_RX_CTLE_TAP_FRACP2 0x0
+#define S10G_PHY_RX_CTLE_TAP_FRACP3 0x0
+
+#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT1 0x1
+#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT2 0x0
+#define S25G_PHY_RX_CTLE_TAPWT_WEIGHT3 0x0
+#define S25G_PHY_RX_CTLE_TAP_FRACP1 0x18
+#define S25G_PHY_RX_CTLE_TAP_FRACP2 0x0
+#define S25G_PHY_RX_CTLE_TAP_FRACP3 0x0
+
+#define TXGBE_E56_PHY_LINK_UP 0x4
+
+#define __bf_shf_m(x) (ffs(x) - 1)
+
+#define FIELD_PREP_M(_mask, _val) \
+	({ \
+		((typeof(_mask))(_val) << __bf_shf_m(_mask)) & (_mask); \
+	})
+
+/* FIELD_GET_M() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: value of entire bitfield
+ *
+ * FIELD_GET_M() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
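+ *
+ * Illustrative usage, not taken from the driver itself; it assumes
+ * FORMAT_NOPARENTHERSES() expands to a GENMASK()-style shifted mask,
+ * as the field definitions above suggest. Reading and rewriting
+ * VGA_TARGET, bits [18:12] of RXS VGA_TRAINING_0, would look like:
+ *
+ *	u32 val, tgt;
+ *
+ *	val = rd32_ephy(hw, E56PHY_RXS_VGA_TRAINING_0_ADDR);
+ *	tgt = FIELD_GET_M(E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, val);
+ *	val &= ~E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET;
+ *	val |= FIELD_PREP_M(E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, tgt);
+ *	txgbe_wr32_ephy(hw, E56PHY_RXS_VGA_TRAINING_0_ADDR, val);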
+ */
+#define FIELD_GET_M(_mask, _reg) \
+	({ \
+		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf_m(_mask)); \
+	})
+
+void txgbe_field_set(u32 *psrcdata, u32 bithigh, u32 bitlow, u32 setvalue);
+int txgbe_e56_rxrd_sec_code(struct txgbe_hw *hw, int *SECOND_CODE);
+u32 txgbe_e56_cfg_25g(struct txgbe_hw *hw);
+u32 txgbe_e56_cfg_10g(struct txgbe_hw *hw);
+u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw);
+
+int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed);
+u32 txgbe_e56_cfg_temp(struct txgbe_hw *hw);
+int txgbe_e56_get_temp(struct txgbe_hw *hw, int *temp);
+int txgbe_e56_reconfig_rx(struct txgbe_hw *hw, u32 speed);
+int txgbe_e56_config_rx_40G(struct txgbe_hw *hw, u32 speed);
+int txgbe_temp_track_seq(struct txgbe_hw *hw, u32 speed);
+int txgbe_temp_track_seq_40g(struct txgbe_hw *hw, u32 speed);
+int txgbe_get_cur_fec_mode(struct txgbe_hw *hw);
+int txgbe_e56_set_fec_mode(struct txgbe_hw *hw, u8 fec_mode);
+int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up);
+s32 txgbe_e56_check_phy_link(struct txgbe_hw *hw, u32 *speed, bool *link_up);
+
+#endif /* _TXGBE_E56_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c
new file mode 100644
index 000000000000..49678eb07ef7
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.c
@@ -0,0 +1,2791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe_e56.h"
+#include "txgbe_hw.h"
+
+#include "txgbe.h"
+#include "txgbe_type.h"
+#include "txgbe_e56_bp.h"
+#include "txgbe_bp.h"
+
+static int txgbe_e56_set_rxs_ufine_lemax(struct txgbe_adapter *adapter,
+					 u32 speed)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 rdata, addr;
+	u32 ULTRAFINE_CODE[4] = { 0 };
+	int lane_num = 0, lane_idx = 0;
+	u32 CMVAR_UFINE_MAX = 0;
+
+	switch (speed) {
+	case 10:
+		CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+		lane_num = 1;
+		break;
+	case 40:
+		CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX;
+		lane_num = 4;
+		break;
+	case 25:
+		CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX;
+		lane_num = 1;
+		break;
+	default:
+		kr_dbg(KR_MODE, "%s %d: Invalid speed\n", __func__, __LINE__);
+		break;
+	}
+
+	for (lane_idx = 0; lane_idx < lane_num; lane_idx++) {
+		/* (ii) read each lane's rx ana_bbcdr_ultrafine_i, bits [14:12] */
+		addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
+		       (E56PHY_RXS_OFFSET * lane_idx);
+		rdata = rd32_ephy(hw, addr);
+		ULTRAFINE_CODE[lane_idx] = FIELD_GET_M(GENMASK(14, 12), rdata);
+		kr_dbg(KR_MODE,
+		       "ULTRAFINE_CODE[%d] = %d, CMVAR_UFINE_MAX: %x\n",
+		       lane_idx, ULTRAFINE_CODE[lane_idx], CMVAR_UFINE_MAX);
+	}
+
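+	/*
+	 * Clamp each lane's ultrafine CDR code to the per-speed maximum:
+	 * step the code down by one, write it into ANA_OVRDVAL_5[14:12],
+	 * latch it via the ovrd_en_ana_bbcdr_ultrafine override (bit 3 of
+	 * ANA_OVRDEN_1), and allow at least 1 ms of settle time per step.
+	 */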
+	for (lane_idx = 0; lane_idx < lane_num; lane_idx++) {
+		/* b. Perform the below logic sequence */
+		while (ULTRAFINE_CODE[lane_idx] > CMVAR_UFINE_MAX) {
+			ULTRAFINE_CODE[lane_idx] -= 1;
+			addr = E56G__RXS0_ANA_OVRDVAL_5_ADDR +
+			       (E56PHY_RXS_OFFSET * lane_idx);
+			rdata = rd32_ephy(hw, addr);
+			txgbe_field_set(&rdata, 14, 12, ULTRAFINE_CODE[lane_idx]);
+			txgbe_wr32_ephy(hw, addr, rdata);
+
+			/* set ovrd_en_ana_bbcdr_ultrafine=1 to override the ASIC value */
+			addr = E56G__RXS0_ANA_OVRDEN_1_ADDR +
+			       (E56PHY_RXS_OFFSET * lane_idx);
+			rdata = rd32_ephy(hw, addr);
+			txgbe_wr32_ephy(hw, addr, rdata | BIT(3));
+
+			/* wait 1 millisecond or longer */
+			usec_delay(1000);
+		}
+	}
+
+	return 0;
+}
+
+static int txgbe_e56_rxs_oscinit_temp_track(struct txgbe_adapter *adapter,
+					    u32 speed)
+{
+	int OFFSET_CENTRE_RANGE_H[4] = { 0 }, OFFSET_CENTRE_RANGE_L[4] = {},
+	    RANGE_FINAL[4] = {};
+	int RX_COARSE_MID_TD, CMVAR_RANGE_H = 0, CMVAR_RANGE_L = 0;
+	struct txgbe_hw *hw = &adapter->hw;
+	int status = 0, lane_num = 0;
+	int T = 40, lane_id = 0;
+	u32 addr, rdata;
+
+	/* Set CMVAR_RANGE_H/L based on the link speed mode */
+	switch (speed) {
+	case 10:
+		CMVAR_RANGE_H = S10G_CMVAR_RANGE_H;
+		CMVAR_RANGE_L = S10G_CMVAR_RANGE_L;
+		lane_num = 1;
+		break;
+	case 40:
+		CMVAR_RANGE_H = S10G_CMVAR_RANGE_H;
+		CMVAR_RANGE_L = S10G_CMVAR_RANGE_L;
+		lane_num = 4;
+		break;
+	case 25:
+		CMVAR_RANGE_H = S25G_CMVAR_RANGE_H;
+		CMVAR_RANGE_L = S25G_CMVAR_RANGE_L;
+		lane_num = 1;
+		break;
+	default:
+		kr_dbg(KR_MODE, "%s %d: Invalid speed\n", __func__, __LINE__);
+		break;
+	}
+
+	/* 1. Read the temperature T just before RXS is enabled. */
+	txgbe_e56_get_temp(hw, &T);
+
+	/* 2. Define software variable RX_COARSE_MID_TD */
+	if (T < -5)
+		RX_COARSE_MID_TD = 10;
+	else if (T < 30)
+		RX_COARSE_MID_TD = 9;
+	else if (T < 65)
+		RX_COARSE_MID_TD = 8;
+	else if (T < 100)
+		RX_COARSE_MID_TD = 7;
+	else
+		RX_COARSE_MID_TD = 6;
+
+	for (lane_id = 0; lane_id < lane_num; lane_id++) {
+		addr = 0x0b4 + (0x200 * lane_id);
+		rdata = rd32_ephy(hw, addr);
+		txgbe_field_set(&rdata, 1, 0, CMVAR_RANGE_H);
+		txgbe_wr32_ephy(hw, addr, rdata);
+
+		addr = 0x08c + (0x200 * lane_id);
+		rdata = rd32_ephy(hw, addr);
+		txgbe_field_set(&rdata, 29, 29, 0x1);
+		txgbe_wr32_ephy(hw, addr, rdata);
+
+		addr = 0x1540 + (0x02c * lane_id);
+		rdata = rd32_ephy(hw, addr);
+		txgbe_field_set(&rdata, 22, 22, 0x0);
+		txgbe_wr32_ephy(hw, addr, rdata);
+
+		addr = 0x1530 + (0x02c * lane_id);
+		rdata = rd32_ephy(hw, addr);
+		txgbe_field_set(&rdata, 27, 27, 0x1);
+		txgbe_wr32_ephy(hw, addr, rdata);
+	}
+	rdata = rd32_ephy(hw, 0x1400);
+	txgbe_field_set(&rdata, 19, 16, GENMASK(lane_num - 1, 0));
+	txgbe_wr32_ephy(hw, 0x1400, rdata);
+	status |= read_poll_timeout(rd32_ephy, rdata,
+				    (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) ==
+				     (0x09090909 & GENMASK(8 * lane_num - 1, 0))),
+				    100, 200000, false, hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR);
+	if (status)
+		kr_dbg(KR_MODE, "Wait fsm_rx_sts 1 = %x : %d, Wait rx_sts %s.\n",
+		       rdata, status, status ? "FAILED" : "SUCCESS");
+
+	for (lane_id = 0; lane_id < lane_num; lane_id++) {
+		addr = 0x0b4 + (0x0200 * lane_id);
+		rdata = rd32_ephy(hw, addr);
+		OFFSET_CENTRE_RANGE_H[lane_id] = (rdata >> 4) & 0xf;
+		/* distance of the calibrated coarse code from RX_COARSE_MID_TD */
+		if (OFFSET_CENTRE_RANGE_H[lane_id] > RX_COARSE_MID_TD)
+			OFFSET_CENTRE_RANGE_H[lane_id] =
+				OFFSET_CENTRE_RANGE_H[lane_id] -
+				RX_COARSE_MID_TD;
+		else
+			OFFSET_CENTRE_RANGE_H[lane_id] =
+				RX_COARSE_MID_TD -
+				OFFSET_CENTRE_RANGE_H[lane_id];
+	}
+
+	/* 7. Do SEQ::RX_DISABLE to disable RXS. */
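+	/*
+	 * Note (inferred from this sequence, not from a datasheet): RX
+	 * lanes are disabled by clearing RX_EN_CFG, bits [19:16] of
+	 * PMD_CFG_0 (0x1400); the poll below then waits for each active
+	 * lane's ctrl_fsm_rx*_st field in CTRL_FSM_RX_STAT_0 to read
+	 * back 0x21, just as 0x09 is polled for after enabling lanes.
+	 */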
+ rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 19, 16, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) == + (0x21212121 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 2 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? "FAILED" : "SUCCESS"); + rdata = rd32_ephy(hw, 0x15ec); + txgbe_wr32_ephy(hw, 0x15ec, rdata); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 0, CMVAR_RANGE_L); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x08c + (0x200 * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 29, 29, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1540 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 22, 22, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1530 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 27, 27, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 19, 16, 0xf); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) == + (0x09090909 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 3 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? "FAILED" : "SUCCESS"); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x0200 * lane_id); + rdata = rd32_ephy(hw, addr); + OFFSET_CENTRE_RANGE_L[lane_id] = (rdata >> 4) & 0xf; + if (OFFSET_CENTRE_RANGE_L[lane_id] > RX_COARSE_MID_TD) + OFFSET_CENTRE_RANGE_L[lane_id] = + OFFSET_CENTRE_RANGE_L[lane_id] - + RX_COARSE_MID_TD; + else + OFFSET_CENTRE_RANGE_L[lane_id] = + RX_COARSE_MID_TD - + OFFSET_CENTRE_RANGE_L[lane_id]; + } + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + RANGE_FINAL[lane_id] = + OFFSET_CENTRE_RANGE_L[lane_id] < + OFFSET_CENTRE_RANGE_H[lane_id] ? + CMVAR_RANGE_L : + CMVAR_RANGE_H; + kr_dbg(KR_MODE, + "lane_id:%d-RANGE_L:%x-RANGE_H:%x-RANGE_FINAL:%x\n", + lane_id, OFFSET_CENTRE_RANGE_L[lane_id], + OFFSET_CENTRE_RANGE_H[lane_id], RANGE_FINAL[lane_id]); + } + + //7. Do SEQ::RX_DISABLE to disable RXS. + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 19, 16, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) == + (0x21212121 & GENMASK(8 * lane_num - 1, 0))), + 100, 200000, false, hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + if (status) + kr_dbg(KR_MODE, "Wait fsm_rx_sts 4 = %x : %d, Wait rx_sts %s.\n", + rdata, status, status ? 
"FAILED" : "SUCCESS"); + rdata = rd32_ephy(hw, 0x15ec); + txgbe_wr32_ephy(hw, 0x15ec, rdata); + + for (lane_id = 0; lane_id < lane_num; lane_id++) { + addr = 0x0b4 + (0x0200 * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 0, RANGE_FINAL[lane_id]); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 0, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 3, 3, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 16, 16, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 23, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 17, 17, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 24, 24, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 31, 31, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_id * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 6, 6, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1530 + (0x02c * lane_id); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 27, 27, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + + //Do SEQ::RX_ENABLE + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1400, rdata); + + return status; +} + +static int txgbe_e56_rxs_post_cdr_lock_temp_track_seq(struct txgbe_adapter *adapter, + u32 speed) +{ + struct txgbe_hw *hw = &adapter->hw; + + int status = 0; + u32 rdata; + int SECOND_CODE; + int COARSE_CODE; + int FINE_CODE; + int ULTRAFINE_CODE; + + int CMVAR_SEC_LOW_TH; + int CMVAR_UFINE_MAX = 0; + int CMVAR_FINE_MAX; + int CMVAR_UFINE_UMAX_WRAP = 0; + int CMVAR_COARSE_MAX; + int CMVAR_UFINE_FMAX_WRAP = 0; + int CMVAR_FINE_FMAX_WRAP = 0; + int CMVAR_SEC_HIGH_TH; + int CMVAR_UFINE_MIN; + int CMVAR_FINE_MIN; + int CMVAR_UFINE_UMIN_WRAP; + int CMVAR_COARSE_MIN; + int CMVAR_UFINE_FMIN_WRAP; + int CMVAR_FINE_FMIN_WRAP; + + if (speed == 10) { + CMVAR_SEC_LOW_TH = S10G_CMVAR_SEC_LOW_TH; + CMVAR_UFINE_MAX = S10G_CMVAR_UFINE_MAX; + CMVAR_FINE_MAX = S10G_CMVAR_FINE_MAX; + CMVAR_UFINE_UMAX_WRAP = S10G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S10G_CMVAR_COARSE_MAX; + CMVAR_UFINE_FMAX_WRAP = S10G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S10G_CMVAR_FINE_FMAX_WRAP; + CMVAR_SEC_HIGH_TH = S10G_CMVAR_SEC_HIGH_TH; + CMVAR_UFINE_MIN = S10G_CMVAR_UFINE_MIN; + CMVAR_FINE_MIN = S10G_CMVAR_FINE_MIN; + CMVAR_UFINE_UMIN_WRAP = S10G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S10G_CMVAR_COARSE_MIN; + CMVAR_UFINE_FMIN_WRAP = 
S10G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S10G_CMVAR_FINE_FMIN_WRAP; + } else if (speed == 25) { + CMVAR_SEC_LOW_TH = S25G_CMVAR_SEC_LOW_TH; + CMVAR_UFINE_MAX = S25G_CMVAR_UFINE_MAX; + CMVAR_FINE_MAX = S25G_CMVAR_FINE_MAX; + CMVAR_UFINE_UMAX_WRAP = S25G_CMVAR_UFINE_UMAX_WRAP; + CMVAR_COARSE_MAX = S25G_CMVAR_COARSE_MAX; + CMVAR_UFINE_FMAX_WRAP = S25G_CMVAR_UFINE_FMAX_WRAP; + CMVAR_FINE_FMAX_WRAP = S25G_CMVAR_FINE_FMAX_WRAP; + CMVAR_SEC_HIGH_TH = S25G_CMVAR_SEC_HIGH_TH; + CMVAR_UFINE_MIN = S25G_CMVAR_UFINE_MIN; + CMVAR_FINE_MIN = S25G_CMVAR_FINE_MIN; + CMVAR_UFINE_UMIN_WRAP = S25G_CMVAR_UFINE_UMIN_WRAP; + CMVAR_COARSE_MIN = S25G_CMVAR_COARSE_MIN; + CMVAR_UFINE_FMIN_WRAP = S25G_CMVAR_UFINE_FMIN_WRAP; + CMVAR_FINE_FMIN_WRAP = S25G_CMVAR_FINE_FMIN_WRAP; + } + + status |= txgbe_e56_rxrd_sec_code(hw, &SECOND_CODE); + + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + COARSE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_coarse_i); + FINE_CODE = EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i); + ULTRAFINE_CODE = + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_ultrafine_i); + + if (SECOND_CODE <= CMVAR_SEC_LOW_TH) { + if (ULTRAFINE_CODE < CMVAR_UFINE_MAX) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i, + ULTRAFINE_CODE + 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE < CMVAR_FINE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + FINE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE < CMVAR_COARSE_MAX) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + CMVAR_FINE_FMAX_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_coarse_i) = COARSE_CODE + 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, + "ERROR: (SECOND_CODE <= CMVAR_SEC_LOW_TH) temperature tracking occurs Error condition\n"); + } + } else if (SECOND_CODE >= CMVAR_SEC_HIGH_TH) { + if (ULTRAFINE_CODE > CMVAR_UFINE_MIN) { + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i, + ULTRAFINE_CODE - 1); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (FINE_CODE > CMVAR_FINE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_UMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + FINE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + 
EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else if (COARSE_CODE > CMVAR_COARSE_MIN) { + EPHY_RREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_ultrafine_i) = + CMVAR_UFINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, ana_bbcdr_fine_i) = + CMVAR_FINE_FMIN_WRAP; + EPHY_XFLD(E56G__RXS0_ANA_OVRDVAL_5, + ana_bbcdr_coarse_i) = COARSE_CODE - 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDVAL_5); + EPHY_RREG(E56G__RXS0_ANA_OVRDEN_1); + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_coarse_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_fine_i) = 1; + EPHY_XFLD(E56G__RXS0_ANA_OVRDEN_1, + ovrd_en_ana_bbcdr_ultrafine_i) = 1; + EPHY_WREG(E56G__RXS0_ANA_OVRDEN_1); + } else { + kr_dbg(KR_MODE, + "ERROR: (SECOND_CODE >= CMVAR_SEC_HIGH_TH) temperature tracking occurs Error condition\n"); + } + } + + return status; +} + +static int txgbe_e56_ctle_bypass_seq(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_0, ana_ctle_bypass_i, 1); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_bypass_i, 1); + + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDVAL_3, ana_ctle_cz_cstm_i, 0); + txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_0, + ovrd_en_ana_ctle_cz_cstm_i, 1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDVAL_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_en_i) = 0; + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, rxs0_rx0_ctle_train_done_o) = 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDVAL_1); + + EPHY_RREG(E56G__PMD_RXS0_OVRDEN_1); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_en_i) = + 1; + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, ovrd_en_rxs0_rx0_ctle_train_done_o) = + 1; + EPHY_WREG(E56G__PMD_RXS0_OVRDEN_1); + + return status; +} + +static int txgbe_e56_rxs_adc_adapt_seq(struct txgbe_adapter *adapter, u32 bypass_ctle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + u32 rdata = 0, addr = 0; + int status = 0; + + int timer = 0, j = 0; + + switch (adapter->bp_link_mode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait RXS0-3_OVRDVAL[1]::rxs0-3_rx0_cdr_rdy_o = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(12)), + 100, 200000, false, hw, 0x1544); + if (status) + kr_dbg(KR_MODE, "rxs%d_rx0_cdr_rdy_o = %x, %s.\n", + lane_idx, rdata, status ? "FAILED" : "SUCCESS"); + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + //4. Disable VGA and CTLE training so that they don't interfere with ADC calibration + //a. Set ALIAS::RXS::VGA_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 7, 7, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 14, 14, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Set ALIAS::RXS::CTLE_TRAIN_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 9, 9, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //5. 
Perform ADC interleaver calibration + //a. Remove the OVERRIDE on ALIAS::RXS::ADC_INTL_CAL_DONE + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 16, 16, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_intl_cal_done_o bit17 = 1 */ + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(17)), + 100, 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_intl_cal_done_o = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* 6. Perform ADC offset adaptation and ADC gain adaptation, + * repeat them a few times and after that keep it disabled. + */ + for (j = 0; j < 16; j++) { + //a. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //b. Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_ofst_adapt_done_o bit26 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, + !(rdata & BIT(26)), 100, + 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_ofst_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //c. ALIAS::RXS::ADC_OFST_ADAPT_EN = 0b0 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + //d. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b1 + rdata = 0x0000; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 28, 28, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + //e. Wait for 1ms or greater + //usec_delay(1000); + /* set ovrd_en_rxs0_rx0_adc_ofst_adapt_done_o bit1=0 */ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 1, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + /* Wait rxs0_rx0_adc_gain_adapt_done_o bit29 = 0 */ + status = read_poll_timeout(rd32_ephy, rdata, + !(rdata & BIT(29)), 100, + 200000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, + "rxs0_rx0_adc_gain_adapt_done_o %d = %x, %s.\n", + j, rdata, status ? "FAILED" : "SUCCESS"); + + //f. ALIAS::RXS::ADC_GAIN_ADAPT_EN = 0b0 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + //g. Repeat #a to #f total 16 times + + /* 7. Perform ADC interleaver adaptation for 10ms or greater, + * and after that disable it + */ + //a. ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b1 + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 31, 31, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + //b. Wait for 10ms or greater + msleep(20); + + //c. 
ALIAS::RXS::ADC_INTL_ADAPT_EN = 0b0 + /* set ovrd_en_rxs0_rx0_adc_intl_adapt_en_i=0*/ + addr = 0x1538 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 6, 6, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* 8. Now re-enable VGA and CTLE trainings, so that it continues + * to adapt tracking changes in temperature or voltage + * <1>Set ALIAS::RXS::VGA_TRAIN_EN = 0b1 + */ + /* set rxs0_rx0_vga_train_en_i=1*/ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 7, 7, 0x1); + if (bypass_ctle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_en_i) = 1; + txgbe_wr32_ephy(hw, addr, rdata); + + //<2>wait for ALIAS::RXS::VGA_TRAIN_DONE = 1 + /* set ovrd_en_rxs0_rx0_vga_train_done_o = 0*/ + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 15, 15, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + /* Wait rxs0_rx0_vga_train_done_o bit8 = 0 */ + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + status = read_poll_timeout(rd32_ephy, rdata, (rdata & BIT(8)), + 100, 300000, false, hw, addr); + if (status) + kr_dbg(KR_MODE, "rxs0_rx0_vga_train_done_o = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + if (bypass_ctle == 0) { + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_done_o) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0; + timer = 0; + addr = 0x1544 + (E56PHY_PMD_RX_OFFSET * lane_idx); + while (EPHY_XFLD(E56G__PMD_RXS0_OVRDVAL_1, + rxs0_rx0_ctle_train_done_o) != 1) { + rdata = rd32_ephy(hw, addr); + usleep_range(500, 1000); + + if (timer++ > PHYINIT_TIMEOUT) + break; + } + } + + //a. Remove the OVERRIDE on ALIAS::RXS::VGA_TRAIN_EN + addr = 0x1534 + (E56PHY_PMD_RX_OFFSET * lane_idx); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 15, 15, 0); + //b. 
Remove the OVERRIDE on ALIAS::RXS::CTLE_TRAIN_EN + if (bypass_ctle == 0) + EPHY_XFLD(E56G__PMD_RXS0_OVRDEN_1, + ovrd_en_rxs0_rx0_ctle_train_en_i) = 0; + txgbe_wr32_ephy(hw, addr, rdata); + } + + return status; +} + +static int txgbe_e56_rxs_calib_adapt_seq(struct txgbe_adapter *adapter, u8 bplinkmode, + u32 bypass_ctle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, lane_idx = 0; + int status = 0; + u32 rdata, addr; + + switch (bplinkmode) { + case 10: + lane_num = 1; + break; + case 40: + lane_num = 4; + break; + case 25: + lane_num = 1; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 25, 25, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 0, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 28, 28, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 3, 3, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 16, 16, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 23, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 17, 17, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1534 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 24, 24, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1544 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 31, 31, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x1538 + (lane_idx * E56PHY_PMD_RX_OFFSET); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 6, 6, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } + if (bypass_ctle != 0) + status |= txgbe_e56_ctle_bypass_seq(adapter); + + status |= txgbe_e56_rxs_oscinit_temp_track(adapter, bplinkmode); + + /* Wait an fsm_rx_sts 25G */ + kr_dbg(KR_MODE, + "Wait CTRL_FSM_RX_STAT[0]::ctrl_fsm_rx0_st to be ready ...\n"); + + status |= read_poll_timeout(rd32_ephy, rdata, + (((rdata & 0x3f3f3f3f) & GENMASK(8 * lane_num - 1, 0)) == + (0x1b1b1b1b & GENMASK(8 * lane_num - 1, 0))), + 1000, 300000, false, hw, E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "wait ctrl_fsm_rx0_st = %x, %s.\n", rdata, + status ? 
"FAILED" : "SUCCESS"); + + return status; +} + +static int txgbe_e56_cms_cfg_temp_track_range(struct txgbe_adapter *adapter, + u8 bplinkmode) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0, T = 40; + u32 addr, rdata; + + status = txgbe_e56_get_temp(hw, &T); + if (T < 40) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRD_0_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } else if (T > 70) { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRD_0_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I, + 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I, + 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + } else { + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 24, 24, 0x1); + txgbe_field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 0, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 24, 24, 0x1); + txgbe_field_set(&rdata, 31, 29, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_10_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 1, 0, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } + return status; +} + +static int txgbe_e56_phy_tx_ffe_cfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* Setting the TX EQ main/pre1/pre2/post value */ + adapter->aml_txeq.main = S25G_TX_FFE_CFG_DAC_MAIN; + adapter->aml_txeq.pre1 = S25G_TX_FFE_CFG_DAC_PRE1; + 
adapter->aml_txeq.pre2 = S25G_TX_FFE_CFG_DAC_PRE2; + adapter->aml_txeq.post = S25G_TX_FFE_CFG_DAC_POST; + txgbe_wr32_ephy(hw, 0x141c, adapter->aml_txeq.main); + txgbe_wr32_ephy(hw, 0x1420, adapter->aml_txeq.pre1); + txgbe_wr32_ephy(hw, 0x1424, adapter->aml_txeq.pre2); + txgbe_wr32_ephy(hw, 0x1428, adapter->aml_txeq.post); + + return 0; +} + +static int txgbe_e56_25g_cfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CMS_PIN_OVRDVAL_0_INT_PLL0_TX_SIGNAL_TYPE_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CMS_PIN_OVRDEN_0_OVRD_EN_PLL0_TX_SIGNAL_TYPE_I, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_VCO_SWING_CTRL_I, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_VCO_SWING_CTRL_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDVAL_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_HF_TEST_IN_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 27, 24, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_phy_tx_ffe_cfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, + 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_PREDIV1, 0x700); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_1_TARGET_CNT1, 0x2418); + txgbe_wr32_ephy(hw, addr, 
rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_RANGE_SEL1, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_VCO_CODE_INIT, 0x7fb); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_OSC_CURRENT_BOOST_EN1, + 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_4_BBCDR_CURRENT_BOOST1, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, + 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_INTL_CONFIG_0_ADC_INTL2SLICE_DELAY1, + 0x3333); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_INTL_CONFIG_2_INTERLEAVER_HBW_DISABLE1, + 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1f8); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0xf0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, + 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__RXS0_FOM_18__ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFL_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFL_HINT__LSB, 0x0); + //change 0x90 to 0x0 to fix 25G link up keep when cable unplugged + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFFH_HINT__MSB, + E56G__RXS0_FOM_18__DFE_COEFFH_HINT__LSB, 0x0); + txgbe_field_set(&rdata, E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__MSB, + E56G__RXS0_FOM_18__DFE_COEFF_HINT_LOAD__LSB, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = 
E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 18); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 4, 0, 0x0); + 
txgbe_field_set(&rdata, 14, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, + 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, 
E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = 
rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, + 0x49); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, + 0x37); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, + 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, + 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int txgbe_e56_10g_cfg(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 addr, rdata; + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_7_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56g_cms_ana_ovrdval7 *)&rdata)->ana_lcpll_lf_vco_swing_ctrl_i = + 0xf; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56g_cms_ana_ovrden1 *)&rdata) + ->ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDVAL_9_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 23, 0, 0x260000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56G__CMS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56g_cms_ana_ovrden1 *)&rdata)->ovrd_en_ana_lcpll_lf_test_in_i = + 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_TXS_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_TXS_CFG_1_ADAPTATION_WAIT_CNT_X256, 0xf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_WKUP_CNT_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTLDO_WKUP_CNT_X32, 0xff); + txgbe_field_set(&rdata, E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 19, 16, 0x6); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDVAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDVAL_1_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_TXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_DAC_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + txgbe_e56_phy_tx_ffe_cfg(adapter); + + rdata = 0x0000; + addr = E56PHY_RXS_RXS_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_DSER_DATA_SEL, 0x0); + txgbe_field_set(&rdata, E56PHY_RXS_RXS_CFG_0_TRAIN_CLK_GATE_BYPASS_EN, + 0x1fff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_1_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->prediv0 = 0xfa0; + ((union txgbe_e56_rxs0_osc_cal_n_cdr0 *)&rdata)->target_cnt0 = 0x203a; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_4_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_range_sel0 = 0x2; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->vco_code_init = 
0x7ff; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->osc_current_boost_en0 = 0x1; + ((union txgbe_e56_rxs0_osc_cal_n_cdr4 *)&rdata)->bbcdr_current_boost0 = 0x0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_SDM_WIDTH, 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_PROP_STEP_POSTLOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_POSTLOCK, + 0xc); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BB_CDR_GAIN_CTRL_PRELOCK, + 0xf); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_5_BBCDR_RDY_CNT, 0x3); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OSC_CAL_N_CDR_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_PRELOCK, 0x7); + txgbe_field_set(&rdata, E56PHY_RXS_OSC_CAL_N_CDR_6_PI_GAIN_CTRL_POSTLOCK, + 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_0_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config0 *)&rdata)->adc_intl2slice_delay0 = 0x5555; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_INTL_CONFIG_2_ADDR; + rdata = rd32_ephy(hw, addr); + ((union txgbe_e56_rxs0_intl_config2 *)&rdata)->interleaver_hbw_disable0 = 0x1; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_LTH, 0x56); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_0_ADC_DATA_PEAK_UTH, 0x6a); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_LTH, 0x1e8); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_1_C1_UTH, 0x78); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_LTH, 0x100); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_2_CM1_UTH, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_TXFFE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_LTH, 0x4); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_CM2_UTH, 0x37); + txgbe_field_set(&rdata, E56PHY_RXS_TXFFE_TRAINING_3_TXFFE_TRAIN_MOD_TYPE, + 0x38); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_0_VGA_TARGET, 0x34); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_VGA_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT0, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA1_CODE_INIT123, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_VGA_TRAINING_1_VGA2_CODE_INIT123, 0xa); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT0, 0x9); + txgbe_field_set(&rdata, 
E56PHY_RXS_CTLE_TRAINING_0_CTLE_CODE_INIT123, 0x9); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_1_LFEQ_LUT, 0x1ffffea); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P1, 0x18); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P2, 0); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_2_ISI_TH_FRAC_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_CTLE_TRAINING_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P1, 1); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P2, 0); + txgbe_field_set(&rdata, E56PHY_RXS_CTLE_TRAINING_3_TAP_WEIGHT_P3, 0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_SLICE_DATA_AVG_CNT, + 0x3); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_0_ADC_DATA_AVG_CNT, 0x3); + txgbe_field_set(&rdata, + E56PHY_RXS_OFFSET_N_GAIN_CAL_0_FE_OFFSET_DAC_CLK_CNT_X8, 0xc); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_OFFSET_N_GAIN_CAL_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_OFFSET_N_GAIN_CAL_1_SAMP_ADAPT_CFG, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_FFE_TRAINING_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_FFE_TRAINING_0_FFE_TAP_EN, 0xf9ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_IDLE_DETECT_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0xa); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0x5); + txgbe_wr32_ephy(hw, addr, rdata); + + //txgbe_e56_ephy_config(E56G__RXS3_ANA_OVRDVAL_11, ana_test_adc_clkgen_i, 0x0); + //txgbe_e56_ephy_config(E56G__RXS0_ANA_OVRDEN_2, ovrd_en_ana_test_adc_clkgen_i, 0x0); + addr = 0x6cc; + rdata = 0x8020000; + txgbe_wr32_ephy(hw, addr, rdata); + addr = 0x94; + rdata = 0; + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_0_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RTERM_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_6_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 4, 0, 0x6); + txgbe_field_set(&rdata, 14, 13, 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, + E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_BBCDR_VCOFILT_BYP_I, 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_1_OVRD_EN_ANA_TEST_BBCDR_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 2, 0, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_17_ADDR; + rdata = rd32_ephy(hw, 
addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDVAL_17_ANA_VGA2_BOOST_CSTM_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_ANABS_CONFIG_I, + 0x1); + txgbe_field_set(&rdata, E56PHY_RXS_ANA_OVRDEN_3_OVRD_EN_ANA_VGA2_BOOST_CSTM_I, + 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDVAL_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_ANA_OVRDEN_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 13, 13, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_EYE_SCAN_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_RXS_EYE_SCAN_1_EYE_SCAN_REF_TIMER, 0x400); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_RXS_RINGO_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, 21, 12, 0x366); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_3_CTRL_FSM_TIMEOUT_X64K, 0x80); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_ON_PERIOD_X64K, 0x18); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_4_TRAIN_DC_PERIOD_X512K, 0x3e); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_PMD_CFG_5_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_PMD_CFG_5_USE_RECENT_MARKER_OFFSET, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_CONT_ON_ADC_GAIN_CAL_ERR, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_DO_RX_ADC_OFST_CAL, 0x3); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_0_RX_ERR_ACTION_EN, 0x40); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_1_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST0_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST1_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST2_WAIT_CNT_X4096, 0xff); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_1_TRAIN_ST3_WAIT_CNT_X4096, 0xff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_2_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST4_WAIT_CNT_X4096, 0x1); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST5_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST6_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_2_TRAIN_ST7_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_3_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST8_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST9_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST10_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_3_TRAIN_ST11_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_4_ADDR; + rdata = rd32_ephy(hw, addr); 
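+	/* The ST12-ST15 wait counts below follow the same read-modify-write
+	 * pattern as every register access in this function, and the values
+	 * mirror the 25G configuration above; judging by the _X4096 suffix,
+	 * the programmed counts appear to be scaled by 4096 in hardware
+	 * (an assumption based on the field naming).
+	 */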
+ txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST12_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST13_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST14_WAIT_CNT_X4096, 0x4); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_4_TRAIN_ST15_WAIT_CNT_X4096, 0x4); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_7_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST4_EN, 0x4bf); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_7_TRAIN_ST5_EN, 0xc4bf); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_8_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_8_TRAIN_ST7_EN, 0x47ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_12_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_12_TRAIN_ST15_EN, 0x67ff); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_13_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST0_DONE_EN, 0x8001); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_13_TRAIN_ST1_DONE_EN, 0x8002); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_14_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_14_TRAIN_ST3_DONE_EN, 0x8008); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_15_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_15_TRAIN_ST4_DONE_EN, 0x8004); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_17_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_17_TRAIN_ST8_DONE_EN, 0x20c0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_18_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_18_TRAIN_ST10_DONE_EN, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_29_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_29_TRAIN_ST15_DC_EN, 0x3f6d); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_33_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN0_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_33_TRAIN1_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_CTRL_FSM_CFG_34_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN2_RATE_SEL, 0x8000); + txgbe_field_set(&rdata, E56PHY_CTRL_FSM_CFG_34_TRAIN3_RATE_SEL, 0x8000); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_KRT_TFSM_CFG_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X1000K, + 0x49); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_MAX_WAIT_TIMER_X8000K, + 0x37); + txgbe_field_set(&rdata, E56PHY_KRT_TFSM_CFGKRT_TFSM_HOLDOFF_TIMER_X256K, + 0x2f); + txgbe_wr32_ephy(hw, addr, rdata); + + rdata = 0x0000; + addr = E56PHY_FETX_FFE_TRAIN_CFG_0_ADDR; + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_FETX_FFE_TRAIN_CFG_0_KRT_FETX_INIT_FFE_CFG_2, + 0x2); + txgbe_wr32_ephy(hw, addr, rdata); + + return 0; +} + +static int setphylinkmode(struct txgbe_adapter *adapter, u8 bplinkmode, + u32 
bypass_ctle) +{ + struct txgbe_hw *hw = &adapter->hw; + int lane_num = 0, status = 0; + u32 rdata = 0; + + u32 speed_select = 0; + u32 pcs_type_sel = 0; + u32 cns_en = 0; + u32 rsfec_en = 0; + u32 pma_type = 0; + u32 an0_rate_select = 0; + + switch (bplinkmode) { + case 10: + bplinkmode = 10; + lane_num = 1; + speed_select = 0; /* 10 Gb/s */ + pcs_type_sel = 0; /* 10GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0xb; /* 10GBASE-KR PMA/PMD type */ + an0_rate_select = 2; /* 10G-KR */ + break; + case 40: + bplinkmode = 40; + lane_num = 4; + speed_select = 3; /* 40 Gb/s */ + pcs_type_sel = 4; /* 40GBASE-R PCS Type */ + cns_en = 0; /* CNS_EN disable */ + rsfec_en = 0; /* RS-FEC disable */ + pma_type = 0b0100001; /* 40GBASE-CR PMA/PMD type */ + an0_rate_select = 4; /* 40G-KR: 3 40G-CR: 4 */ + break; + case 25: + bplinkmode = 25; + lane_num = 1; + speed_select = 5; /* 25 Gb/s */ + pcs_type_sel = 7; /* 25GBASE-R PCS Type */ + cns_en = 1; /* CNS_EN */ + rsfec_en = 1; /* RS-FEC enable*/ + pma_type = 0b0111001; /* 25GBASE-KR PMA/PMD type */ + an0_rate_select = 9; /* 9/10/17 25GK/CR-S or 25GK/CR */ + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid bplinkmode\n", __func__, + __LINE__); + break; + } + + adapter->curbp_link_mode = bplinkmode; + /* To switch to the 40G mode Ethernet operation, complete the following steps:*/ + /* 1. Initiate the vendor-specific software reset by programming + * the VR_RST field (bit [15]) of the VR_PCS_DIG_CTRL1 register to 1. + */ + rdata = txgbe_rd32_epcs(hw, 0x038000); + txgbe_wr32_epcs(hw, 0x038000, rdata | BIT(15)); + + /* 2. Wait for the hardware to clear the value for the VR_RST + * field (bit [15]) of the VR_PCS_DIG_CTRL1 register. + */ + kr_dbg(KR_MODE, "Wait for the bit [15] (VR_RST) to get cleared.\n"); + status = read_poll_timeout(txgbe_rd32_epcs, rdata, + FIELD_GET_M(BIT(15), rdata) == 0, 100, + 200000, false, hw, + 0x038000); + kr_dbg(KR_MODE, "Wait PHY VR_RST = %x, Wait VR_RST %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* wait rx/tx/cm powerdn_st according pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, + 200000, false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + /* 3. Write 4'b0011 to bits [5:2] of the SR_PCS_CTRL1 register. + * 10G: 0 25G: 5 40G: 3 + */ + rdata = txgbe_rd32_epcs(hw, 0x030000); + txgbe_field_set(&rdata, 5, 2, speed_select); + txgbe_wr32_epcs(hw, 0x030000, rdata); + + /* 4. Write pcs mode sel to bits [3:0] of the SR_PCS_CTRL2 register. + * 10G: 0 25G: 4'b0111 40G: 4'b0100 + */ + rdata = txgbe_rd32_epcs(hw, 0x030007); + txgbe_field_set(&rdata, 3, 0, pcs_type_sel); + txgbe_wr32_epcs(hw, 0x030007, rdata); + + /* 0 1 1 1 0 0 1 : 25GBASE-KR or 25GBASE-KR-S PMA/PMD type + * 0 1 1 1 0 0 0 : 25GBASE-CR or 25GBASE-CR-S PMA/PMD type + * 0 1 0 0 0 0 1 : 40GBASE-CR4 PMA/PMD type + * 0 1 0 0 0 0 0 : 40GBASE-KR4 PMA/PMD type + * 0 0 0 1 0 1 1 : 10GBASE-KR PMA/PMD type + */ + rdata = txgbe_rd32_epcs(hw, 0x010007); + txgbe_field_set(&rdata, 6, 0, pma_type); + txgbe_wr32_epcs(hw, 0x010007, rdata); + + /* 5. Write only 25g en to Bits [1:0] of VR_PCS_DIG_CTRL3 register. */ + rdata = txgbe_rd32_epcs(hw, 0x38003); + txgbe_field_set(&rdata, 1, 0, cns_en); + txgbe_wr32_epcs(hw, 0x38003, rdata); + + /* 6. Program PCS_AM_CNT field of VR_PCS_AM_CNT register to 'd16383 to + * configure the alignment marker interval. 
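+	 * The value 'd16383 matches the 16384-block alignment marker
+	 * spacing that IEEE 802.3 Clause 82 specifies for 40GBASE-R.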
To speed-up simulation, + * program a smaller value to this field. + */ + if (bplinkmode == 40) + txgbe_wr32_epcs(hw, 0x38018, 16383); + + /* 7. Program bit [2] of SR_PMA_RS_FEC_CTRL register to 0 + * if previously 1 (as RS-FEC is supported in 25G Mode). + */ + + rdata = txgbe_rd32_epcs(hw, 0x100c8); + txgbe_field_set(&rdata, 2, 2, rsfec_en); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + + /* 8. To enable BASE-R FEC (if desired), set bit [0]. + * in SR_PMA_KR_FEC_CTRL register + */ + + /* 3. temp applied */ + //status = txgbe_e56_cms_cfg_temp_track_range(adapter, bplinkmode); + + /* 4. set phy an status to 0 */ + //txgbe_wr32_ephy(hw, 0x1640, 0x0000); + rdata = rd32_ephy(hw, 0x1434); + txgbe_field_set(&rdata, 7, 4, 0xe); // anstatus in single mode just set to 0xe + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 9. Program Enterprise 56G PHY regs through its own APB interface: + * a. Program PHY registers as mentioned in Table 6-6 on page 1197 to + * configure the PHY to 40G + * Mode. For fast-simulation mode, additionally program, + * the registers shown in the Table 6-7 on page 1199 + * b. Enable the PMD by setting pmd_en field in PMD_CFG[0] (0x1400) + * register + */ + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL0); + txgbe_field_set(&rdata, 29, 29, 0x1); + txgbe_field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDVAL0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDVAL5); + txgbe_field_set(&rdata, 24, 24, 0x0); + txgbe_wr32_ephy(hw, ANA_OVRDVAL5, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN0); + txgbe_field_set(&rdata, 1, 1, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, ANA_OVRDEN1); + txgbe_field_set(&rdata, 30, 30, 0x1); + txgbe_field_set(&rdata, 25, 25, 0x1); + txgbe_wr32_ephy(hw, ANA_OVRDEN1, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG0); + txgbe_field_set(&rdata, 25, 24, 0x1); + txgbe_field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL0_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_CFG2); + txgbe_field_set(&rdata, 12, 8, 0x4); + txgbe_wr32_ephy(hw, PLL0_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG0); + txgbe_field_set(&rdata, 25, 24, 0x1); + txgbe_field_set(&rdata, 17, 16, 0x3); + txgbe_wr32_ephy(hw, PLL1_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL1_CFG2); + txgbe_field_set(&rdata, 12, 8, 0x8); + txgbe_wr32_ephy(hw, PLL1_CFG2, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, PLL0_DIV_CFG0); + txgbe_field_set(&rdata, 18, 8, 0x294); + txgbe_field_set(&rdata, 4, 0, 0x8); + txgbe_wr32_ephy(hw, PLL0_DIV_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG0); + txgbe_field_set(&rdata, 30, 28, 0x7); + txgbe_field_set(&rdata, 26, 24, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) + txgbe_field_set(&rdata, 18, 16, 0x5); + else if (bplinkmode == 25) + txgbe_field_set(&rdata, 18, 16, 0x3); + txgbe_field_set(&rdata, 14, 12, 0x5); + txgbe_field_set(&rdata, 10, 8, 0x5); + txgbe_wr32_ephy(hw, DATAPATH_CFG0, rdata); + + rdata = 0x0000; + rdata = rd32_ephy(hw, DATAPATH_CFG1); + txgbe_field_set(&rdata, 26, 24, 0x5); + txgbe_field_set(&rdata, 10, 8, 0x5); + if (bplinkmode == 10 || bplinkmode == 40) { + txgbe_field_set(&rdata, 18, 16, 0x5); + txgbe_field_set(&rdata, 2, 0, 0x5); + } else if (bplinkmode == 25) { + txgbe_field_set(&rdata, 18, 16, 0x3); + txgbe_field_set(&rdata, 2, 0, 0x3); + } + txgbe_wr32_ephy(hw, DATAPATH_CFG1, rdata); + + rdata = rd32_ephy(hw, AN_CFG1); + txgbe_field_set(&rdata, 4, 0, 
an0_rate_select); + txgbe_wr32_ephy(hw, AN_CFG1, rdata); + + status = txgbe_e56_cms_cfg_temp_track_range(adapter, bplinkmode); + + if (bplinkmode == 10) + txgbe_e56_10g_cfg(adapter); + else if (bplinkmode == 25) + txgbe_e56_25g_cfg(adapter); + else if (bplinkmode == 40) + txgbe_e56_cfg_40g(hw); + + return status; +} + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, u8 bplinkmode, + u32 bypass_ctle) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + switch (bplinkmode) { + case TXGBE_LINK_SPEED_10GB_FULL: + case 10: + bplinkmode = 10; + break; + case TXGBE_LINK_SPEED_40GB_FULL: + case 40: + bplinkmode = 40; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + case 25: + bplinkmode = 25; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid bplinkmode\n", __func__, + __LINE__); + break; + } + + adapter->an_done = false; + if (adapter->curbp_link_mode == 10) + return 0; + kr_dbg(KR_MODE, "Setup to backplane mode ==========\n"); + + if (adapter->backplane_an) { + u32 backplane_mode = 0; + u32 fec_advertise = 0; + + adapter->an_done = false; + /* pcs + phy rst */ + rdata = rd32(hw, 0x1000c); + if (hw->bus.lan_id == 1) + rdata |= BIT(16); + else + rdata |= BIT(19); + wr32(hw, 0x1000c, rdata); + msleep(20); + + /* clear interrupt */ + txgbe_wr32_epcs(hw, 0x070000, 0); + txgbe_wr32_epcs(hw, 0x030000, 0x8000); + rdata = txgbe_rd32_epcs(hw, 0x070000); + txgbe_field_set(&rdata, 12, 12, 0x1); + txgbe_wr32_epcs(hw, 0x070000, rdata); + txgbe_wr32_epcs(hw, 0x078002, 0x0000); + /* pcs case fec en to work around first */ + txgbe_wr32_epcs(hw, 0x100ab, 1); + + if (txgbe_is_backplane(hw)) { + if ((hw->device_id & 0xFF) == 0x10) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } else if ((hw->device_id & 0xFF) == 0x25) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } else if ((hw->device_id & 0xFF) == 0x40) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } else { + if ((hw->phy.fiber_suppport_speed & + TXGBE_LINK_SPEED_10GB_FULL) == + TXGBE_LINK_SPEED_10GB_FULL) { + backplane_mode |= 0x80; + fec_advertise |= TXGBE_10G_FEC_ABL; + } + + if ((hw->phy.fiber_suppport_speed & + TXGBE_LINK_SPEED_25GB_FULL) == + TXGBE_LINK_SPEED_25GB_FULL) { + backplane_mode |= 0xc000; + fec_advertise |= TXGBE_25G_RS_FEC_REQ | + TXGBE_25G_BASE_FEC_REQ; + } + + if ((hw->phy.fiber_suppport_speed & + TXGBE_LINK_SPEED_40GB_FULL) == + TXGBE_LINK_SPEED_40GB_FULL) { + backplane_mode |= BIT(9) | BIT(8); + fec_advertise |= TXGBE_10G_FEC_ABL; + } + } + + txgbe_wr32_epcs(hw, 0x070010, 0x0001); + + /* 10GKR:7-25KR:14/15-40GKR:8-40GCR:9 */ + txgbe_wr32_epcs(hw, 0x070011, backplane_mode | 0x11); + + /* BASE-R FEC */ + rdata = txgbe_rd32_epcs(hw, 0x70012); + txgbe_wr32_epcs(hw, 0x70012, fec_advertise); + + txgbe_wr32_epcs(hw, 0x070016, 0x0000); + txgbe_wr32_epcs(hw, 0x070017, 0x0); + txgbe_wr32_epcs(hw, 0x070018, 0x0); + + /* config timer */ + txgbe_wr32_epcs(hw, 0x078004, 0x003c); + txgbe_wr32_epcs(hw, 0x078005, 3000); + txgbe_wr32_epcs(hw, 0x078006, 25); + txgbe_wr32_epcs(hw, 0x078000, 0x0008 | BIT(2)); + + kr_dbg(KR_MODE, "1.2 Wait 10G KR phy/pcs mode init ....\n"); + status = setphylinkmode(adapter, 10, bypass_ctle); + if (status) + return status; + + /* 5. 
CM_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 21, 20, 0x3); //pll en + txgbe_field_set(&rdata, 19, 12, 0x0); // tx disable + txgbe_field_set(&rdata, 8, 8, 0x0); // pmd mode + txgbe_field_set(&rdata, 1, 1, 0x1); // pmd en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 6, TX_ENABLE */ + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 19, 12, 0x1); // tx en + txgbe_wr32_ephy(hw, 0x1400, rdata); + + kr_dbg(KR_MODE, "1.3 Wait 10G PHY RXS....\n"); + status = txgbe_e56_rxs_oscinit_temp_track(adapter, 10); + if (status) + return status; + + /* Wait an 10g fsm_rx_sts */ + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0x3f) == 0xb), 1000, + 200000, false, hw, + E56PHY_CTRL_FSM_RX_STAT_0_ADDR); + kr_dbg(KR_MODE, "Wait 10g fsm_rx_sts = %x, Wait rx_sts %s.\n", + rdata, status ? "FAILED" : "SUCCESS"); + + rdata = txgbe_rd32_epcs(hw, 0x070000); + txgbe_field_set(&rdata, 12, 12, 0x1); + txgbe_wr32_epcs(hw, 0x070000, rdata); + kr_dbg(KR_MODE, "Setup the backplane mode========end ==\n"); + } else { + if ((hw->phy.fiber_suppport_speed & + TXGBE_LINK_SPEED_40GB_FULL) == TXGBE_LINK_SPEED_40GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_40GB_FULL); + else if ((hw->phy.fiber_suppport_speed & + TXGBE_LINK_SPEED_25GB_FULL) == + TXGBE_LINK_SPEED_25GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_25GB_FULL); + else if (hw->phy.fiber_suppport_speed == + TXGBE_LINK_SPEED_10GB_FULL) + txgbe_set_link_to_amlite(hw, + TXGBE_LINK_SPEED_10GB_FULL); + } + + return status; +} + +static void txgbe_e56_print_page_status(struct txgbe_adapter *adapter, + struct bkpan73ability *tbkp_an73_ability, + struct bkpan73ability *tlpbkp_an73_ability) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata = 0; + + /* Read the local AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the local Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + tbkp_an73_ability->next_page = (rdata & BIT(15)) ? 1 : 0; + kr_dbg(KR_MODE, "\tread 70010 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + kr_dbg(KR_MODE, "\tread 70011 data %0x\n", rdata); + tbkp_an73_ability->link_ability = (rdata >> 5) & GENMASK(10, 0); + /* amber-lite only support 10GKR - 25GKR/CR - 25GKR-S/CR-S */ + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tbkp_an73_ability->link_ability & BIT(ABILITY_10GBASE_KR) ? 1 : 0, + tbkp_an73_ability->link_ability & BIT(ABILITY_25GBASE_KRCR_S) ? 1 : + 0, + tbkp_an73_ability->link_ability & BIT(ABILITY_25GBASE_KRCR) ? 1 : + 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tbkp_an73_ability->link_ability & BIT(ABILITY_40GBASE_CR4) ? 1 : 0, + tbkp_an73_ability->link_ability & BIT(ABILITY_40GBASE_KR4) ? 1 : 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tbkp_an73_ability->fec_ability = rdata; + kr_dbg(KR_MODE, "\tread 70012 data %0x\n", rdata); + + /* Read the link partner AN73 Base Page Ability Registers */ + kr_dbg(KR_MODE, "Read the link partner Base Page Ability Registers\n"); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + tlpbkp_an73_ability->next_page = (rdata & BIT(15)) ? 
1 : 0; + kr_dbg(KR_MODE, "\tread 70013 data %0x\n", rdata); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL2); + tlpbkp_an73_ability->link_ability = (rdata >> 5) & GENMASK(10, 0); + kr_dbg(KR_MODE, "\tread 70014 data %0x\n", rdata); + kr_dbg(KR_MODE, "\tKX : %x\tKX4 : %x\n", + tlpbkp_an73_ability->link_ability & BIT(ABILITY_1000BASE_KX) ? 1 : + 0, + tlpbkp_an73_ability->link_ability & BIT(ABILITY_10GBASE_KX4) ? 1 : + 0); + kr_dbg(KR_MODE, "\t10GKR : %x\t25GKR/CR-S: %x\t25GKR/CR : %x\n", + tlpbkp_an73_ability->link_ability & BIT(ABILITY_10GBASE_KR) ? 1 : 0, + tlpbkp_an73_ability->link_ability & BIT(ABILITY_25GBASE_KRCR_S) ? + 1 : + 0, + tlpbkp_an73_ability->link_ability & BIT(ABILITY_25GBASE_KRCR) ? 1 : + 0); + kr_dbg(KR_MODE, "\t40GCR4 : %x\t40GKR4 : %x\n", + tlpbkp_an73_ability->link_ability & BIT(ABILITY_40GBASE_CR4) ? 1 : + 0, + tlpbkp_an73_ability->link_ability & BIT(ABILITY_40GBASE_KR4) ? 1 : + 0); + rdata = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL3); + kr_dbg(KR_MODE, "\tF1:FEC Req\tF0:FEC Sup\tF3:25GFEC\tF2:25GRS\n"); + kr_dbg(KR_MODE, "\tF1: %d\t\tF0: %d\t\tF3: %d\t\tF2: %d\n", + ((rdata >> 15) & 0x01), ((rdata >> 14) & 0x01), + ((rdata >> 13) & 0x01), ((rdata >> 12) & 0x01)); + tlpbkp_an73_ability->fec_ability = rdata; + adapter->fec_mode = 0; + if (rdata & TXGBE_25G_RS_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_RS_FEC_REQ; + if (rdata & TXGBE_25G_BASE_FEC_REQ) + adapter->fec_mode |= TXGBE_25G_BASE_FEC_REQ; + if (rdata & TXGBE_10G_FEC_ABL) + adapter->fec_mode |= TXGBE_10G_FEC_ABL; + if (rdata & TXGBE_10G_FEC_REQ) + adapter->fec_mode |= TXGBE_10G_FEC_REQ; + kr_dbg(KR_MODE, "\tread 70015 data %0x\n", rdata); + + kr_dbg(KR_MODE, "\tread 70016 data %0x\n", + txgbe_rd32_epcs(hw, 0x70016)); + kr_dbg(KR_MODE, "\tread 70017 data %0x\n", + txgbe_rd32_epcs(hw, 0x70017)); + kr_dbg(KR_MODE, "\tread 70018 data %0x\n", + txgbe_rd32_epcs(hw, 0x70018)); + kr_dbg(KR_MODE, "\tread 70019 data %0x\n", + txgbe_rd32_epcs(hw, 0x70019)); + kr_dbg(KR_MODE, "\tread 7001a data %0x\n", + txgbe_rd32_epcs(hw, 0x7001a)); + kr_dbg(KR_MODE, "\tread 7001b data %0x\n", + txgbe_rd32_epcs(hw, 0x7001b)); +} + +static int chk_bkp_ability(struct txgbe_adapter *adapter, + struct bkpan73ability tbkp_an73_ability, + struct bkpan73ability tlpbkp_an73_ability) +{ + unsigned int com_link_ability; + + kr_dbg(KR_MODE, "CheckBkpAn73Ability():\n"); + /* Check the common link ability and take action based on the result*/ + com_link_ability = tbkp_an73_ability.link_ability & + tlpbkp_an73_ability.link_ability; + kr_dbg(KR_MODE, "comAbility= 0x%x, Ability= 0x%x, lpAbility= 0x%x\n", + com_link_ability, tbkp_an73_ability.link_ability, + tlpbkp_an73_ability.link_ability); + + if (com_link_ability == 0) { + adapter->bp_link_mode = 0; + kr_dbg(KR_MODE, "Do not support any compatible speed mode!\n"); + return -EINVAL; + } else if (com_link_ability & BIT(ABILITY_40GBASE_CR4)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_40GBASE_CR4].\n"); + adapter->bp_link_mode = 40; + } else if (com_link_ability & BIT(ABILITY_25GBASE_KRCR_S)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR_S].\n"); + adapter->fec_mode = TXGBE_25G_RS_FEC_REQ; + adapter->bp_link_mode = 25; + } else if (com_link_ability & BIT(ABILITY_25GBASE_KRCR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_25GBASE_KRCR].\n"); + adapter->bp_link_mode = 25; + } else if (com_link_ability & BIT(ABILITY_10GBASE_KR)) { + kr_dbg(KR_MODE, "Link mode is [ABILITY_10GBASE_KR].\n"); + adapter->bp_link_mode = 10; + } + + return 0; +} + +static int txgbe_e56_exchange_page(struct 
txgbe_adapter *adapter) +{ + struct bkpan73ability tbkp_an73_ability = { 0 }, tlpbkp_an73_ability = { 0 }; + struct txgbe_hw *hw = &adapter->hw; + u32 an_int, base_page = 0; + int count = 0; + + an_int = txgbe_rd32_epcs(hw, 0x78002); + if (!(an_int & TXGBE_E56_AN_PG_RCV)) + return -EINVAL; + + /* 500ms timeout */ + for (count = 0; count < 5000; count++) { + u32 fsm = txgbe_rd32_epcs(hw, 0x78010); + + kr_dbg(KR_MODE, "-----count----- %d - fsm: %x\n", + count, fsm); + if (an_int & TXGBE_E56_AN_PG_RCV) { + u8 next_page = 0; + u32 rdata, addr; + + txgbe_e56_print_page_status(adapter, &tbkp_an73_ability, + &tlpbkp_an73_ability); + addr = base_page == 0 ? 0x70013 : 0x70019; + rdata = txgbe_rd32_epcs(hw, addr); + if (rdata & BIT(14)) { + if (rdata & BIT(15)) { + /* always set null message */ + txgbe_wr32_epcs(hw, 0x70016, 0x2001); + kr_dbg(KR_MODE, "write 70016 0x%0x\n", + 0x2001); + rdata = txgbe_rd32_epcs(hw, 0x70010); + txgbe_wr32_epcs(hw, 0x70010, + rdata | BIT(15)); + kr_dbg(KR_MODE, "write 70010 0x%0x\n", + rdata); + next_page = 1; + } else { + next_page = 0; + } + base_page = 1; + } + /* clear the AN page received interrupt */ + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear page rcv\n", rdata); + txgbe_field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + if (next_page == 0) { + if ((fsm & 0x8) == 0x8) { + adapter->fsm = 0x8; + goto check_ability; + } + } + } + usec_delay(100); + } + +check_ability: + return chk_bkp_ability(adapter, tbkp_an73_ability, tlpbkp_an73_ability); +} + +static int txgbe_e56_cl72_trainning(struct txgbe_adapter *adapter) +{ + u32 bylinkmode = adapter->bp_link_mode; + struct txgbe_hw *hw = &adapter->hw; + u8 bypass_ctle = hw->bypass_ctle; + int status = 0, temp_data = 0; + u32 lane_num = 0, lane_idx = 0; + u32 pmd_ctrl = 0; + u32 txffe = 0; + int ret = 0; + u32 rdata; + + u8 pll_en_cfg = 0; + u8 pmd_mode = 0; + + switch (bylinkmode) { + case 10: + bylinkmode = 10; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + case 40: + bylinkmode = 40; + lane_num = 4; + pll_en_cfg = 0; /* pll_en_cfg : single link to 0 */ + pmd_mode = 1; /* pmd mode : 1 - single link */ + break; + case 25: + bylinkmode = 25; + lane_num = 1; + pll_en_cfg = 3; + pmd_mode = 0; + break; + default: + kr_dbg(KR_MODE, "%s %d :Invalid speed\n", __func__, __LINE__); + break; + } + + kr_dbg(KR_MODE, "2.3 Wait %dG KR phy mode init ....\n", bylinkmode); + status = setphylinkmode(adapter, bylinkmode, bypass_ctle); + + /* 13. set phy an status to 1 - AN_CFG[0]: 4-7 lane0-lane3 */ + rdata = rd32_ephy(hw, 0x1434); + txgbe_field_set(&rdata, 7, 4, GENMASK(lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1434, rdata); + + /* 14 and 15. kr training: set BASER_PMD_CONTROL[0, 7] for lane0-3 */ + rdata = rd32_ephy(hw, 0x1640); + txgbe_field_set(&rdata, 7, 0, GENMASK(2 * lane_num - 1, 0)); + txgbe_wr32_ephy(hw, 0x1640, rdata); + + /* 16. enable CMS and its internal PLL */ + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 21, 20, pll_en_cfg); + txgbe_field_set(&rdata, 19, 12, 0); /* tx/rx off */ + txgbe_field_set(&rdata, 8, 8, pmd_mode); + txgbe_field_set(&rdata, 1, 1, 0x1); /* pmd en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 17. tx enable PMD_CFG[0] */ + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 15, 12, GENMASK(lane_num - 1, 0)); /* tx en */ + txgbe_wr32_ephy(hw, 0x1400, rdata); + + /* 18 */ + /* 19. rxs calibration and adaptation sequence */ + kr_dbg(KR_MODE, "2.4 Wait %dG RXS.... 
fsm: %x\n", bylinkmode, + txgbe_rd32_epcs(hw, 0x78010)); + status = txgbe_e56_rxs_calib_adapt_seq(adapter, bylinkmode, bypass_ctle); + ret |= status; + /* 20 */ + kr_dbg(KR_MODE, "2.5 Wait %dG phy calibration.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + txgbe_e56_set_rxs_ufine_lemax(adapter, bylinkmode); + status = txgbe_e56_get_temp(hw, &temp_data); + if (bylinkmode == 40) + status = txgbe_temp_track_seq_40g(hw, + TXGBE_LINK_SPEED_40GB_FULL); + else + status = txgbe_e56_rxs_post_cdr_lock_temp_track_seq(adapter, bylinkmode); + /* 21 */ + kr_dbg(KR_MODE, "2.6 Wait %dG phy kr training check.... fsm: %x\n", + bylinkmode, txgbe_rd32_epcs(hw, 0x78010)); + status = read_poll_timeout(rd32_ephy, rdata, + ((rdata & 0xe) & GENMASK(lane_num, 1)) == + (0xe & GENMASK(lane_num, 1)), + 100, 200000, false, hw, 0x163c); + pmd_ctrl = rd32_ephy(hw, 0x1644); + kr_dbg(KR_MODE, + "KR TRAINING CHECK = %x, %s. pmd_ctrl:%lx-%lx-%lx-%lx\n", rdata, + status ? "FAILED" : "SUCCESS", + FIELD_GET_M(GENMASK(3, 0), pmd_ctrl), + FIELD_GET_M(GENMASK(7, 4), pmd_ctrl), + FIELD_GET_M(GENMASK(11, 8), pmd_ctrl), + FIELD_GET_M(GENMASK(15, 12), pmd_ctrl)); + ret |= status; + kr_dbg(KR_MODE, "before: %x-%x-%x-%x\n", rd32_ephy(hw, 0x141c), + rd32_ephy(hw, 0x1420), rd32_ephy(hw, 0x1424), + rd32_ephy(hw, 0x1428)); + + for (lane_idx = 0; lane_idx < lane_num; lane_idx++) { + txffe = rd32_ephy(hw, 0x828 + lane_idx * 0x100); + kr_dbg(KR_MODE, "after[%x]: %lx-%lx-%lx-%lx\n", lane_idx, + FIELD_GET_M(GENMASK(6, 0), txffe), + FIELD_GET_M(GENMASK(21, 16), txffe), + FIELD_GET_M(GENMASK(29, 24), txffe), + FIELD_GET_M(GENMASK(13, 8), txffe)); + } + + /* 22 */ + kr_dbg(KR_MODE, "2.7 Wait %dG phy Rx adc.... fsm: %x\n", bylinkmode, + txgbe_rd32_epcs(hw, 0x78010)); + status = txgbe_e56_rxs_adc_adapt_seq(adapter, bypass_ctle); + + return ret; +} + +static int handle_e56_bkp_an73_flow(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int status = 0; + u32 rdata; + + kr_dbg(KR_MODE, "2.1 Wait page changed ....\n"); + status = txgbe_e56_exchange_page(adapter); + if (status) { + kr_dbg(KR_MODE, "Exchange page failed\n"); + return status; + } + + kr_dbg(KR_MODE, "2.2 Wait page changed ..done..\n"); + txgbe_wr32_epcs(hw, 0x100ab, 0); + + rdata = txgbe_rd32_epcs(hw, 0x78002); + kr_dbg(KR_MODE, "read 78002 data %0x and clear page int\n", rdata); + txgbe_field_set(&rdata, 2, 2, 0x0); + txgbe_wr32_epcs(hw, 0x78002, rdata); + + /* 10 RXS_DISABLE - TXS_DISABLE - CMS_DISABLE */ + /* dis phy tx/rx lane */ + rdata = rd32_ephy(hw, 0x1400); + txgbe_field_set(&rdata, 19, 16, 0x0); + txgbe_field_set(&rdata, 15, 12, 0x0); + txgbe_field_set(&rdata, 1, 1, 0x0); + txgbe_wr32_ephy(hw, 0x1400, rdata); + kr_dbg(KR_MODE, "Ephy Write A: 0x%x, D: 0x%x\n", 0x1400, rdata); + /* wait rx/tx/cm powerdn_st according pmd 50 2.0.5 */ + status = read_poll_timeout(rd32_ephy, rdata, + (rdata & GENMASK(3, 0)) == 0x9, 100, 200000, + false, hw, 0x14d4); + kr_dbg(KR_MODE, "wait ctrl_fsm_cm_st = %x, %s.\n", rdata, + status ? 
"FAILED" : "SUCCESS"); + + if (adapter->fec_mode & TXGBE_25G_RS_FEC_REQ) { + txgbe_wr32_epcs(hw, 0x180a3, 0x68c1); + txgbe_wr32_epcs(hw, 0x180a4, 0x3321); + txgbe_wr32_epcs(hw, 0x180a5, 0x973e); + txgbe_wr32_epcs(hw, 0x180a6, 0xccde); + + txgbe_wr32_epcs(hw, 0x38018, 1024); + rdata = txgbe_rd32_epcs(hw, 0x100c8); + txgbe_field_set(&rdata, 2, 2, 1); + txgbe_wr32_epcs(hw, 0x100c8, rdata); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "RS-FEC"); + adapter->cur_fec_link = TXGBE_PHY_FEC_RS; + } else if (adapter->fec_mode & TXGBE_25G_BASE_FEC_REQ) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "25GBASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else if (adapter->fec_mode & (TXGBE_10G_FEC_REQ)) { + /* FEC: FC-FEC/BASE-R */ + txgbe_wr32_epcs(hw, 0x100ab, BIT(0)); + kr_dbg(KR_MODE, "Epcs Write A: 0x%x, D: 0x%x\n", 0x100ab, 1); + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "BASE-R"); + adapter->cur_fec_link = TXGBE_PHY_FEC_BASER; + } else { + kr_dbg(KR_MODE, "Advertised FEC modes : %s\n", "NONE"); + adapter->cur_fec_link = TXGBE_PHY_FEC_OFF; + } + + status = txgbe_e56_cl72_trainning(adapter); + rdata = rd32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MAX, 0x28); + txgbe_field_set(&rdata, E56PHY_RXS_IDLE_DETECT_1_IDLE_TH_ADC_PEAK_MIN, 0xa); + txgbe_wr32_ephy(hw, E56PHY_RXS_IDLE_DETECT_1_ADDR, rdata); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, + E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, + E56PHY_INTR_1_IDLE_EXIT1); + + return status; +} + +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rlu = 0, an_int = 0, an_int1 = 0; + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0, fsm = 0; + int ret = 0; + + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + return; + + /* only continue if link is down */ + if (netif_carrier_ok(netdev)) + return; + + if (!adapter->backplane_an) + return; + + value = txgbe_rd32_epcs(hw, 0x78002); + an_int = value; + if (value & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + txgbe_field_set(&value, 0, 0, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_INC_LINK) { + txgbe_field_set(&value, 1, 1, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + } + + if (value & TXGBE_E56_AN_TXDIS) { + txgbe_field_set(&value, 3, 3, 0); + txgbe_wr32_epcs(hw, 0x78002, value); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypass_ctle); + mutex_unlock(&adapter->e56_lock); + goto an_status; + } + + if (value & TXGBE_E56_AN_PG_RCV) { + kr_dbg(KR_MODE, "Enter training\n"); + ret = handle_e56_bkp_an73_flow(adapter); + + fsm = txgbe_rd32_epcs(hw, 0x78010); + if (fsm & 0x8) + goto an_status; + if (ret) { + kr_dbg(KR_MODE, "Training FAILED, do reset\n"); + mutex_lock(&adapter->e56_lock); + txgbe_e56_set_phylinkmode(adapter, 10, hw->bypass_ctle); + mutex_unlock(&adapter->e56_lock); + } else { + kr_dbg(KR_MODE, "ALL SUCCEEDED\n"); + } + } + 
+an_status: + an_int1 = txgbe_rd32_epcs(hw, 0x78002); + if (an_int1 & TXGBE_E56_AN_INT_CMPLT) { + adapter->an_done = true; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } + rlu = txgbe_rd32_epcs(hw, 0x30001); + kr_dbg(KR_MODE, + "RLU:%x MLU:%x INT:%x-%x CTL:%x fsm:%x pmd_cfg0:%x an_done:%d by:%d\n", + txgbe_rd32_epcs(hw, 0x30001), rd32(hw, 0x14404), an_int, an_int1, + txgbe_rd32_epcs(hw, 0x70000), txgbe_rd32_epcs(hw, 0x78010), + rd32_ephy(hw, 0x1400), adapter->an_done, hw->bypass_ctle); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h new file mode 100644 index 000000000000..55c10a6f9d96 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56_bp.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_E56_BP_H_ +#define _TXGBE_E56_BP_H_ + +#define TXGBE_E56_AN_TXDIS BIT(3) +#define TXGBE_E56_AN_PG_RCV BIT(2) +#define TXGBE_E56_AN_INC_LINK BIT(1) +#define TXGBE_E56_AN_INT_CMPLT BIT(0) + +#define TXGBE_10G_FEC_REQ BIT(15) +#define TXGBE_10G_FEC_ABL BIT(14) +#define TXGBE_25G_BASE_FEC_REQ BIT(13) +#define TXGBE_25G_RS_FEC_REQ BIT(12) + +union txgbe_e56_pmd_tx_ffe_cfg1 { + struct { + u32 tx0_cursor_factor : 7; + u32 rsvd0 : 1; + u32 tx1_cursor_factor : 7; + u32 rsvd1 : 1; + u32 tx2_cursor_factor : 7; + u32 rsvd2 : 1; + u32 tx3_cursor_factor : 7; + u32 rsvd3 : 1; + }; + u32 reg; +}; + +#define E56G__PMD_TX_FFE_CFG_1_NUM 1 +#define E56G__PMD_TX_FFE_CFG_1_ADDR (E56G__BASEADDR + 0x141c) +#define E56G__PMD_TX_FFE_CFG_1_PTR ((union txgbe_e56_pmd_tx_ffe_cfg1 *)\ + (E56G__PMD_TX_FFE_CFG_1_ADDR)) +#define E56G__PMD_TX_FFE_CFG_1_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_1_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_1_READ_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_MSB 30 +#define E56G__PMD_TX_FFE_CFG_1_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_1_RESET_VALUE 0x3f3f3f3f + +union txgbe_e56_pmd_tx_ffe_cfg2 { + struct { + u32 tx0_precursor1_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor1_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor1_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor1_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +}; + +#define E56G__PMD_TX_FFE_CFG_2_NUM 1 +#define E56G__PMD_TX_FFE_CFG_2_ADDR (E56G__BASEADDR + 0x1420) +#define E56G__PMD_TX_FFE_CFG_2_PTR ((union txgbe_e56_pmd_tx_ffe_cfg2 *)\ + (E56G__PMD_TX_FFE_CFG_2_ADDR)) +#define E56G__PMD_TX_FFE_CFG_2_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_2_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_2_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_2_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_2_RESET_VALUE 0x0 + +union txgbe_e56_pmd_tx_ffe_cfg3 { + struct { + u32 tx0_precursor2_factor : 6; + u32 rsvd0 : 2; + u32 tx1_precursor2_factor : 6; + u32 rsvd1 : 2; + u32 tx2_precursor2_factor : 6; + u32 rsvd2 : 2; + u32 tx3_precursor2_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +}; + +#define E56G__PMD_TX_FFE_CFG_3_NUM 1 +#define E56G__PMD_TX_FFE_CFG_3_ADDR (E56G__BASEADDR + 0x1424) +#define E56G__PMD_TX_FFE_CFG_3_PTR ((union txgbe_e56_pmd_tx_ffe_cfg3 *)\ + (E56G__PMD_TX_FFE_CFG_3_ADDR)) +#define E56G__PMD_TX_FFE_CFG_3_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_3_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_3_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_READ_LSB 
0 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_3_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_3_RESET_VALUE 0x0 + +union txgbe_e56_pmd_tx_ffe_cfg4 { + struct { + u32 tx0_postcursor_factor : 6; + u32 rsvd0 : 2; + u32 tx1_postcursor_factor : 6; + u32 rsvd1 : 2; + u32 tx2_postcursor_factor : 6; + u32 rsvd2 : 2; + u32 tx3_postcursor_factor : 6; + u32 rsvd3 : 2; + }; + u32 reg; +}; + +#define E56G__PMD_TX_FFE_CFG_4_NUM 1 +#define E56G__PMD_TX_FFE_CFG_4_ADDR (E56G__BASEADDR + 0x1428) +#define E56G__PMD_TX_FFE_CFG_4_PTR ((union txgbe_e56_pmd_tx_ffe_cfg4 *)\ + (E56G__PMD_TX_FFE_CFG_4_ADDR)) +#define E56G__PMD_TX_FFE_CFG_4_STRIDE 4 +#define E56G__PMD_TX_FFE_CFG_4_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_ACC_SIZE 32 +#define E56G__PMD_TX_FFE_CFG_4_READ_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_READ_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_MSB 29 +#define E56G__PMD_TX_FFE_CFG_4_WRITE_LSB 0 +#define E56G__PMD_TX_FFE_CFG_4_RESET_VALUE 0x0 + +union txgbe_e56g_cms_ana_ovrdval7 { + struct { + u32 ana_lcpll_lf_vco_swing_ctrl_i : 4; + u32 ana_lcpll_lf_lpf_setcode_calib_i : 5; + u32 rsvd0 : 3; + u32 ana_lcpll_lf_vco_coarse_bin_i : 5; + u32 rsvd1 : 3; + u32 ana_lcpll_lf_vco_fine_therm_i : 8; + u32 ana_lcpll_lf_clkout_fb_ctrl_i : 2; + u32 rsvd2 : 2; + }; + u32 reg; +}; + +#define E56G__CMS_ANA_OVRDVAL_7_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_7_ADDR (E56G__BASEADDR + 0xccc) +#define E56G__CMS_ANA_OVRDVAL_7_PTR ((union txgbe_e56g_cms_ana_ovrdval7 *)\ + (E56G__CMS_ANA_OVRDVAL_7_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_7_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_7_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_7_READ_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_MSB 29 +#define E56G__CMS_ANA_OVRDVAL_7_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_7_RESET_VALUE 0x0 + +union txgbe_e56g_cms_ana_ovrden1 { + struct { + u32 ovrd_en_ana_lcpll_hf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_hf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_hf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_hf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_lf_en_bias_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_loop_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_cp_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_base_i : 1; + u32 ovrd_en_ana_lcpll_lf_icp_fine_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_lpf_setcode_calib_i : 1; + u32 ovrd_en_ana_lcpll_lf_set_lpf_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_vco_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_swing_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_coarse_bin_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_fine_therm_i : 1; + u32 ovrd_en_ana_lcpll_lf_vco_amp_status_o : 1; + u32 ovrd_en_ana_lcpll_lf_clkout_fb_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_clkdiv_ctrl_i : 1; + u32 ovrd_en_ana_lcpll_lf_en_odiv_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_in_i : 1; + u32 ovrd_en_ana_lcpll_lf_test_out_o : 1; + u32 ovrd_en_ana_lcpll_hf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_lf_refclk_select_i : 1; + u32 ovrd_en_ana_lcpll_hf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_lcpll_lf_clk_ref_sel_i : 1; + u32 ovrd_en_ana_test_bias_i : 1; + u32 ovrd_en_ana_test_slicer_i : 1; + u32 ovrd_en_ana_test_sampler_i : 1; + }; + u32 reg; +}; + +#define E56G__CMS_ANA_OVRDEN_1_NUM 1 +#define E56G__CMS_ANA_OVRDEN_1_ADDR (E56G__BASEADDR + 0xca8) +#define E56G__CMS_ANA_OVRDEN_1_PTR ((union txgbe_e56g_cms_ana_ovrden1 *)\ + 
(E56G__CMS_ANA_OVRDEN_1_ADDR)) +#define E56G__CMS_ANA_OVRDEN_1_STRIDE 4 +#define E56G__CMS_ANA_OVRDEN_1_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDEN_1_READ_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_READ_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_MSB 31 +#define E56G__CMS_ANA_OVRDEN_1_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDEN_1_RESET_VALUE 0x0 + +union txgbe_e56g_cms_ana_ovrdval9 { + struct { + u32 ana_lcpll_lf_test_in_i : 32; + }; + u32 reg; +}; + +#define E56G__CMS_ANA_OVRDVAL_9_NUM 1 +#define E56G__CMS_ANA_OVRDVAL_9_ADDR (E56G__BASEADDR + 0xcd4) +#define E56G__CMS_ANA_OVRDVAL_9_PTR ((union txgbe_e56g_cms_ana_ovrdval9 *)\ + (E56G__CMS_ANA_OVRDVAL_9_ADDR)) +#define E56G__CMS_ANA_OVRDVAL_9_STRIDE 4 +#define E56G__CMS_ANA_OVRDVAL_9_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_ACC_SIZE 32 +#define E56G__CMS_ANA_OVRDVAL_9_READ_MSB 31 +#define E56G__CMS_ANA_OVRDVAL_9_READ_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_WRITE_MSB 31 +#define E56G__CMS_ANA_OVRDVAL_9_WRITE_LSB 0 +#define E56G__CMS_ANA_OVRDVAL_9_RESET_VALUE 0x0 + +#define SFP2_RS0 5 +#define SFP2_RS1 4 +#define SFP2_TX_DISABLE 1 +#define SFP2_TX_FAULT 0 +#define SFP2_RX_LOS_BIT 3 +#ifdef PHYINIT_TIMEOUT +#undef PHYINIT_TIMEOUT +#define PHYINIT_TIMEOUT 2000 +#endif + +//#define E56PHY_CMS_ANA_OVRDEN_0_ADDR (E56PHY_CMS_BASE_ADDR+0xA4) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_DAISY_EN_I FORMAT_NOPARENTHERSES(0, 0) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I FORMAT_NOPARENTHERSES(1, 1) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_REFCLK_BUF_PAD_EN_I_LSB 1 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_VDDINOFF_DCORE_DIG_O FORMAT_NOPARENTHERSES(2, 2) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I FORMAT_NOPARENTHERSES(11, 11) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_EN_I_LSB 11 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I FORMAT_NOPARENTHERSES(12, 12) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_BG_TESTIN_I_LSB 12 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I FORMAT_NOPARENTHERSES(13, 13) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_RESCAL_I_LSB 13 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O FORMAT_NOPARENTHERSES(14, 14) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_COMP_O_LSB 14 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I FORMAT_NOPARENTHERSES(15, 15) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_RESCAL_CODE_I_LSB 15 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I FORMAT_NOPARENTHERSES(16, 16) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_EN_LDO_CORE_I_LSB 16 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I FORMAT_NOPARENTHERSES(17, 17) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_TEST_LDO_I_LSB 17 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I FORMAT_NOPARENTHERSES(18, 18) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_ANA_DEBUG_SEL_I_LSB 18 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I FORMAT_NOPARENTHERSES(19, 19) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_BIAS_I_LSB 19 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I FORMAT_NOPARENTHERSES(20, 20) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_LOOP_I_LSB 20 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I FORMAT_NOPARENTHERSES(21, 21) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_EN_CP_I_LSB 21 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I FORMAT_NOPARENTHERSES(22, 22) +#define 
E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_BASE_I_LSB 22 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I FORMAT_NOPARENTHERSES(23, 23) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_ICP_FINE_I_LSB 23 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I FORMAT_NOPARENTHERSES(24, 24) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_CTRL_I_LSB 24 +#define E56PHY_CMS_ANA_OVRD_0_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I FORMAT_NOPARENTHERSES(25, 25) +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I_LSB 25 +#define E56PHY_CMS_ANA_OVRDEN_0_OVRD_EN_ANA_LCPLL_HF_SET_LPF_I FORMAT_NOPARENTHERSES(26, 26) + +#define E56PHY_CMS_ANA_OVRDVAL_2_ANA_LCPLL_HF_LPF_SETCODE_CALIB_I FORMAT_NOPARENTHERSES(20, 16) +#define E56PHY_CMS_ANA_OVRDEN_1_EN_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I FORMAT_NOPARENTHERSES(12, 12) +#define E56PHY_CMS_ANA_OVRDVAL_7_ADDR (E56PHY_CMS_BASE_ADDR + 0xCC) +#define E56PHY_CMS_ANA_OVRDVAL_5_ADDR (E56PHY_CMS_BASE_ADDR + 0xC4) +#define E56PHY_CMS_ANA_OVRDEN_1_OVRD_EN_ANA_LCPLL_LF_TEST_IN_I FORMAT_NOPARENTHERSES(23, 23) +#define E56PHY_CMS_ANA_OVRDVAL_9_ADDR (E56PHY_CMS_BASE_ADDR + 0xD4) +#define E56PHY_CMS_ANA_OVRDVAL_10_ADDR (E56PHY_CMS_BASE_ADDR + 0xD8) +#define E56PHY_CMS_ANA_OVRDVAL_7_ANA_LCPLL_LF_LPF_SETCODE_CALIB_I FORMAT_NOPARENTHERSES(8, 4) + +int txgbe_e56_set_phylinkmode(struct txgbe_adapter *adapter, + unsigned char link_mode, unsigned int bypass_ctle); +void txgbe_e56_bp_watchdog_event(struct txgbe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c new file mode 100644 index 000000000000..9a87fa1b6af8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -0,0 +1,4744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe_e56.h" +#include + +#define TXGBE_ALL_RAR_ENTRIES 16 + +#include "txgbe_xsk.h" + +#define ETHTOOL_LINK_MODE_SPEED_MASK 0xfffe903f + +struct txgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define TXGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct txgbe_stats txgbe_gstrings_net_stats[] = { + TXGBE_NETDEV_STAT(rx_packets), + TXGBE_NETDEV_STAT(tx_packets), + TXGBE_NETDEV_STAT(rx_bytes), + TXGBE_NETDEV_STAT(tx_bytes), + TXGBE_NETDEV_STAT(rx_errors), + TXGBE_NETDEV_STAT(tx_errors), + TXGBE_NETDEV_STAT(rx_dropped), + TXGBE_NETDEV_STAT(tx_dropped), + TXGBE_NETDEV_STAT(collisions), + TXGBE_NETDEV_STAT(rx_over_errors), + TXGBE_NETDEV_STAT(rx_crc_errors), + TXGBE_NETDEV_STAT(rx_frame_errors), + TXGBE_NETDEV_STAT(rx_fifo_errors), + TXGBE_NETDEV_STAT(rx_missed_errors), + TXGBE_NETDEV_STAT(tx_aborted_errors), + TXGBE_NETDEV_STAT(tx_carrier_errors), + TXGBE_NETDEV_STAT(tx_fifo_errors), + TXGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define TXGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct txgbe_adapter, _stat), \ + .stat_offset = offsetof(struct txgbe_adapter, _stat) \ +} + +static struct txgbe_stats txgbe_gstrings_stats[] = { + TXGBE_STAT("rx_pkts_nic", stats.gprc), + TXGBE_STAT("tx_pkts_nic", stats.gptc), + TXGBE_STAT("rx_bytes_nic", stats.gorc), + TXGBE_STAT("tx_bytes_nic", stats.gotc), + TXGBE_STAT("lsc_int", lsc_int), + TXGBE_STAT("tx_busy", tx_busy), + TXGBE_STAT("non_eop_descs", non_eop_descs), + TXGBE_STAT("rx_broadcast", stats.bprc), + TXGBE_STAT("tx_broadcast", stats.bptc), + TXGBE_STAT("rx_multicast", stats.mprc), + TXGBE_STAT("tx_multicast", stats.mptc), + TXGBE_STAT("rx_mac_good", stats.tpr), + TXGBE_STAT("rdb_pkts", stats.rdpc), + TXGBE_STAT("rdb_drop", stats.rddc), + TXGBE_STAT("tdm_pkts", stats.tdmpc), + TXGBE_STAT("tdm_drop", stats.tdmdc), + TXGBE_STAT("tdb_pkts", stats.tdbpc), + TXGBE_STAT("rx_parser_pkts", stats.psrpc), + TXGBE_STAT("rx_parser_drop", stats.psrdc), + TXGBE_STAT("lsec_untag_pkts", stats.untag), + TXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + TXGBE_STAT("tx_timeout_count", tx_timeout_count), + TXGBE_STAT("tx_restart_queue", restart_queue), + TXGBE_STAT("rx_long_length_count", stats.roc), + TXGBE_STAT("rx_short_length_count", stats.ruc), + TXGBE_STAT("tx_flow_control_xon", stats.lxontxc), + TXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + TXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + TXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + TXGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + TXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + TXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + TXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + TXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + TXGBE_STAT("hw_rsc_aggregated", rsc_total_count), + TXGBE_STAT("hw_rsc_flushed", rsc_total_flush), + TXGBE_STAT("fdir_match", stats.fdirmatch), + TXGBE_STAT("fdir_miss", stats.fdirmiss), + TXGBE_STAT("fdir_overflow", fdir_overflow), +#if IS_ENABLED(CONFIG_FCOE) + TXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), + TXGBE_STAT("fcoe_last_errors", stats.fclast), + TXGBE_STAT("rx_fcoe_dropped", 
stats.fcoerpdc), + TXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), + TXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), + TXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), + TXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), + TXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), + TXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), +#endif /* CONFIG_FCOE */ + TXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + TXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + TXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + TXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), + TXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + TXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +/* txgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ + +#define TXGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define TXGBE_NUM_TX_QUEUES netdev->num_tx_queues + +#define TXGBE_QUEUE_STATS_LEN ( \ + (TXGBE_NUM_TX_QUEUES + TXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct txgbe_queue_stats) / sizeof(u64))) +#define TXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(txgbe_gstrings_stats) +#define TXGBE_NETDEV_STATS_LEN ARRAY_SIZE(txgbe_gstrings_net_stats) +#define TXGBE_PB_STATS_LEN ( \ + (sizeof(((struct txgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define TXGBE_STATS_LEN (TXGBE_GLOBAL_STATS_LEN + \ + TXGBE_NETDEV_STATS_LEN + \ + TXGBE_PB_STATS_LEN + \ + TXGBE_QUEUE_STATS_LEN) + +static const char txgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define TXGBE_TEST_LEN (sizeof(txgbe_gstrings_test) / ETH_GSTRING_LEN) + +struct txgbe_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u64 flag; + bool read_only; +}; + +#define TXGBE_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct txgbe_priv_flags txgbe_gstrings_priv_flags[] = { + TXGBE_PRIV_FLAG("lldp", TXGBE_ETH_PRIV_FLAG_LLDP, 0), + TXGBE_PRIV_FLAG("legacy-rx", TXGBE_ETH_PRIV_FLAG_LEGACY_RX, 0), +}; + +#define TXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(txgbe_gstrings_priv_flags) + +/* currently supported speeds for 10G */ +#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) + +#define txgbe_isbackplane(type) \ + ((type == txgbe_media_type_backplane) ? 
true : false) + +static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd, u32 advertised_speed) +{ + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseLR_Full); + } + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseSR_Full); + + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); + + break; + case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseLR_Full); + + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + break; + } + + return 0; +} + +static int txgbe_set_supported_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd) +{ + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseLR_Full); + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseSR_Full); + break; + case txgbe_sfp_type_lr: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseLR_Full); + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + break; + } + + return 0; +} + +static int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct txgbe_adapter 
*adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link; + u32 link_speed = 0; + bool autoneg = false; + bool link_up; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + autoneg = adapter->backplane_an ? 1 : 0; + else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + autoneg = adapter->autoneg ? 1 : 0; + + /* set the supported link speeds */ + if (hw->phy.media_type == txgbe_media_type_copper) { + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber_qsfp) { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 40000baseSR4_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + + if ((supported_link & TXGBE_LINK_SPEED_10GB_FULL) || + (supported_link & TXGBE_LINK_SPEED_1GB_FULL)) + txgbe_set_supported_1g_10gtypes(hw, cmd); + if (hw->phy.multispeed_fiber && hw->mac.type == txgbe_mac_sp) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + } else { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + break; + } + } + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + hw->phy.autoneg_advertised); + } else { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 
10000baseKX4_Full); + break; + } + } + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + hw->phy.autoneg_advertised); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + } else { + if (supported_link & TXGBE_LINK_SPEED_40GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 40000baseSR4_Full); + } + if (supported_link & TXGBE_LINK_SPEED_25GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + } + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_10GB_FULL); + } else { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + } + } + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_1GB_FULL); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } + if (supported_link & TXGBE_LINK_SPEED_100_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (supported_link & TXGBE_LINK_SPEED_10_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + } + + if (autoneg) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + /* Determine the remaining settings based on the PHY type. 
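+	 * Copper PHYs and 1G copper SFPs report PORT_TP, direct-attach
+	 * modules PORT_DA, optical modules PORT_FIBRE, and an empty
+	 * cage PORT_NONE.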
*/ + switch (adapter->hw.phy.type) { + case txgbe_phy_tn: + case txgbe_phy_aq: + case txgbe_phy_cu_unknown: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_phy_qt: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case txgbe_phy_nl: + case txgbe_phy_sfp_passive_tyco: + case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_active_unknown: + case txgbe_phy_sfp_ftl_active: + case txgbe_phy_sfp_ftl: + case txgbe_phy_sfp_avago: + case txgbe_phy_sfp_intel: + case txgbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_qsfp_type_40g_cu_core0: + case txgbe_qsfp_type_40g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_DA; + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_lr: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_25g_sr_core0: + case txgbe_sfp_type_25g_sr_core1: + case txgbe_sfp_type_25g_lr_core0: + case txgbe_sfp_type_25g_lr_core1: + case txgbe_sfp_type_25g_aoc_core0: + case txgbe_sfp_type_25g_aoc_core1: + case txgbe_qsfp_type_40g_sr_core0: + case txgbe_qsfp_type_40g_sr_core1: + case txgbe_qsfp_type_40g_lr_core0: + case txgbe_qsfp_type_40g_lr_core1: + case txgbe_qsfp_type_40g_active_core0: + case txgbe_qsfp_type_40g_active_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case txgbe_sfp_type_not_present: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_NONE; + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_sfp_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + break; + case txgbe_phy_xaui: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case txgbe_phy_unknown: + case txgbe_phy_generic: + case txgbe_phy_sfp_unsupported: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI && + (hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) { + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + + return 0; + } + } + if (!in_interrupt()) { + hw->mac.ops.check_link(hw, 
&link_speed, &link_up, false); + } else { + /* this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + /* Indicate pause support */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case txgbe_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case txgbe_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case txgbe_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + cmd->base.speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + cmd->base.speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + cmd->base.speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case TXGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case TXGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + if (hw->mac.type == txgbe_mac_aml) { + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(cmd, supported, FEC_BASER); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_OFF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_NONE); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_RS); + if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FEC_BASER); + } + if (!adapter->autoneg) + ethtool_link_ksettings_del_link_mode(cmd, advertising, Autoneg); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = adapter->autoneg; + + return 0; +} + +static int txgbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 advertised, old, link_support; + bool autoneg; + s32 err = 0; + struct ethtool_link_ksettings temp_ks; + u32 curr_autoneg = 2; + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + adapter->backplane_an = cmd->base.autoneg ? 
1 : 0;
+
+	if (hw->phy.media_type == txgbe_media_type_copper || hw->phy.multispeed_fiber) {
+		memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings));
+		/* To be compatible with test cases */
+		if (hw->phy.media_type == txgbe_media_type_fiber) {
+			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  25000baseSR_Full)) {
+				ethtool_link_ksettings_add_link_mode(&temp_ks, supported,
+								     25000baseSR_Full);
+			}
+
+			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  10000baseT_Full)) {
+				ethtool_link_ksettings_add_link_mode(&temp_ks, supported,
+								     10000baseT_Full);
+				ethtool_link_ksettings_del_link_mode(&temp_ks, supported,
+								     10000baseSR_Full);
+				ethtool_link_ksettings_del_link_mode(&temp_ks, supported,
+								     10000baseLR_Full);
+			}
+			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  1000baseT_Full)) {
+				ethtool_link_ksettings_add_link_mode(&temp_ks, supported,
+								     1000baseT_Full);
+				ethtool_link_ksettings_del_link_mode(&temp_ks, supported,
+								     1000baseX_Full);
+			}
+		}
+
+		/* this function does not support duplex forcing, but can
+		 * limit the advertising of the adapter to the specified speed
+		 */
+		if (!bitmap_subset(cmd->link_modes.advertising, temp_ks.link_modes.supported,
+				   __ETHTOOL_LINK_MODE_MASK_NBITS))
+			return -EINVAL;
+
+		old = hw->phy.autoneg_advertised;
+		advertised = 0;
+
+		if (!cmd->base.autoneg) {
+			if (cmd->base.speed == SPEED_25000)
+				advertised |= TXGBE_LINK_SPEED_25GB_FULL;
+			else if (cmd->base.speed == SPEED_10000)
+				advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+			else if (cmd->base.speed == SPEED_1000)
+				advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+			else
+				advertised |= old;
+		} else {
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 25000baseSR_Full))
+				advertised |= TXGBE_LINK_SPEED_25GB_FULL;
+
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 10000baseSR_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 10000baseLR_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 10000baseT_Full))
+				advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseX_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseT_Full))
+				advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 100baseT_Full))
+				advertised |= TXGBE_LINK_SPEED_100_FULL;
+
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 10baseT_Full))
+				advertised |= TXGBE_LINK_SPEED_10_FULL;
+		}
+
+		if (advertised == TXGBE_LINK_SPEED_1GB_FULL &&
+		    hw->phy.media_type != txgbe_media_type_copper) {
+			curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL);
+			curr_autoneg = !!(curr_autoneg & (0x1 << 12));
+			if (old == advertised && (curr_autoneg == !!(cmd->base.autoneg)))
+				return 0;
+		}
+
+		err = hw->mac.ops.get_link_capabilities(hw,
+							&link_support, &autoneg);
+		if (err)
+			e_info(probe, "get link capabilities failed with code %d\n", err);
+		if (!(link_support & advertised)) {
+			e_info(probe, "unsupported advertised: %x\n", advertised);
+			return -EINVAL;
+		}
+
+		/* this sets the link speed and restarts auto-neg */
+		while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+			usleep_range(1000, 2000);
+
+		adapter->autoneg = cmd->base.autoneg ? 1 : 0;
+		hw->mac.autotry_restart = true;
+		adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+		txgbe_service_event_schedule(adapter);
+		err = hw->mac.ops.setup_link(hw, advertised, true);
+		if (err) {
+			e_info(probe, "setup link failed with code %d\n", err);
+			hw->mac.ops.setup_link(hw, old, true);
+		}
+		if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)
+			hw->mac.ops.flap_tx_laser(hw);
+
+		/* notify fw autoneg status */
+		txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg);
+
+		clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state);
+	} else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 ||
+		   (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) {
+		if (!cmd->base.autoneg) {
+			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  10000baseKR_Full) &&
+			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  1000baseKX_Full) &&
+			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  10000baseKX4_Full))
+				return -EINVAL;
+		}
+		advertised = 0;
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full)) {
+			err = txgbe_set_link_to_kr(hw, 1);
+			advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+		} else if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								 10000baseKX4_Full)) {
+			err = txgbe_set_link_to_kx4(hw, 1);
+			advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+		} else if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								 1000baseKX_Full)) {
+			advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+			err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0);
+		}
+		if (err)
+			return -EACCES;
+		goto out;
+	} else {
+		/* in this case we currently only support 10Gb/FULL */
+		if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+			return -EINVAL;
+		} else if ((ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseT_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseKX_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseX_Full))) {
+			memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings));
+
+			if (ethtool_link_ksettings_test_link_mode(cmd,
+								  advertising, 1000baseT_Full)) {
+				ethtool_link_ksettings_add_link_mode(&temp_ks,
+								     supported, 1000baseT_Full);
+				ethtool_link_ksettings_del_link_mode(&temp_ks,
+								     supported, 1000baseKX_Full);
+			}
+
+			if (!bitmap_subset(cmd->link_modes.advertising,
+					   temp_ks.link_modes.supported,
+					   __ETHTOOL_LINK_MODE_MASK_NBITS))
+				return -EINVAL;
+
+			old = hw->phy.autoneg_advertised;
+			advertised = 0;
+
+			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+								  1000baseX_Full) ||
+			    ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full))
+				advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+			if (advertised == TXGBE_LINK_SPEED_1GB_FULL) {
+				curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL);
+				curr_autoneg = !!(curr_autoneg & (0x1 << 12));
+			}
+			if (old == advertised && (curr_autoneg == !!cmd->base.autoneg))
+				return -EINVAL;
+			/* this sets the link speed and restarts auto-neg */
+			while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+				usleep_range(1000, 2000);
+
+			adapter->autoneg = cmd->base.autoneg ?
1 : 0; + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + hw->mac.ops.flap_tx_laser(hw); + + /* notify fw autoneg status */ + txgbe_hic_write_autoneg_status(hw, cmd->base.autoneg); + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } + adapter->autoneg = cmd->base.autoneg ? 1 : 0; + } + if (err) + return -EINVAL; +out: + return err; +} + +static int txgbe_get_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + bool autoneg = false; + u32 speed = 0; + bool link_up; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + hw->mac.ops.check_link(hw, &speed, &link_up, false); + fecparam->fec = 0; + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + if (adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + fecparam->fec |= ETHTOOL_FEC_BASER; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + fecparam->fec |= ETHTOOL_FEC_RS; + else + fecparam->fec |= ETHTOOL_FEC_OFF; + + if (!link_up) { + fecparam->active_fec = ETHTOOL_FEC_OFF; + goto done; + } + switch (adapter->cur_fec_link) { + case TXGBE_PHY_FEC_BASER: + fecparam->active_fec = ETHTOOL_FEC_BASER; + break; + case TXGBE_PHY_FEC_RS: + fecparam->active_fec = ETHTOOL_FEC_RS; + break; + case TXGBE_PHY_FEC_OFF: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + default: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } +done: + return err; +} + +static int txgbe_set_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u8 cur_fec_mode = adapter->fec_link_mode; + bool autoneg = false; + u32 supported_link = 0; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + if (hw->mac.type != txgbe_mac_aml) { + err = -EAGAIN; + goto done; + } + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + break; + case ETHTOOL_FEC_BASER: + adapter->fec_link_mode = TXGBE_PHY_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + adapter->fec_link_mode = TXGBE_PHY_FEC_OFF; + break; + case ETHTOOL_FEC_RS: + adapter->fec_link_mode = TXGBE_PHY_FEC_RS; + break; + default: + e_warn(drv, "Unsupported FEC mode: %d", + fecparam->fec); + err = -EINVAL; + goto done; + } + if (cur_fec_mode != adapter->fec_link_mode) { + /* reset link */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } +done: + return err; +} + +static void txgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (txgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == txgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_tx_pause) { + 
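+		/* Tx-only flow control: the MAC emits PAUSE frames under Rx
+		 * buffer pressure but does not honour PAUSE frames it receives
+		 */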
pause->tx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int txgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_fc_info fc = hw->fc; + + /* some devices do not support autoneg of flow control */ + if (pause->autoneg == AUTONEG_ENABLE && + !txgbe_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = txgbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = txgbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = txgbe_fc_tx_pause; + else + fc.requested_mode = txgbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct txgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} + +static u32 txgbe_get_msglevel(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void txgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int txgbe_get_regs_len(struct net_device __always_unused *netdev) +{ +#define TXGBE_REGS_LEN 4096 + return TXGBE_REGS_LEN * sizeof(u32); +} + +#define TXGBE_GET_STAT(_A_, _R_) ((_A_)->stats._R_) + +static void txgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, TXGBE_REGS_LEN * sizeof(u32)); + regs_buff[TXGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PWR);//0 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_CTL);//1 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PF_SM);//2 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST);//3 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_ST);//4 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_SWSM);//5 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_CTL);//7 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_EN);//8 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ST);//9 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_INT_EN);//12 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMD);//14 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_DATA);//15 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_STATUS);//16 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_USR_CMD);//17 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_ST);//23 + 
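+	/* The trailing //N comments track each word's index into regs_buff so
+	 * the layout can be cross-checked against TXGBE_REGS_LEN; the raw dump
+	 * can be read back from userspace with e.g. "ethtool -d <iface> raw on".
+	 */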
regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN);//25 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN_GPE);//26 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_GENEVE);//27 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TEREDO);//28 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TCP_TIME);//29 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_LED_CTL);//30 + /* GPIO */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_DR);//31 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_DDR);//32 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_CTL);//33 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTEN);//34 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTMASK);//35 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTSTATUS);//36 + /* I2C */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CON);//37 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TAR);//38 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_DATA_CMD);//39 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_HCNT);//40 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_LCNT);//41 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_HCNT);//42 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_LCNT);//43 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SCL_HCNT);//44 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_STAT);//45 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_MASK);//46 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RAW_INTR_STAT);//47 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RX_TL);//48 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_TL);//49 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_INTR);//50 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_UNDER);//51 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_OVER);//52 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_OVER);//53 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RD_REQ);//54 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_ABRT);//55 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_DONE);//56 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_ACTIVITY);//57 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_STOP_DET);//58 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_START_DET);//59 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_GEN_CALL);//60 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE);//61 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_STATUS);//62 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TXFLR);//63 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RXFLR);//64 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_HOLD);//65 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_ABRT_SOURCE);//66 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_SETUP);//67 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE_STATUS);//68 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SPKLEN);//69 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SPKLEN);//70 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT);//71 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT);//72 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_SCL_STUCK_DET);//73 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_DEVICE_ID);//74 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_PARAM_1);//75 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_VERSION);//76 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_TYPE);//77 + /* TX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_TDESC);//78 + /* RX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RDESC);//79 + regs_buff[id++] = 
TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RHDR);//80 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RPL);//81 + + /* TDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_CTL);//82 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(0));//83 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(1));//84 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PB_THRE(i));//85-92 + + for (i = 0; i < 4; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_LLQ(i));//93-96 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_L);//97 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_H);//98 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_L);//99 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_H);//100 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_L);//101 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_H);//102 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_L);//103 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_H);//104 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_L);//105 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_H);//106 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_INS(i));//107-234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETAG_INS(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CTL);//235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MMW);//236 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CFG(i));//237-244 + + for (i = 0; i < 128; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VM_CREDIT(i));//245-372 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_EOF);//373 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_SOF);//374 + + /* RDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CTL);//375 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(0));//376 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(1));//377 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_RSC_CTL);//378 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CFG(i));//379-386 + + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_QDE(i));//387-394 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_HIDE(i)); + } + + /* RDB */ + /*flow control */ + for (i = 0; i < 4; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCV(i));//395-398 + + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCL(i));//399-414 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCH(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCRT);//415 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCC);//416 + /* receive packet buffer */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_CTL);//417 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_WRAP);//418 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_UP2TC);//419 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_SZ(i));//420-435 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_MPCNT(i)); + } + /* lli interrupt */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_LLI_THRE);//436 + /* ring assignment */ + for (i = 0; i < 64; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PL_CFG(i));//437-500 + + for (i = 0; i < 32; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSTBL(i));//501-532 + + for (i = 0; i < 10; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSRK(i));//533-542 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSS_TC);//543 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RA_CTL);//544 + for (i = 0; i < 128; i++) { + regs_buff[id++] = 
TXGBE_R32_Q(hw, TXGBE_RDB_5T_SA(i));//545-1184 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_DA(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_SDP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL0(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL1(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_SYN_CLS);//1185 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_ETYPE_CLS(i));//1186-1193 + + /* fcoe redirection table */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_CTL);//1194 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_TBL(i));//1195-1202 + + /*flow director */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CTL);//1203 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HKEY);//1204 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SKEY);//1205 + for (i = 0; i < 16; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX_CFG(i));//1206-1221 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FREE);//1222 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_LEN);//1223 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_USE_ST);//1224 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FAIL_ST);//1225 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MATCH);//1226 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MISS);//1227 + for (i = 0; i < 3; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6(i));//1228-1230 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA);//1231 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA);//1232 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_PORT);//1233 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX);//1234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HASH);//1235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CMD);//1236 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA4_MSK);//1237 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA4_MSK);//1238 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_TCP_MSK);//1239 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_UDP_MSK);//1240 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SCTP_MSK);//1241 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6_MSK);//1242 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_OTHER_MSK);//1243 + + /* PSR */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_CTL);//1244 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_CTL);//1245 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_CTL);//1246 + for (i = 0; i < 64; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_L2CTL(i));//1247-1310 + + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_ETYPE_SWC(i));//1311-1318 + + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MC_TBL(i));//1319-1702 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_UC_TBL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_TBL(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_L);//1703 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_H);//1704 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_VM_L);//1705 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_VM_H);//1706 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_IDX);//1707 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC);//1708 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_L);//1709 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_H);//1710 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_IDX);//1711 + for (i = 0; i < 4; i++) { + 
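+		/* four packet-mirror rules: control word plus VLAN and VM pool
+		 * bitmaps (low/high halves) per rule, going by the register names
+		 */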
regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_CTL(i));//1712-1731 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_H(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_CTL);//1732 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPL);//1733 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPH);//1734 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRL);//1735 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRH);//1736 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_MSGTYPE);//1737 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_CTL);//1738 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IPV);//1739 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1740 + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP4TBL(i));//1741-1748 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP6TBL(i)); + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_L(i));//1749-1796 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_MSK(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1797 + + /* TDB */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_TFCS);//1798 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PB_SZ(0));//1799 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_UP2TC);//1800 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CTL);//1801 + for (i = 0; i < 8; i++) + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CFG(i));//1802-1809 + + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_MNG_TC);//1810 + + /* tsec */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_CTL);//1811 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_ST);//1812 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AF);//1813 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AE);//1814 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_MIN_IFG);//1815 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_CTL);//1816 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPL);//1817 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPH);//1818 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIML);//1819 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIMH);//1820 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_INC);//1821 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJL);//1822 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJH);//1823 + + /* RSEC */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_CTL);//1824 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_ST);//1825 + + /* BAR register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IC);//1826 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_ICS);//1827 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IEN);//1828 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_GPIE);//1829 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(0));//1830 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(1));//1831 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(0));//1832 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(1));//1833 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(0));//1834 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(1));//1835 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(0));//1836 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(1));//1837 + regs_buff[id++] = TXGBE_R32_Q(hw, 
TXGBE_PX_ISB_ADDR_L);//1838 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ISB_ADDR_H);//1839 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITRSEL);//1840 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITR(i));//1841-1968 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IVAR(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IVAR);//1969 + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAL(i));//1970-3249 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_CFG(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_CFG(i)); + } +} + +static int txgbe_get_eeprom_len(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.eeprom.word_size * 2; +} + +static int txgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int txgbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && !ret_val) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - 
first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + hw->eeprom.ops.update_checksum(hw); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void txgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, txgbe_driver_name, + sizeof(drvinfo->driver)); + strscpy(drvinfo->version, txgbe_driver_version, + sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) + drvinfo->n_stats = TXGBE_STATS_LEN - + (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct txgbe_queue_stats) / sizeof(u64)) * 2; + else + drvinfo->n_stats = TXGBE_STATS_LEN; + + drvinfo->testinfo_len = TXGBE_TEST_LEN; + drvinfo->regdump_len = txgbe_get_regs_len(netdev); +} + +static void txgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = TXGBE_MAX_RXD; + ring->tx_max_pending = TXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *ringp, + struct netlink_ext_ack *extack) +{ + struct txgbe_ring *tx_ring = NULL, *rx_ring = NULL; + struct txgbe_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + int i, j, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + TXGBE_MIN_TXD, TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + TXGBE_MIN_RXD, TXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) + return 0; + + /* If there is a AF_XDP UMEM attached to any of Rx rings, + * disallow changing the number of descriptors -- regardless + * if the netdev is running or not. + */ + if (txgbe_xsk_any_rx_ring_enabled(adapter)) + return -EBUSY; + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto done; + } + + i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, + adapter->num_rx_queues); + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. 
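+	 * (Only the desc and buffer-info pointers are re-allocated by
+	 * txgbe_setup_tx_resources(); the struct copy carries everything else.)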
+ * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + netdev_info(netdev, + "Changing Tx descriptor count from %d to %d.\n", + adapter->tx_ring[0]->count, new_tx_count); + tx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!tx_ring) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&tx_ring[i], adapter->tx_ring[i], + sizeof(struct txgbe_ring)); + + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&tx_ring[i]); + } + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; + } + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + memcpy(&tx_ring[i], adapter->xdp_ring[j], + sizeof(struct txgbe_ring)); + + tx_ring[i].count = new_tx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + tx_ring[i].desc = NULL; + tx_ring[i].tx_buffer_info = NULL; + err = txgbe_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&tx_ring[i]); + } + + kfree(tx_ring); + tx_ring = NULL; + err = -ENOMEM; + + goto done; + } + } + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + netdev_info(netdev, + "Changing Rx descriptor count from %d to %d\n", + adapter->rx_ring[0]->count, new_rx_count); + rx_ring = kcalloc(i, sizeof(struct txgbe_ring), GFP_KERNEL); + if (!rx_ring) { + err = -ENOMEM; + goto free_tx; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + u16 unused; + + memcpy(&rx_ring[i], adapter->rx_ring[i], + sizeof(struct txgbe_ring)); + xdp_rxq_info_unreg(&rx_ring[i].xdp_rxq); + rx_ring[i].count = new_rx_count; + /* the desc and bi pointers will be reallocated + * in the setup call + */ + rx_ring[i].desc = NULL; + rx_ring[i].rx_buffer_info = NULL; + err = txgbe_setup_rx_resources(&rx_ring[i]); + if (err) + goto rx_unwind; + + unused = txgbe_desc_unused(&rx_ring[i]); + err = txgbe_alloc_rx_buffers(&rx_ring[i], unused); +rx_unwind: + if (err) { + err = -ENOMEM; + + do { + txgbe_free_rx_resources(&rx_ring[i]); + } while (i--); + kfree(rx_ring); + rx_ring = NULL; + + goto free_tx; + } + } + } + + /* Bring interface down, copy in the new ring info, + * then restore the interface + */ + txgbe_down(adapter); + + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + for (j = 0; j < adapter->num_xdp_queues; j++, i++) { + txgbe_free_tx_resources(adapter->xdp_ring[j]); + memcpy(adapter->xdp_ring[j], &tx_ring[i], + sizeof(struct txgbe_ring)); + } + + kfree(tx_ring); + tx_ring = NULL; + } + + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + txgbe_free_rx_resources(adapter->rx_ring[i]); + /* this is to fake out the allocation routine + * into thinking it has to realloc everything + * but the recycling logic will let us re-use + * the buffers allocated above + */ + rx_ring[i].next_to_use = 0; + rx_ring[i].next_to_clean = 0; + rx_ring[i].next_to_alloc = 0; + /* do a struct copy */ + memcpy(adapter->rx_ring[i], &rx_ring[i], + sizeof(struct txgbe_ring)); + } + kfree(rx_ring); + rx_ring = NULL; + } + + 
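+	/* every allocation succeeded and the old resources have been freed, so
+	 * it is now safe to commit the new descriptor counts
+	 */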
adapter->tx_ring_count = new_tx_count;
+	adapter->xdp_ring_count = new_tx_count;
+	adapter->rx_ring_count = new_rx_count;
+
+	txgbe_up(adapter);
+
+free_tx:
+	/* error cleanup if the Rx allocations failed after getting Tx:
+	 * only the temporary Tx rings were set up at this point, so free
+	 * those and leave the adapter's live rings untouched
+	 */
+	if (tx_ring) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			txgbe_free_tx_resources(&tx_ring[i]);
+
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++)
+			txgbe_free_tx_resources(&tx_ring[i]);
+
+		kfree(tx_ring);
+		tx_ring = NULL;
+	}
+
+done:
+	clear_bit(__TXGBE_RESETTING, &adapter->state);
+
+	return err;
+}
+
+static int txgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	switch (sset) {
+	case ETH_SS_TEST:
+		return TXGBE_TEST_LEN;
+	case ETH_SS_STATS:
+		if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES)
+			return TXGBE_STATS_LEN - (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues) *
+			       (sizeof(struct txgbe_queue_stats) / sizeof(u64)) * 2;
+		else
+			return TXGBE_STATS_LEN;
+
+	case ETH_SS_PRIV_FLAGS:
+		return TXGBE_PRIV_FLAGS_STR_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * txgbe_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The get string set count and the string set should be matched for each
+ * flag returned.  Add new strings for each flag to the txgbe_gstrings_priv_flags
+ * array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 txgbe_get_priv_flags(struct net_device *dev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	u32 i, ret_flags = 0;
+
+	for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) {
+		const struct txgbe_priv_flags *priv_flags;
+
+		priv_flags = &txgbe_gstrings_priv_flags[i];
+
+		if (priv_flags->flag & adapter->eth_priv_flags)
+			ret_flags |= BIT(i);
+	}
+	return ret_flags;
+}
+
+/**
+ * txgbe_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int txgbe_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	u32 orig_flags, new_flags, changed_flags;
+	bool reset_needed = false;
+	u32 i;
+	s32 status = 0;
+
+	orig_flags = adapter->eth_priv_flags;
+	new_flags = orig_flags;
+
+	for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) {
+		const struct txgbe_priv_flags *priv_flags;
+
+		priv_flags = &txgbe_gstrings_priv_flags[i];
+
+		if (flags & BIT(i))
+			new_flags |= priv_flags->flag;
+		else
+			new_flags &= ~(priv_flags->flag);
+
+		/* If this is a read-only flag, it can't be changed */
+		if (priv_flags->read_only &&
+		    ((orig_flags ^ new_flags) & BIT(i)))
+			return -EOPNOTSUPP;
+	}
+
+	changed_flags = orig_flags ^ new_flags;
+
+	if (!changed_flags)
+		return 0;
+
+	if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP)
+		reset_needed = true;
+
+	if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) {
+		status = txgbe_hic_write_lldp(&adapter->hw,
+					      (u32)(new_flags & TXGBE_ETH_PRIV_FLAG_LLDP));
+		if (!status)
+			adapter->eth_priv_flags = new_flags;
+	}
+
+	if (changed_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX) {
+		adapter->eth_priv_flags = new_flags;
+
+		if (adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LEGACY_RX)
+			adapter->flags2 |= TXGBE_FLAG2_RX_LEGACY;
+		else
+			adapter->flags2 &= ~TXGBE_FLAG2_RX_LEGACY;
+
+		/* reset interface to repopulate queues */
+		if (netif_running(dev))
+			txgbe_reinit_locked(adapter);
+	}
+
+	return status;
+}
+
+static void
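+/* fills the stats buffer in exactly the order laid down by
+ * txgbe_get_strings(ETH_SS_STATS): netdev stats, adapter-global stats,
+ * per-ring Tx/Rx packet+byte pairs, then per-TC pause counters
+ */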
txgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *net_stats = &adapter->net_stats; + unsigned int start; + struct txgbe_ring *ring; + int i, j; + char *p; + + txgbe_update_stats(adapter); + + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + txgbe_gstrings_net_stats[i].stat_offset; + data[i] = (txgbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < TXGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + txgbe_gstrings_stats[j].stat_offset; + data[i] = (txgbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + + i += 2; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } +} + +static void txgbe_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + txgbe_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void txgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *txgbe_gstrings_test, + TXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != TXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + txgbe_get_priv_flag_strings(netdev, data); + break; + } +} + +static int txgbe_link_test(struct 
txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + bool link_up = false; + u32 link_speed = 0; + + if (TXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + goto out; + else + *data = 1; + +out: + return *data; +} + +/* ethtool register test data */ +struct txgbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default sapphire register test */ +static struct txgbe_reg_test reg_test_sapphire[] = { + { TXGBE_RDB_RFCL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { TXGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { TXGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, TXGBE_PX_RR_CFG_RR_EN }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { TXGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +static bool reg_pattern_test(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + +static bool txgbe_reg_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_reg_test *test; + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (TXGBE_REMOVED(hw->hw_addr)) 
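+	/* TXGBE_REMOVED presumably reports a surprise-removed device (BAR
+	 * mapping gone, MMIO reads would only return all-ones), so fail the
+	 * test up front rather than poke dead registers -- an assumption
+	 * based on how it is used here
+	 */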
{ + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool txgbe_eeprom_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (hw->eeprom.ops.validate_checksum(hw, NULL)) { + *data = 1; + goto out; + } else { + *data = 0; + return false; + } + +out: + return true; +} + +static irqreturn_t txgbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct txgbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC1); + icr <<= 32; + icr |= txgbe_misc_isb(adapter, TXGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + +static int txgbe_intr_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + txgbe_setup_isb_resources(adapter); + txgbe_configure_isb(adapter); + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &txgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &txgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &txgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + txgbe_irq_disable(adapter); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. 
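+			 * (This negative check is only meaningful for an
+			 * exclusive IRQ; on a shared line another device
+			 * could legitimately fire the handler.)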
+			 */
+			adapter->test_icr = 0;
+			txgbe_intr_disable(&adapter->hw, ~mask);
+			txgbe_intr_trigger(&adapter->hw, ~mask);
+			TXGBE_WRITE_FLUSH(&adapter->hw);
+			usleep_range(10000, 20000);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in the cause
+		 * register and then force the same interrupt and see
+		 * if one gets posted. If an interrupt was not posted
+		 * to the bus, the test failed.
+		 */
+		adapter->test_icr = 0;
+		txgbe_intr_enable(&adapter->hw, mask);
+		txgbe_intr_trigger(&adapter->hw, mask);
+		TXGBE_WRITE_FLUSH(&adapter->hw);
+		usleep_range(10000, 20000);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+	}
+
+	/* Disable all the interrupts */
+	txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL);
+	TXGBE_WRITE_FLUSH(&adapter->hw);
+	usleep_range(10000, 20000);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+	txgbe_free_isb_resources(adapter);
+
+	return *data;
+}
+
+static void txgbe_free_desc_rings(struct txgbe_adapter *adapter)
+{
+	struct txgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct txgbe_ring *rx_ring = &adapter->test_rx_ring;
+	struct txgbe_hw *hw = &adapter->hw;
+
+	/* shut down the DMA engines now so they can be reinitialized later */
+	hw->mac.ops.disable_rx(hw);
+	txgbe_disable_rx_queue(adapter, rx_ring);
+
+	/* now Tx */
+	wr32(hw, TXGBE_PX_TR_CFG(tx_ring->reg_idx), 0);
+
+	wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0);
+
+	txgbe_reset(adapter);
+
+	txgbe_free_tx_resources(&adapter->test_tx_ring);
+	txgbe_free_rx_resources(&adapter->test_rx_ring);
+}
+
+static void txgbe_loopback_configure_tx_ring(struct txgbe_adapter *adapter,
+					     struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = TXGBE_PX_TR_CFG_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	ring->xsk_pool = NULL;
+	if (ring_is_xdp(ring))
+		ring->xsk_pool = txgbe_xsk_umem(adapter, ring);
+
+	/* disable queue to avoid issues while updating state */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);
+	TXGBE_WRITE_FLUSH(hw);
+
+	wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32);
+
+	/* reset head and tail pointers */
+	wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0);
+	wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0);
+	ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT;
+
+	/* set WTHRESH to encourage burst writeback, it should not be set
+	 * higher than 1 when:
+	 * - ITR is 0 as it could cause false TX hangs
+	 * - ITR is set to > 100k int/sec and BQL is enabled
+	 *
+	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+
+	txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT;
+
+	/* reinitialize flowdirector state */
+	if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	/* initialize XPS */
+	if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+		struct txgbe_q_vector *q_vector = ring->q_vector;
+
+		if (q_vector)
+			netif_set_xps_queue(adapter->netdev,
+					    &q_vector->affinity_mask,
+					    ring->queue_index);
+	}
+
+	clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state);
+
+	/* enable queue */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		msleep(20);
+		txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx));
+	} while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE));
+	if (!wait_loop)
+		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+static int txgbe_setup_desc_rings(struct txgbe_adapter *adapter)
+{
+	struct txgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct txgbe_ring *rx_ring = &adapter->test_rx_ring;
+	struct txgbe_hw *hw = &adapter->hw;
+	int ret_val;
+	int err;
+
+	hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL);
+
+	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = TXGBE_DEFAULT_TXD;
+	tx_ring->queue_index = 0;
+	tx_ring->dev = pci_dev_to_dev(adapter->pdev);
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+
+	err = txgbe_setup_tx_resources(tx_ring);
+	if (err)
+		return 1;
+
+	wr32m(&adapter->hw, TXGBE_TDM_CTL,
+	      TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE);
+
+	txgbe_loopback_configure_tx_ring(adapter, tx_ring);
+
+	/* enable mac transmitter */
+	if (hw->mac.type == txgbe_mac_aml40) {
+		wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) &
+		     ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE |
+		     TXGBE_MAC_TX_CFG_AML_SPEED_40G);
+	} else if (hw->mac.type == txgbe_mac_aml) {
+		if ((rd32(hw, TXGBE_CFG_PORT_ST) & TXGBE_CFG_PORT_ST_AML_LINK_10G) ==
+		    TXGBE_CFG_PORT_ST_AML_LINK_10G)
+			wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) &
+			     ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE |
+			     TXGBE_MAC_TX_CFG_AML_SPEED_10G);
+		else
+			wr32(hw, TXGBE_MAC_TX_CFG, (rd32(hw, TXGBE_MAC_TX_CFG) &
+			     ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE |
+			     TXGBE_MAC_TX_CFG_AML_SPEED_25G);
+	} else {
+		if (txgbe_check_reset_blocked(hw) &&
+		    (hw->phy.autoneg_advertised == TXGBE_LINK_SPEED_1GB_FULL ||
+		     adapter->link_speed == TXGBE_LINK_SPEED_1GB_FULL))
+			wr32m(hw, TXGBE_MAC_TX_CFG,
+			      TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK,
+			      TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_1G);
+		else
+			wr32m(hw, TXGBE_MAC_TX_CFG,
+			      TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK,
+			      TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_10G);
+	}
+
+	/* Setup Rx Descriptor ring and Rx buffers */
+	rx_ring->count = TXGBE_DEFAULT_RXD;
+	rx_ring->queue_index = 0;
+	rx_ring->dev = pci_dev_to_dev(adapter->pdev);
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+
+	err = txgbe_setup_rx_resources(rx_ring);
+	if (err) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	hw->mac.ops.disable_rx(hw);
+
+	txgbe_configure_rx_ring(adapter, rx_ring);
+
+	hw->mac.ops.enable_rx(hw);
+
+	return 0;
+
+err_nomem:
+	txgbe_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static int txgbe_setup_config(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 reg_data;
+
+	/* Setup traffic loopback */
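+	/* BAM/UPE/MPE/TPE put the packet filter into broadcast-accept and
+	 * unicast/multicast/VLAN-tag promiscuous modes, so looped-back test
+	 * frames are not dropped on the receive path.
+	 */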
+	reg_data = rd32(hw, TXGBE_PSR_CTL);
+	reg_data |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_UPE |
+		    TXGBE_PSR_CTL_MPE | TXGBE_PSR_CTL_TPE;
+	wr32(hw, TXGBE_PSR_CTL, reg_data);
+
+	wr32(hw, TXGBE_RSC_CTL,
+	     (rd32(hw, TXGBE_RSC_CTL) |
+	      TXGBE_RSC_CTL_SAVE_MAC_ERR) & ~TXGBE_RSC_CTL_SECRX_DIS);
+
+	wr32(hw, TXGBE_RSC_LSEC_CTL, 0x4);
+
+	wr32(hw, TXGBE_PSR_VLAN_CTL,
+	     rd32(hw, TXGBE_PSR_VLAN_CTL) &
+	     ~TXGBE_PSR_VLAN_CTL_VFE);
+
+	wr32m(&adapter->hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM);
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP);
+
+	/* enable mac transmitter */
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+		wr32m(hw, TXGBE_TSC_CTL,
+		      TXGBE_TSC_CTL_TX_DIS | TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0xd0000);
+
+		wr32m(hw, TXGBE_RSC_CTL,
+		      TXGBE_RSC_CTL_RX_DIS, 0);
+	}
+
+	TXGBE_WRITE_FLUSH(hw);
+	usleep_range(10000, 20000);
+
+	return 0;
+}
+
+static int txgbe_setup_mac_loopback_test(struct txgbe_adapter *adapter)
+{
+	wr32m(&adapter->hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE,
+	      TXGBE_MAC_RX_CFG_LM | TXGBE_MAC_RX_CFG_RE);
+
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_FORCE_LKUP, TXGBE_CFG_PORT_CTL_FORCE_LKUP);
+
+	return 0;
+}
+
+static void txgbe_mac_loopback_cleanup(struct txgbe_adapter *adapter)
+{
+	wr32m(&adapter->hw, TXGBE_TSC_CTL,
+	      TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK, 0x20000);
+	wr32m(&adapter->hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM);
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP);
+}
+
+static int txgbe_setup_phy_loopback_test(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 value;
+
+	/* setup phy loopback */
+	value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0);
+	value |= TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 |
+		 TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1;
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value);
+
+	value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+	txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1,
+			value | TXGBE_SR_PMA_MMD_CTL1_LB_EN);
+	return 0;
+}
+
+static void txgbe_phy_loopback_cleanup(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 value;
+
+	value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0);
+	value &= ~(TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 |
+		   TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1);
+
+	txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value);
+	value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+	txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1,
+			value & ~TXGBE_SR_PMA_MMD_CTL1_LB_EN);
+}
+
+static void txgbe_create_lbtest_frame(struct sk_buff *skb,
+				      unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size >>= 1;
+	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
+	skb->data[frame_size + 10] = 0xBE;
+	skb->data[frame_size + 12] = 0xAF;
+}
+
+static bool txgbe_check_lbtest_frame(struct txgbe_rx_buffer *rx_buffer,
+				     unsigned int frame_size)
+{
+	unsigned char *data;
+	bool match = true;
+
+	frame_size >>= 1;
+
+	data = kmap_local_page(rx_buffer->page) + rx_buffer->page_offset;
+
+	if (data[3] != 0xFF ||
+	    data[frame_size + 10] != 0xBE ||
+	    data[frame_size + 12] != 0xAF)
+		match = false;
+
+	kunmap_local(data);
+
+	return match;
+}
+
+static u16 txgbe_clean_test_rings(struct txgbe_ring *rx_ring,
+				  struct txgbe_ring *tx_ring,
+				  unsigned int size)
+{
+	union txgbe_rx_desc *rx_desc;
+	struct txgbe_rx_buffer *rx_buffer;
+	struct txgbe_tx_buffer *tx_buffer;
+	const int bufsz = txgbe_rx_bufsz(rx_ring);
+
+	u16 rx_ntc, tx_ntc, count = 0;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc);
+
+	while (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) {
+		/* unmap buffer on Tx side */
+		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+		txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
+		/* check Rx buffer */
+		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
+
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer->page_dma,
+					bufsz,
+					DMA_FROM_DEVICE);
+
+		/* verify contents of skb */
+		if (txgbe_check_lbtest_frame(rx_buffer, size))
+			count++;
+
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer->page_dma,
+					   bufsz,
+					   DMA_FROM_DEVICE);
+
+		/* increment Rx/Tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	txgbe_alloc_rx_buffers(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
+static int txgbe_run_loopback_test(struct txgbe_adapter *adapter)
+{
+	struct txgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct txgbe_ring *rx_ring = &adapter->test_rx_ring;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+	u32 flags_orig = adapter->flags;
+
+	/* DCB can modify the frames on Tx */
+	adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
+
+	/* place data into test skb */
+	txgbe_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
+
+	/* Calculate the loop count based on the largest descriptor ring
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop
+	 */
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
+
+	for (j = 0; j <= lc; j++) {
+		/* reset count of good packets */
+		good_cnt = 0;
+
+		/* place 64 packets on the transmit queue */
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = txgbe_xmit_frame_ring(skb,
+							   adapter,
+							   tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
+				good_cnt++;
+		}
+
+		if (good_cnt != 64) {
+			ret_val = 12;
+			break;
+		}
+
+		/* allow 200 milliseconds for packets to go from Tx to Rx */
+		msleep(200);
+
+		good_cnt = txgbe_clean_test_rings(rx_ring, tx_ring, size);
+		if (j == 0) {
+			continue;
+		} else if (good_cnt != 64) {
+			ret_val = 13;
+			break;
+		}
+	}
+
+	/* free the original skb */
+	kfree_skb(skb);
+	adapter->flags = flags_orig;
+
+	return ret_val;
+}
+
+static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+
+	/* Let firmware know the driver has taken over */
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD);
+	*data = txgbe_setup_config(adapter);
+	if (*data)
+		goto err_loopback;
+
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		*data = txgbe_setup_mac_loopback_test(adapter);
+	else
+		*data = txgbe_setup_phy_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+
+	*data = txgbe_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = txgbe_run_loopback_test(adapter);
+	if (*data)
+		e_info(hw, "loopback testing failed\n");
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		txgbe_mac_loopback_cleanup(adapter);
+	else
+		txgbe_phy_loopback_cleanup(adapter);
+
+err_loopback:
+	txgbe_free_desc_rings(adapter);
+out:
+	/* Let firmware take over control of h/w */
+	wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL,
+	      TXGBE_CFG_PORT_CTL_DRV_LOAD, 0);
+
+	return *data;
+}
+
+static void txgbe_diag_test(struct net_device *netdev,
+			    struct ethtool_test *eth_test, u64 *data)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	bool if_running = netif_running(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (TXGBE_REMOVED(hw->hw_addr)) {
+		e_err(hw, "Adapter removed - test blocked\n");
+		data[0] = 1;
+		data[1] = 1;
+		data[2] = 1;
+		data[3] = 1;
+		data[4] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+	set_bit(__TXGBE_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
+			int i;
+
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					e_warn(drv, "Please take active VFs offline and restart diagnostics\n");
+					data[0] = 1;
+					data[1] = 1;
+					data[2] = 1;
+					data[3] = 1;
+					data[4] = 1;
+					eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__TXGBE_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
+		/* Offline tests */
+		e_info(hw, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result
+		 */
+		if (txgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			txgbe_close(netdev);
+		else
+			txgbe_reset(adapter);
+
+		e_info(hw, "register testing starting\n");
+		if (txgbe_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+		txgbe_reset(adapter);
+		e_info(hw, "eeprom testing starting\n");
+		if (txgbe_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		txgbe_reset(adapter);
+		e_info(hw, "interrupt testing starting\n");
+		if (txgbe_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+		    ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)) {
+			e_info(hw, "skip MAC loopback diagnostic when veto set\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+		/* If SRIOV or VMDq is enabled then skip MAC
+		 * loopback diagnostic.
+		 */
+		if (adapter->flags & (TXGBE_FLAG_SRIOV_ENABLED |
+				      TXGBE_FLAG_VMDQ_ENABLED)) {
+			e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+
+		txgbe_reset(adapter);
+		e_info(hw, "loopback testing starting\n");
+		if (txgbe_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+skip_loopback:
+		txgbe_reset(adapter);
+
+		/* clear testing bit and return adapter to previous state */
+		clear_bit(__TXGBE_TESTING, &adapter->state);
+		if (if_running)
+			txgbe_open(netdev);
+	} else {
+		e_info(hw, "online testing starting\n");
+
+		/* Online tests */
+		if (txgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Offline tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__TXGBE_TESTING, &adapter->state);
+	}
+
+skip_ol_tests:
+	msleep_interruptible(4 * 1000);
+}
+
+static void txgbe_get_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+			 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	if (!device_can_wakeup(pci_dev_to_dev(adapter->pdev)))
+		return;
+
+	if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP)
+		return;
+
+	if (adapter->wol & TXGBE_PSR_WKUP_CTL_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & TXGBE_PSR_WKUP_CTL_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & TXGBE_PSR_WKUP_CTL_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & TXGBE_PSR_WKUP_CTL_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+static int txgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP)
+		return -EOPNOTSUPP;
+
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= TXGBE_PSR_WKUP_CTL_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= TXGBE_PSR_WKUP_CTL_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= TXGBE_PSR_WKUP_CTL_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= TXGBE_PSR_WKUP_CTL_MAG;
+
+	hw->wol_enabled = !!(adapter->wol);
+	wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol);
+
+	device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);
+
+	return 0;
+}
+
+static int txgbe_nway_reset(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		txgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
+static int txgbe_set_phys_id(struct net_device *netdev,
+			     enum ethtool_phys_id_state state)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 value = 0;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+			txgbe_hic_notify_led_active(hw, 1);
+		adapter->led_reg = rd32(hw, TXGBE_CFG_LED_CTL);
+		return 2;
+
+	case ETHTOOL_ID_ON:
+		if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) ||
+		    (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) {
+			if (adapter->link_up) {
+				switch (adapter->link_speed) {
+				case TXGBE_LINK_SPEED_10GB_FULL:
+					hw->mac.ops.led_on(hw, TXGBE_LED_LINK_10G);
+					break;
+				case TXGBE_LINK_SPEED_1GB_FULL:
+					hw->mac.ops.led_on(hw, TXGBE_LED_LINK_1G);
+					break;
+				case TXGBE_LINK_SPEED_100_FULL:
+					hw->mac.ops.led_on(hw, TXGBE_LED_LINK_100M);
+					break;
+				default:
+					break;
+				}
+			} else {
+				hw->mac.ops.led_on(hw, TXGBE_LED_LINK_10G);
+			}
+		} else {
+			hw->mac.ops.led_on(hw, TXGBE_LED_LINK_UP);
+		}
+		break;
+
+	case ETHTOOL_ID_OFF:
+		if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) ||
+		    (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) {
+			if (adapter->link_up) {
+				switch (adapter->link_speed) {
+				case TXGBE_LINK_SPEED_10GB_FULL:
+					hw->mac.ops.led_off(hw, TXGBE_LED_LINK_10G);
+					break;
+				case TXGBE_LINK_SPEED_1GB_FULL:
+					hw->mac.ops.led_off(hw, TXGBE_LED_LINK_1G);
+					break;
+				case TXGBE_LINK_SPEED_100_FULL:
+					hw->mac.ops.led_off(hw, TXGBE_LED_LINK_100M);
+					break;
+				default:
+					break;
+				}
+			} else {
+				hw->mac.ops.led_off(hw, TXGBE_LED_LINK_10G);
+			}
+		} else {
+			hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP);
+		}
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		/* Restore LED settings */
+		if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+			txgbe_hic_notify_led_active(hw, 0);
+		wr32(&adapter->hw, TXGBE_CFG_LED_CTL,
+		     adapter->led_reg);
+		if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) {
+			txgbe_read_mdio(&hw->phy_dev, hw->phy.addr,
+					31, 0xF021, &value);
+			txgbe_write_mdio(&hw->phy_dev, hw->phy.addr,
+					 31, 0xF021, (value & 0xFFFC) | 0x0);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int txgbe_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec,
+			      struct kernel_ethtool_coalesce *kernel_coal,
+			      struct netlink_ext_ack *extack)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+	/* only valid if in constant ITR mode */
+	if (adapter->rx_itr_setting <= 1)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+	else
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	if (adapter->rx_itr_setting == 1)
+		ec->use_adaptive_rx_coalesce = 1;
+
+	/* if in mixed tx/rx queues per vector mode, report only rx settings */
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+		return 0;
+
+	/* only valid if in constant ITR mode */
+	if (adapter->tx_itr_setting <= 1)
+		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+	else
+		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+	return 0;
+}
+
+static bool txgbe_update_rsc(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	/* nothing to do if LRO or RSC are not enabled */
+	if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) ||
+	    !(netdev->features & NETIF_F_LRO))
+		return false;
+
+	/* check the feature flag value and enable RSC if necessary */
+	if (adapter->rx_itr_setting == 1 ||
+	    adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) {
+		if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) {
+			adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
+			return true;
+		}
+	/* if interrupt rate is too high then disable RSC */
+	} else if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) {
+		adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED;
+		e_info(probe, "rx-usecs set too low, falling back to software LRO\n");
+		return true;
+	}
+	return false;
+}
+
+static int txgbe_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec,
+			      struct kernel_ethtool_coalesce *kernel_coal,
+			      struct netlink_ext_ack *extack)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_q_vector *q_vector;
+	int i;
+	u16 tx_itr_param, rx_itr_param;
+	u16 tx_itr_prev;
+	bool need_reset = false;
+
+	if (adapter->q_vector[0]->tx.count &&
+	    adapter->q_vector[0]->rx.count) {
+		/* reject Tx specific changes in case of mixed RxTx vectors */
+		if (ec->tx_coalesce_usecs)
+			return -EINVAL;
+		tx_itr_prev = adapter->rx_itr_setting;
+	} else {
+		tx_itr_prev = adapter->tx_itr_setting;
+	}
+
+	if (ec->tx_max_coalesced_frames_irq) {
+		if (ec->tx_max_coalesced_frames_irq <= TXGBE_MAX_TX_WORK)
+			adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+		else
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	if ((ec->rx_coalesce_usecs > (TXGBE_MAX_EITR >> 2)) ||
+	    (ec->tx_coalesce_usecs > (TXGBE_MAX_EITR >> 2)))
+		return -EINVAL;
+
+	if (ec->use_adaptive_tx_coalesce)
+		return -EINVAL;
+
+	if (ec->use_adaptive_rx_coalesce) {
+		adapter->rx_itr_setting = 1;
+		return 0;
+	}
+
+	/* Restore the default rx-usecs value when adaptive ITR is turned off.
+	 * The user shall turn off adaptive ITR and set a user-defined rx-usecs
+	 * value in two separate commands.
+	 */
+	if (adapter->rx_itr_setting == 1) {
+		adapter->rx_itr_setting = TXGBE_20K_ITR;
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+	}
+
+	if (ec->rx_coalesce_usecs > 1)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+	if (adapter->rx_itr_setting == 1)
+		rx_itr_param = TXGBE_20K_ITR;
+	else
+		rx_itr_param = adapter->rx_itr_setting;
+
+	if (ec->tx_coalesce_usecs > 1)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+	if (adapter->tx_itr_setting == 1)
+		tx_itr_param = TXGBE_12K_ITR;
+	else
+		tx_itr_param = adapter->tx_itr_setting;
+
+	/* mixed Rx/Tx */
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+		adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+	/* detect ITR changes that require update of TXDCTL.WTHRESH */
+	if (adapter->tx_itr_setting != 1 &&
+	    adapter->tx_itr_setting < TXGBE_100K_ITR) {
+		if (tx_itr_prev == 1 ||
+		    tx_itr_prev >= TXGBE_100K_ITR)
+			need_reset = true;
+	} else {
+		if (tx_itr_prev != 1 &&
+		    tx_itr_prev < TXGBE_100K_ITR)
+			need_reset = true;
+	}
+
+	/* check the old value and enable RSC if necessary */
+	need_reset |= txgbe_update_rsc(adapter);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		q_vector = adapter->q_vector[i];
+		q_vector->tx.work_limit = adapter->tx_work_limit;
+		q_vector->rx.work_limit = adapter->rx_work_limit;
+		if (q_vector->tx.count && !q_vector->rx.count)
+			/* tx only */
+			q_vector->itr = tx_itr_param;
+		else
+			/* rx only or mixed */
+			q_vector->itr = rx_itr_param;
+		txgbe_write_eitr(q_vector);
+	}
+
+	/* do reset here at the end to make sure EITR==0 case is handled
+	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
+	 * also locks in RSC enable/disable which requires reset
+	 */
+	if (need_reset)
+		txgbe_do_reset(netdev);
+
+	return 0;
+}
+
+static int txgbe_match_etype_entry(struct txgbe_adapter *adapter, u16 sw_idx)
+{
+	struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info;
+	int i;
+
+	for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) {
+		if (ef_info->etype_filters[i].rule_idx == sw_idx)
+			break;
+	}
+
+	return i;
+}
+
+static int txgbe_get_etype_rule(struct txgbe_adapter *adapter,
+				struct ethtool_rx_flow_spec *fsp, int ef_idx)
+{
+	struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info;
+	u8 mask[6] = {0, 0, 0, 0, 0, 0};
+	u8 mac[6] = {0, 0, 0, 0, 0, 0};
+
+	fsp->flow_type = ETHER_FLOW;
+	ether_addr_copy(fsp->h_u.ether_spec.h_dest, mac);
+	ether_addr_copy(fsp->m_u.ether_spec.h_dest, mask);
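+	/* A zeroed address with a zeroed mask means "match any MAC", both
+	 * for the destination above and the source below; an ethertype rule
+	 * matches on the protocol field alone.
+	 */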
+	ether_addr_copy(fsp->h_u.ether_spec.h_source, mac);
+	ether_addr_copy(fsp->m_u.ether_spec.h_source, mask);
+	fsp->h_u.ether_spec.h_proto = htons(ef_info->etype_filters[ef_idx].ethertype);
+	fsp->m_u.ether_spec.h_proto = 0xFFFF;
+	fsp->ring_cookie = ef_info->etype_filters[ef_idx].action;
+
+	return 0;
+}
+
+static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter,
+					struct ethtool_rxnfc *cmd)
+{
+	union txgbe_atr_input *mask = &adapter->fdir_mask;
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct hlist_node *node;
+	struct txgbe_fdir_filter *rule = NULL;
+
+	if (adapter->etype_filter_info.count > 0) {
+		int ef_idx;
+
+		ef_idx = txgbe_match_etype_entry(adapter, fsp->location);
+		if (ef_idx < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS)
+			return txgbe_get_etype_rule(adapter, fsp, ef_idx);
+	}
+
+	/* report total rule count */
+	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+	hlist_for_each_entry_safe(rule, node,
+				  &adapter->fdir_filter_list, fdir_node) {
+		if (fsp->location <= rule->sw_idx)
+			break;
+	}
+
+	if (!rule || fsp->location != rule->sw_idx)
+		return -EINVAL;
+
+	/* fill out the flow spec entry */
+
+	/* set flow type field */
+	switch (rule->filter.formatted.flow_type) {
+	case TXGBE_ATR_FLOW_TYPE_TCPV4:
+		fsp->flow_type = TCP_V4_FLOW;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_UDPV4:
+		fsp->flow_type = UDP_V4_FLOW;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+		fsp->flow_type = SCTP_V4_FLOW;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_IPV4:
+		fsp->flow_type = IP_USER_FLOW;
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		fsp->h_u.usr_ip4_spec.proto = 0;
+		fsp->m_u.usr_ip4_spec.proto = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+	fsp->flow_type |= FLOW_EXT;
+
+	/* record action */
+	if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		fsp->ring_cookie = rule->action;
+
+	return 0;
+}
+
+static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter,
+				      struct ethtool_rxnfc *cmd,
+				      u32 *rule_locs)
+{
+	struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info;
+	struct hlist_node *node;
+	struct txgbe_fdir_filter *rule;
+	int cnt = 0, i;
+
+	/* report total rule count */
+	cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+	hlist_for_each_entry_safe(rule, node,
+				  &adapter->fdir_filter_list, fdir_node) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[cnt] = rule->sw_idx;
+		cnt++;
+	}
+
+	for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) {
+		if (ef_info->ethertype_mask & (1 << i)) {
+			rule_locs[cnt] = ef_info->etype_filters[i].rule_idx;
+			cnt++;
+		}
+	}
+
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+static int txgbe_get_rss_hash_opts(struct txgbe_adapter *adapter,
+				   struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on txgbe */
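+	/* Each case deliberately falls through: every flow type accumulates
+	 * the IP src/dst hash fields, while TCP always adds the L4 port
+	 * fields and UDP adds them only when the matching RSS flag is set.
+	 */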
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case UDP_V4_FLOW:
+		if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case UDP_V6_FLOW:
+		if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			   u32 *rule_locs)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->num_rx_queues;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = adapter->fdir_filter_count +
+				adapter->etype_filter_info.count;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = txgbe_get_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = txgbe_get_ethtool_fdir_all(adapter, cmd,
+						 (u32 *)rule_locs);
+		break;
+	case ETHTOOL_GRXFH:
+		ret = txgbe_get_rss_hash_opts(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int
+txgbe_ethertype_filter_lookup(struct txgbe_etype_filter_info *ef_info,
+			      u16 ethertype)
+{
+	int i;
+
+	for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) {
+		if (ef_info->etype_filters[i].ethertype == ethertype &&
+		    (ef_info->ethertype_mask & (1 << i)))
+			return i;
+	}
+	return -1;
+}
+
+static int
+txgbe_ethertype_filter_insert(struct txgbe_etype_filter_info *ef_info,
+			      struct txgbe_ethertype_filter *etype_filter)
+{
+	int i;
+
+	for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) {
+		if (ef_info->ethertype_mask & (1 << i))
+			continue;
+
+		ef_info->ethertype_mask |= 1 << i;
+		ef_info->etype_filters[i].ethertype = etype_filter->ethertype;
+		ef_info->etype_filters[i].etqf = etype_filter->etqf;
+		ef_info->etype_filters[i].etqs = etype_filter->etqs;
+		ef_info->etype_filters[i].rule_idx = etype_filter->rule_idx;
+		ef_info->etype_filters[i].action = etype_filter->action;
+		break;
+	}
+
+	return (i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS ? i : -1);
+}
+
+static int txgbe_add_ethertype_filter(struct txgbe_adapter *adapter,
+				      struct ethtool_rx_flow_spec *fsp)
+{
+	struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info;
+	struct txgbe_ethertype_filter etype_filter;
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 ethertype;
+	u32 etqf = 0;
+	u32 etqs = 0;
+	u8 queue, vf;
+	u32 ring;
+	int ret;
+
+	ethertype = ntohs(fsp->h_u.ether_spec.h_proto);
+	if (!ethertype) {
+		e_err(drv, "protocol number is missing for ethertype filter\n");
+		return -EINVAL;
+	}
+	if (ethertype == ETH_P_IP || ethertype == ETH_P_IPV6) {
+		e_err(drv, "unsupported ether_type(0x%04x) in ethertype filter\n",
+		      ethertype);
+		return -EINVAL;
+	}
+
+	ret = txgbe_ethertype_filter_lookup(ef_info, ethertype);
+	if (ret >= 0) {
+		e_err(drv, "ethertype (0x%04x) filter exists\n", ethertype);
+		return -EEXIST;
+	}
+
+	/* ring_cookie is masked into a set of queues and txgbe pools */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		e_err(drv, "drop option is unsupported\n");
+		return -EINVAL;
+	}
+
+	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+	vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+	if (!vf && ring >= adapter->num_rx_queues)
+		return -EINVAL;
+	else if (vf && ((vf > adapter->num_vfs) ||
+			ring >= adapter->num_rx_queues_per_pool))
+		return -EINVAL;
+
+	/* Map the ring onto the absolute queue index */
+	if (!vf)
+		queue = adapter->rx_ring[ring]->reg_idx;
+	else
+		queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring;
+
+	etqs |= queue << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT;
+	etqs |= TXGBE_RDB_ETYPE_CLS_QUEUE_EN;
+	etqf = TXGBE_PSR_ETYPE_SWC_FILTER_EN | ethertype;
+	if (adapter->num_vfs) {
+		u8 pool;
+
+		if (!vf)
+			pool = adapter->num_vfs;
+		else
+			pool = vf - 1;
+
+		etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE;
+		etqf |= pool << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT;
+	}
+
+	etype_filter.ethertype = ethertype;
+	etype_filter.etqf = etqf;
+	etype_filter.etqs = etqs;
+	etype_filter.rule_idx = fsp->location;
+	etype_filter.action = fsp->ring_cookie;
+	ret = txgbe_ethertype_filter_insert(ef_info, &etype_filter);
+	if (ret < 0) {
+		e_err(drv, "ethertype filters are full\n");
+		return -ENOSPC;
+	}
+
+	wr32(hw, TXGBE_PSR_ETYPE_SWC(ret), etqf);
+	wr32(hw, TXGBE_RDB_ETYPE_CLS(ret), etqs);
+	TXGBE_WRITE_FLUSH(hw);
+
+	ef_info->count++;
+
+	return 0;
+}
+
+static int txgbe_del_ethertype_filter(struct txgbe_adapter *adapter, u16 sw_idx)
+{
+	struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info;
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 ethertype;
+	int idx;
+
+	idx = txgbe_match_etype_entry(adapter, sw_idx);
+	if (idx == TXGBE_MAX_PSR_ETYPE_SWC_FILTERS)
+		return -EINVAL;
+
+	ethertype = ef_info->etype_filters[idx].ethertype;
+	if (!ethertype) {
+		e_err(drv, "ethertype filter doesn't exist\n");
+		return -ENOENT;
+	}
+
+	ef_info->ethertype_mask &= ~(1 << idx);
+	ef_info->etype_filters[idx].ethertype = 0;
+	ef_info->etype_filters[idx].etqf = 0;
+	ef_info->etype_filters[idx].etqs = 0;
+	ef_info->etype_filters[idx].action = 0;
+	ef_info->etype_filters[idx].rule_idx = 0;
+
+	wr32(hw, TXGBE_PSR_ETYPE_SWC(idx), 0);
+	wr32(hw, TXGBE_RDB_ETYPE_CLS(idx), 0);
+	TXGBE_WRITE_FLUSH(hw);
+
+	ef_info->count--;
+
+	return 0;
+}
+
+static int txgbe_update_ethtool_fdir_entry(struct txgbe_adapter *adapter,
+					   struct txgbe_fdir_filter *input,
+					   u16 sw_idx)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node, *parent;
+	struct txgbe_fdir_filter *rule;
+	bool deleted = false;
+	s32 err;
+
+	parent = NULL;
+	rule = NULL;
+
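+	/* The filter list is kept sorted by sw_idx; walk it to find either
+	 * an existing rule to replace or the insertion point for a new one.
+	 */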
+	hlist_for_each_entry_safe(rule, node,
+				  &adapter->fdir_filter_list, fdir_node) {
+		/* hash found, or no matching entry */
+		if (rule->sw_idx >= sw_idx)
+			break;
+		parent = node;
+	}
+
+	/* if there is an old rule occupying our place remove it */
+	if (rule && rule->sw_idx == sw_idx) {
+		/* hardware filters are only configured when interface is up,
+		 * and we should not issue filter commands while the interface
+		 * is down
+		 */
+		if (netif_running(adapter->netdev) &&
+		    (!input || rule->filter.formatted.bkt_hash !=
+		     input->filter.formatted.bkt_hash)) {
+			err = txgbe_fdir_erase_perfect_filter(hw,
+							      &rule->filter,
+							      sw_idx);
+			if (err)
+				return -EINVAL;
+		}
+
+		hlist_del(&rule->fdir_node);
+		kfree(rule);
+		adapter->fdir_filter_count--;
+		deleted = true;
+	}
+
+	/* If we weren't given an input, then this was a request to delete a
+	 * filter. We should return -EINVAL if the filter wasn't found, but
+	 * return 0 if the rule was successfully deleted.
+	 */
+	if (!input)
+		return deleted ? 0 : -EINVAL;
+
+	/* initialize node and set software index */
+	INIT_HLIST_NODE(&input->fdir_node);
+
+	/* add filter to the list */
+	if (parent)
+		hlist_add_behind(&input->fdir_node, parent);
+	else
+		hlist_add_head(&input->fdir_node,
+			       &adapter->fdir_filter_list);
+
+	/* update counts */
+	adapter->fdir_filter_count++;
+
+	return 0;
+}
+
+static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+				       u8 *flow_type)
+{
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+		*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+		break;
+	case UDP_V4_FLOW:
+		*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+		break;
+	case IP_USER_FLOW:
+		switch (fsp->h_u.usr_ip4_spec.proto) {
+		case IPPROTO_TCP:
+			*flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+			break;
+		case IPPROTO_UDP:
+			*flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+			break;
+		case IPPROTO_SCTP:
+			*flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+			break;
+		case 0:
+			if (!fsp->m_u.usr_ip4_spec.proto) {
+				*flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+				break;
+			}
+			fallthrough;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static bool txgbe_match_ethtool_fdir_entry(struct txgbe_adapter *adapter,
+					   struct txgbe_fdir_filter *input)
+{
+	struct hlist_node *node2;
+	struct txgbe_fdir_filter *rule = NULL;
+
+	hlist_for_each_entry_safe(rule, node2,
+				  &adapter->fdir_filter_list, fdir_node) {
+		if (rule->filter.formatted.bkt_hash ==
+		    input->filter.formatted.bkt_hash &&
+		    rule->action == input->action) {
+			e_info(drv, "FDIR entry already exists\n");
+			return true;
+		}
+	}
+	return false;
+}
+
+static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter,
+					struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct txgbe_hw *hw = &adapter->hw;
+	struct txgbe_fdir_filter *input;
+	union txgbe_atr_input mask;
+	u8 queue;
+	int err;
+	u16 ptype = 0;
+
+	if ((fsp->flow_type & ~FLOW_EXT) == ETHER_FLOW)
+		return txgbe_add_ethertype_filter(adapter, fsp);
+
+	if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+		return -EOPNOTSUPP;
+
+	/* ring_cookie is masked into a set of queues and txgbe pools or
+	 * we use drop index
+	 */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		queue = TXGBE_RDB_FDIR_DROP_QUEUE;
+	} else {
+		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+		if (!vf && ring >= adapter->num_rx_queues)
+			return -EINVAL;
+		else if (vf &&
+			 ((vf > adapter->num_vfs) ||
+			  ring >= adapter->num_rx_queues_per_pool))
+			return -EINVAL;
+
+		/* Map the ring onto the absolute queue index */
+		if (!vf)
+			queue = adapter->rx_ring[ring]->reg_idx;
+		else
+			queue = ((vf - 1) *
+				 adapter->num_rx_queues_per_pool) + ring;
+	}
+
+	/* Don't allow indexes to exist outside of available space */
+	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+		e_err(drv, "Location out of range\n");
+		return -EINVAL;
+	}
+
+	input = kzalloc(sizeof(*input), GFP_ATOMIC);
+	if (!input)
+		return -ENOMEM;
+
+	memset(&mask, 0, sizeof(union txgbe_atr_input));
+
+	/* set SW index */
+	input->sw_idx = fsp->location;
+
+	/* record flow type */
+	if (!txgbe_flowspec_to_flow_type(fsp,
+					 &input->filter.formatted.flow_type)) {
+		e_err(drv, "Unrecognized flow type\n");
+		goto err_out;
+	}
+
+	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
+				   TXGBE_ATR_L4TYPE_MASK;
+
+	if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
+		mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;
+
+	/* Copy input into formatted structures */
+	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+	if (fsp->flow_type & FLOW_EXT) {
+		input->filter.formatted.vm_pool =
+				(unsigned char)ntohl(fsp->h_ext.data[1]);
+		mask.formatted.vm_pool =
+				(unsigned char)ntohl(fsp->m_ext.data[1]);
+		input->filter.formatted.flex_bytes =
+				fsp->h_ext.vlan_etype;
+		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+	}
+
+	switch (input->filter.formatted.flow_type) {
+	case TXGBE_ATR_FLOW_TYPE_TCPV4:
+		ptype = TXGBE_PTYPE_L2_IPV4_TCP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_UDPV4:
+		ptype = TXGBE_PTYPE_L2_IPV4_UDP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+		ptype = TXGBE_PTYPE_L2_IPV4_SCTP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_IPV4:
+		ptype = TXGBE_PTYPE_L2_IPV4;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_TCPV6:
+		ptype = TXGBE_PTYPE_L2_IPV6_TCP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_UDPV6:
+		ptype = TXGBE_PTYPE_L2_IPV6_UDP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+		ptype = TXGBE_PTYPE_L2_IPV6_SCTP;
+		break;
+	case TXGBE_ATR_FLOW_TYPE_IPV6:
+		ptype = TXGBE_PTYPE_L2_IPV6;
+		break;
+	default:
+		break;
+	}
+
+	input->filter.formatted.vlan_id = htons(ptype);
+	if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
+		mask.formatted.vlan_id = 0xFFFF;
+	else
+		mask.formatted.vlan_id = htons(0xFFF8);
+
+	/* determine if we need to drop or route the packet */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+		input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
+	else
+		input->action = fsp->ring_cookie;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	if (hlist_empty(&adapter->fdir_filter_list)) {
+		/* save mask and program input mask into HW */
+		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+		err = txgbe_fdir_set_input_mask(hw, &mask,
+						adapter->cloud_mode);
+		if (err) {
+			e_err(drv, "Error writing mask\n");
+			goto err_out_w_lock;
+		}
+	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+		e_err(drv, "Hardware only supports one mask per port.\n");
+		goto err_out_w_lock;
+	}
+
+	/* apply mask and compute/store hash */
+	txgbe_atr_compute_perfect_hash(&input->filter, &mask);
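+	/* the bkt_hash computed above is what txgbe_match_ethtool_fdir_entry()
+	 * compares below to reject duplicate rules
+	 */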
+
+	/* check if new entry does not exist on filter list */
+	if (txgbe_match_ethtool_fdir_entry(adapter, input))
+		goto err_out_w_lock;
+
+	/* only program filters to hardware if the net device is running, as
+	 * we store the filters in the Rx buffer which is not allocated when
+	 * the device is down
+	 */
+	if (netif_running(adapter->netdev)) {
+		err = txgbe_fdir_write_perfect_filter(hw,
+						      &input->filter, input->sw_idx,
+						      queue,
+						      adapter->cloud_mode);
+		if (err)
+			goto err_out_w_lock;
+	}
+
+	err = txgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+err_out_w_lock:
+	spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+	kfree(input);
+	return -EINVAL;
+}
+
+static int txgbe_del_ethtool_fdir_entry(struct txgbe_adapter *adapter,
+					struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	int err;
+
+	if (adapter->etype_filter_info.count > 0) {
+		err = txgbe_del_ethertype_filter(adapter, fsp->location);
+		if (!err)
+			return 0;
+	}
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	err = txgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+}
+
+#define UDP_RSS_FLAGS (TXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
+		       TXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+static int txgbe_set_rss_hash_opt(struct txgbe_adapter *adapter,
+				  struct ethtool_rxnfc *nfc)
+{
+	u32 flags2 = adapter->flags2;
+
+	/* RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) ||
+		    !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case UDP_V4_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* if we changed something we need to update flags */
+	if (flags2 != adapter->flags2) {
+		struct txgbe_hw *hw = &adapter->hw;
+		u32 mrqc;
+
+		mrqc = rd32(hw, TXGBE_RDB_RA_CTL);
+
+		if ((flags2 & UDP_RSS_FLAGS) &&
+		    !(adapter->flags2 & UDP_RSS_FLAGS))
+			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order\n");
+
+		adapter->flags2 = flags2;
+
+		/* Perform hash on these packet types */
+		mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4
+			| TXGBE_RDB_RA_CTL_RSS_IPV4_TCP
+			| TXGBE_RDB_RA_CTL_RSS_IPV6
+			| TXGBE_RDB_RA_CTL_RSS_IPV6_TCP;
+
+		mrqc &= ~(TXGBE_RDB_RA_CTL_RSS_IPV4_UDP |
+			  TXGBE_RDB_RA_CTL_RSS_IPV6_UDP);
+
+		if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+			mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP;
+
+		if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+			mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP;
+
+		wr32(hw, TXGBE_RDB_RA_CTL, mrqc);
+	}
+
+	return 0;
+}
+
+static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		ret = txgbe_add_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = txgbe_del_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXFH:
+		ret = txgbe_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int txgbe_rss_indir_tbl_max(struct txgbe_adapter *adapter)
+{
+	return 64;
+}
+
+static u32 txgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	return sizeof(adapter->rss_key);
+}
+
+static u32 txgbe_rss_indir_size(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	return txgbe_rss_indir_tbl_entries(adapter);
+}
+
+static void txgbe_get_reta(struct txgbe_adapter *adapter, u32 *indir)
+{
+	int i, reta_size = txgbe_rss_indir_tbl_entries(adapter);
+
+	for (i = 0; i < reta_size; i++)
+		indir[i] = adapter->rss_indir_tbl[i];
+}
+
+static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (indir)
+		txgbe_get_reta(adapter, indir);
+	if (key)
+		memcpy(key, adapter->rss_key, txgbe_get_rxfh_key_size(netdev));
+
+	return 0;
+}
+
+static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+	u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter);
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (hfunc)
+		return -EINVAL;
+
+	/* Fill out the redirection table */
+	if (indir) {
+		int max_queues = min_t(int, adapter->num_rx_queues,
+				       txgbe_rss_indir_tbl_max(adapter));
+
+		/* Allow at least 2 queues w/ SR-IOV. */
+		if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED &&
+		    max_queues < 2)
+			max_queues = 2;
+
+		/* Verify user input */
+		for (i = 0; i < reta_entries; i++)
+			if (indir[i] >= max_queues)
+				return -EINVAL;
+
+		if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
+			for (i = 0; i < reta_entries; i++)
+				adapter->rss_indir_tbl[i] = indir[i];
+			txgbe_store_vfreta(adapter);
+		} else {
+			for (i = 0; i < reta_entries; i++)
+				adapter->rss_indir_tbl[i] = indir[i];
+			txgbe_store_reta(adapter);
+		}
+	}
+
+	if (key) {
+		memcpy(adapter->rss_key, key, txgbe_get_rxfh_key_size(netdev));
+
+		if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
+			unsigned int pf_pool = adapter->num_vfs;
+
+			for (i = 0; i < 10; i++)
+				wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), adapter->rss_key[i]);
+		} else {
+			/* Fill out hash function seeds */
+			for (i = 0; i < 10; i++)
+				wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]);
+		}
+	}
+
+	return 0;
+}
+
+static int txgbe_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+
+	/* we always support timestamping disabled */
+	info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters |=
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+	return 0;
+}
+
+static unsigned int txgbe_max_channels(struct txgbe_adapter *adapter)
+{
+	unsigned int max_combined;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+	if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) {
+		/* We only support one q_vector without MSI-X */
+		max_combined = 1;
+	} else if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
+		/* SR-IOV currently only allows one queue on the PF */
+		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
+	} else if (tcs > 1) {
+		/* For DCB report channels per traffic class */
+		if (tcs > 4) {
+			/* 8 TC w/ 8 queues per TC */
+			max_combined = 8;
+		} else {
+			/* 4 TC w/ 16 queues per TC */
+			max_combined = 16;
+		}
+	} else if (adapter->atr_sample_rate) {
+		/* support up to 64 queues with ATR */
+		max_combined = TXGBE_MAX_FDIR_INDICES;
+		if (adapter->xdp_prog)
+			max_combined = TXGBE_MAX_XDP_RSS_INDICES;
+	} else {
+		/* support up to max allowed queues with RSS */
+		max_combined = txgbe_max_rss_indices(adapter);
+	}
+
+	return max_combined;
+}
+
+static void txgbe_get_channels(struct net_device *dev,
+			       struct ethtool_channels *ch)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+
+	/* report maximum channels */
+	ch->max_combined = txgbe_max_channels(adapter);
+
+	/* report info for other vector */
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+		ch->max_other = NON_Q_VECTORS;
+		ch->other_count = NON_Q_VECTORS;
+	}
+
+	/* record RSS queues */
+	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+	/* nothing else to report if RSS is disabled */
+	if (ch->combined_count == 1)
+		return;
+
+	/* we do not support ATR queueing if SR-IOV is enabled */
+	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
+		return;
+
+	/* same thing goes for being DCB enabled */
+	if (netdev_get_num_tc(dev) > 1)
+		return;
+
+	/* if ATR is disabled we can exit */
+	if (!adapter->atr_sample_rate)
+		return;
+
+	/* report flow director queues as maximum channels */
+	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int txgbe_set_channels(struct net_device *dev,
+			      struct ethtool_channels *ch)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	unsigned int count = ch->combined_count;
+	u8 max_rss_indices = txgbe_max_rss_indices(adapter);
+
+	/* verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* verify other_count has not changed */
+	if (ch->other_count != NON_Q_VECTORS)
+		return -EINVAL;
+
+	/* verify the number of channels does not exceed hardware limits */
+	if (count > txgbe_max_channels(adapter))
+		return -EINVAL;
+
+	if (count < adapter->active_vlan_limited + 1) {
+		e_dev_info("cannot set fewer channels than active limited vlan + 1: %d",
+			   (adapter->active_vlan_limited + 1));
+		return -EINVAL;
+	}
+	/* update feature limits from largest to smallest supported values */
+	adapter->ring_feature[RING_F_FDIR].limit = count;
+
+	/* cap RSS limit */
+	if (count > max_rss_indices)
+		count = max_rss_indices;
+	adapter->ring_feature[RING_F_RSS].limit = count;
+
+#if IS_ENABLED(CONFIG_FCOE)
+	/* cap FCoE limit at 8 */
+	if (count > TXGBE_RDB_FCRE_TBL_SIZE)
+		count = TXGBE_RDB_FCRE_TBL_SIZE;
+	adapter->ring_feature[RING_F_FCOE].limit = count;
+#endif /* CONFIG_FCOE */
+
+	/* use setup TC to update any traffic class queue mapping */
+	return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int txgbe_get_module_info(struct net_device *dev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 status;
+	u8 sff8472_rev, addr_mode;
+	u8 identifier = 0;
+	u8 sff8636_rev = 0;
+	bool page_swap = false;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u32 value;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		value = rd32(hw, TXGBE_GPIO_EXT);
+		if (value & TXGBE_SFP1_MOD_PRST_LS)
+			return -EIO;
+
+		if (!netif_carrier_ok(dev)) {
+			e_err(drv, "\"ethtool -m\" is supported only when link is up for 40G.\n");
+			return -EIO;
+		}
+	}
+
+	if (hw->mac.type == txgbe_mac_aml) {
+		value = rd32(hw, TXGBE_GPIO_EXT);
+		if (value & TXGBE_SFP1_MOD_ABS_LS)
+			return -EIO;
+	}
+
+	if (hw->mac.type != txgbe_mac_sp) {
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0)
+			return -EBUSY;
+
+		if (!test_bit(__TXGBE_DOWN, &adapter->state))
+			cancel_work_sync(&adapter->sfp_sta_task);
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     TXGBE_SFF_IDENTIFIER,
+						     &identifier);
+		if (status != 0)
+			goto ERROR_IO;
+
+		switch (identifier) {
+		case TXGBE_SFF_IDENTIFIER_SFP:
+			/* Check whether we support SFF-8472 or not */
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+							     TXGBE_SFF_SFF_8472_COMP,
+							     &sff8472_rev);
+			if (status != 0)
+				goto ERROR_IO;
+
+			/* addressing mode is not supported */
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+							     TXGBE_SFF_SFF_8472_SWAP,
+							     &addr_mode);
+			if (status != 0)
+				goto ERROR_IO;
+
+			if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+				e_err(drv, "Address change required to access page 0xA2, but not supported.\n");
+				page_swap = true;
+			}
+
+			if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+			    !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) {
+				/* We have a SFP, but it does not support SFF-8472 */
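+				/* only the 256-byte A0h page can be reported */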
+				modinfo->type = ETH_MODULE_SFF_8079;
+				modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+			} else {
+				/* We have a SFP which supports a revision of SFF-8472. */
+				modinfo->type = ETH_MODULE_SFF_8472;
+				modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+			}
+			break;
+		case TXGBE_SFF_IDENTIFIER_QSFP:
+		case TXGBE_SFF_IDENTIFIER_QSFP_PLUS:
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+							     TXGBE_SFF_SFF_REVISION_ADDR,
+							     &sff8636_rev);
+			if (status != 0)
+				goto ERROR_IO;
+
+			/* Check revision compliance */
+			if (sff8636_rev > 0x02) {
+				/* Module is SFF-8636 compliant */
+				modinfo->type = ETH_MODULE_SFF_8636;
+				modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN;
+			} else {
+				modinfo->type = ETH_MODULE_SFF_8436;
+				modinfo->eeprom_len = TXGBE_MODULE_QSFP_MAX_LEN;
+			}
+			break;
+		default:
+			e_err(drv, "SFF Module Type not recognized.\n");
+			return -EINVAL;
+		}
+
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+	} else {
+		modinfo->type = adapter->eeprom_type;
+		modinfo->eeprom_len = adapter->eeprom_len;
+	}
+
+	return 0;
+
+ERROR_IO:
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+	return -EIO;
+}
+
+#define SFF_A2_ALRM_FLG 0x170
+#define SFF_A2_WARN_FLG 0x174
+#define SFF_A2_TEMP 0x160
+#define SFF_A2_RX_PWR 0x169
+
+static int txgbe_get_module_eeprom(struct net_device *dev,
+				   struct ethtool_eeprom *ee,
+				   u8 *data)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_hw *hw = &adapter->hw;
+	int i = 0;
+	bool is_sfp = false;
+	u32 value;
+	u8 identifier = 0;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u8 databyte;
+	s32 status = 0;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		value = rd32(hw, TXGBE_GPIO_EXT);
+		if (value & TXGBE_SFP1_MOD_PRST_LS)
+			return -EIO;
+
+		if (!netif_carrier_ok(dev)) {
+			e_err(drv, "\"ethtool -m\" is supported only when link is up for 40G.\n");
+			return -EIO;
+		}
+	}
+
+	if (hw->mac.type == txgbe_mac_aml) {
+		value = rd32(hw, TXGBE_GPIO_EXT);
+		if (value & TXGBE_SFP1_MOD_ABS_LS)
+			return -EIO;
+	}
+
+	if (hw->mac.type != txgbe_mac_sp) {
+		if (ee->len == 0)
+			goto ERROR_INVAL;
+
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0)
+			return -EBUSY;
+
+		if (!test_bit(__TXGBE_DOWN, &adapter->state))
+			cancel_work_sync(&adapter->sfp_sta_task);
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     TXGBE_SFF_IDENTIFIER,
+						     &identifier);
+		if (status != 0)
+			goto ERROR_IO;
+
+		if (identifier == TXGBE_SFF_IDENTIFIER_SFP)
+			is_sfp = true;
+
+		memset(data, 0, ee->len);
+		for (i = 0; i < ee->len; i++) {
+			u32 offset = i + ee->offset;
+			u32 page = 0;
+
+			/* I2C reads can take a long time */
+			if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
+				goto ERROR_BUSY;
+
+			if (is_sfp) {
+				if (offset < ETH_MODULE_SFF_8079_LEN)
+					status = hw->phy.ops.read_i2c_eeprom(hw, offset,
+									     &databyte);
+				else
+					status = hw->phy.ops.read_i2c_sff8472(hw, offset,
+									      &databyte);
+
+				if (status != 0)
+					goto ERROR_IO;
+			} else {
+				while (offset >= ETH_MODULE_SFF_8436_LEN) {
+					offset -= ETH_MODULE_SFF_8436_LEN / 2;
+					page++;
+				}
+
+				if (page == 0 || !(data[0x2] & 0x4)) {
+					status = hw->phy.ops.read_i2c_sff8636(hw, page, offset,
+									      &databyte);
+
+					if (status != 0)
+						goto ERROR_IO;
+				}
+			}
+			data[i] = databyte;
+		}
+	} else {
+		if (ee->len == 0)
+			goto ERROR_INVAL;
+
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0)
+			return -EBUSY;
+
+		/* when down, we can't detect an SFP change, so read the
+		 * EEPROM over I2C
+		 */
+		if (test_bit(__TXGBE_DOWN, &adapter->state)) {
+			for (i = ee->offset; i < ee->offset + ee->len; i++) {
+				/* I2C reads can take a long time */
+				if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state))
&adapter->state)) + goto ERROR_BUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, + &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + data[i - ee->offset] = databyte; + } + } else { + if (adapter->eeprom_type == ETH_MODULE_SFF_8472) { + cancel_work_sync(&adapter->sfp_sta_task); + + /* alarm flags */ + for (i = SFF_A2_ALRM_FLG; i <= SFF_A2_ALRM_FLG + 1; i++) { + status = hw->phy.ops.read_i2c_sff8472(hw, i, + &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /* warning flags */ + for (i = SFF_A2_WARN_FLG; i <= SFF_A2_WARN_FLG + 1; i++) { + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + /* DOM monitor values */ + for (i = SFF_A2_TEMP; i <= SFF_A2_RX_PWR + 1; i++) { + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + goto ERROR_IO; + + adapter->i2c_eeprom[i] = databyte; + } + } + for (i = ee->offset; i < ee->offset + ee->len; i++) + data[i - ee->offset] = adapter->i2c_eeprom[i]; + } + } + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; +ERROR_BUSY: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return -EBUSY; +ERROR_IO: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return -EIO; +ERROR_INVAL: + return -EINVAL; +} + +static int txgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} + +static int txgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = txgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) + adapter->flags2 |= TXGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~TXGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} + +static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = txgbe_upgrade_flash(&adapter->hw, ef->region, + fw->data, fw->size); + } else { + if (txgbe_mng_present(&adapter->hw)) + ret = txgbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make the new firmware take effect\n", ef->data); + return ret; +} + + +static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS
| + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_link_ksettings = txgbe_get_link_ksettings, + .set_link_ksettings = txgbe_set_link_ksettings, + .get_fecparam = txgbe_get_fec_param, + .set_fecparam = txgbe_set_fec_param, + .get_drvinfo = txgbe_get_drvinfo, + .get_regs_len = txgbe_get_regs_len, + .get_regs = txgbe_get_regs, + .get_wol = txgbe_get_wol, + .set_wol = txgbe_set_wol, + .nway_reset = txgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = txgbe_get_eeprom_len, + .get_eeprom = txgbe_get_eeprom, + .set_eeprom = txgbe_set_eeprom, + .get_ringparam = txgbe_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_pauseparam = txgbe_get_pauseparam, + .set_pauseparam = txgbe_set_pauseparam, + .get_msglevel = txgbe_get_msglevel, + .set_msglevel = txgbe_set_msglevel, + .self_test = txgbe_diag_test, + .get_strings = txgbe_get_strings, + .set_phys_id = txgbe_set_phys_id, + .get_sset_count = txgbe_get_sset_count, + .get_priv_flags = txgbe_get_priv_flags, + .set_priv_flags = txgbe_set_priv_flags, + .get_ethtool_stats = txgbe_get_ethtool_stats, + .get_coalesce = txgbe_get_coalesce, + .set_coalesce = txgbe_set_coalesce, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, + .get_eee = txgbe_get_eee, + .set_eee = txgbe_set_eee, + .get_channels = txgbe_get_channels, + .set_channels = txgbe_set_channels, + .get_module_info = txgbe_get_module_info, + .get_module_eeprom = txgbe_get_module_eeprom, + .get_ts_info = txgbe_get_ts_info, + .get_rxfh_indir_size = txgbe_rss_indir_size, + .get_rxfh_key_size = txgbe_get_rxfh_key_size, + .get_rxfh = txgbe_get_rxfh, + .set_rxfh = txgbe_set_rxfh, + .flash_device = txgbe_set_flash, +}; + +void txgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &txgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c new file mode 100644 index 000000000000..851530c39a09 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe.h" + +#if IS_ENABLED(CONFIG_FCOE) +#if IS_ENABLED(CONFIG_DCB) +#include "txgbe_dcb.h" +#endif /* CONFIG_DCB */ +#include +#include +#include +#include +#include +#include +#include + +/** + * txgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp - ptr to the txgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void txgbe_fcoe_clear_ddp(struct txgbe_fcoe_ddp *ddp) +{ + ddp->len = 0; + ddp->err = 1; + ddp->udl = NULL; + ddp->udp = 0UL; + ddp->sgl = NULL; + ddp->sgc = 0; +} + +/** + * txgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid that corresponding ddp will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. 
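+ *
+ * A minimal sketch of the expected call path (illustrative only; the
+ * fcoe/libfc code reaches this through the generic netdev op rather
+ * than by name):
+ *
+ *	ddped_len = netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);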
+ * + * Returns : data length already ddp-ed in bytes + */ +int txgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + int len = 0; + struct txgbe_fcoe *fcoe; + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct txgbe_fcoe_ddp *ddp; + u32 fcbuff; + + if (!netdev) + goto out_ddp_put; + + if (xid > netdev->fcoe_ddp_xid) + goto out_ddp_put; + + adapter = netdev_priv(netdev); + hw = &adapter->hw; + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + goto out_ddp_put; + + len = ddp->len; + /* if there is an error, force invalidation of the ddp context */ + if (ddp->err) { + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); + + wr32(hw, TXGBE_PSR_FC_FLT_CTXT, 0); + wr32(hw, TXGBE_PSR_FC_FLT_RW, + (xid | TXGBE_PSR_FC_FLT_RW_WE)); + wr32(hw, TXGBE_RDM_FCBUF, 0); + wr32(hw, TXGBE_RDM_FCRW, + (xid | TXGBE_RDM_FCRW_WE)); + + /* read FCBUFF to check context invalidated */ + wr32(hw, TXGBE_RDM_FCRW, + (xid | TXGBE_RDM_FCRW_RE)); + fcbuff = rd32(hw, TXGBE_RDM_FCBUF); + + spin_unlock_bh(&fcoe->lock); + + /* guaranteed to be invalidated after 100us */ + if (fcbuff & TXGBE_RDM_FCBUF_VALID) + usleep_range(100, 200); + } + if (ddp->sgl) + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } + + txgbe_fcoe_clear_ddp(ddp); + +out_ddp_put: + return len; +} + +/** + * txgbe_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * @target_mode: non-zero to set up DDP in target mode, 0 for initiator mode + * + * Returns : 1 for success and 0 for no ddp + **/ +static int txgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + struct txgbe_fcoe *fcoe; + struct txgbe_fcoe_ddp *ddp; + struct txgbe_fcoe_ddp_pool *ddp_pool; + struct scatterlist *sg; + unsigned int i, j, dmacount; + unsigned int len; + static const unsigned int bufflen = TXGBE_FCBUFF_MIN; + unsigned int firstoff = 0; + unsigned int lastsize; + unsigned int thisoff = 0; + unsigned int thislen = 0; + u32 fcbuff, fcdmarw, fcfltrw, fcfltctxt; + dma_addr_t addr = 0; + + if (!netdev || !sgl || !sgc) + return 0; + + adapter = netdev_priv(netdev); + if (xid > netdev->fcoe_ddp_xid) { + e_warn(drv, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return 0; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + txgbe_fcoe_clear_ddp(ddp); + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, + DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + goto out_noddp; + } + + /* alloc the udl from per cpu ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + e_err(drv, "failed to allocate ddp context\n"); + goto out_noddp_unmap; + } + ddp->pool =
ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + + j = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= TXGBE_BUFFCNT_MAX) { + ddp_pool->noddp++; + goto out_noddp_free; + } + + /* get the offset and length of the current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + thislen = min((bufflen - thisoff), len); + /* all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if (j != 0 && thisoff) + goto out_noddp_free; + /* all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if ((i != (dmacount - 1) || thislen != len) && + (thislen + thisoff) != bufflen) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have a non-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + lastsize = thisoff + thislen; + + /* lastsize cannot be bufflen. + * If it is, add another buffer with lastsize = 1. + * Since lastsize is 1 there will be no HW access to this buffer. + */ + if (lastsize == bufflen) { + if (j >= TXGBE_BUFFCNT_MAX) { + ddp_pool->noddp_ext_buff++; + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + put_cpu(); + + fcbuff = TXGBE_RDM_FCBUF_SIZE(TXGBE_FCBUFF_4KB) | + TXGBE_RDM_FCBUF_COUNT(j) | + TXGBE_RDM_FCBUF_OFFSET(firstoff) | + TXGBE_RDM_FCBUF_VALID; + + /* Set WRCONTX bit to allow DDP for target */ + fcfltctxt = TXGBE_PSR_FC_FLT_CTXT_VALID; + if (!target_mode) + fcfltctxt |= TXGBE_PSR_FC_FLT_CTXT_WR; + + fcdmarw = xid | TXGBE_RDM_FCRW_WE | + TXGBE_RDM_FCRW_LASTSIZE(lastsize); + + fcfltrw = xid; + fcfltrw |= TXGBE_PSR_FC_FLT_RW_WE; + + /* program DMA context */ + hw = &adapter->hw; + + /* turn on last frame indication for target mode as the FCP_RSP target + * is supposed to send FCP_RSP when it is done. + */ + if (target_mode && !test_bit(__TXGBE_FCOE_TARGET, &fcoe->mode)) { + set_bit(__TXGBE_FCOE_TARGET, &fcoe->mode); + wr32m(hw, TXGBE_PSR_FC_CTL, + TXGBE_PSR_FC_CTL_LASTSEQH, TXGBE_PSR_FC_CTL_LASTSEQH); + } + + /* other devices require DDP lock with direct DDP context access */ + spin_lock_bh(&fcoe->lock); + + wr32(hw, TXGBE_RDM_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_RDM_FCPTRH, (u64)ddp->udp >> 32); + wr32(hw, TXGBE_RDM_FCBUF, fcbuff); + wr32(hw, TXGBE_RDM_FCRW, fcdmarw); + /* program filter context */ + wr32(hw, TXGBE_PSR_FC_PARAM, 0); + wr32(hw, TXGBE_PSR_FC_FLT_CTXT, fcfltctxt); + wr32(hw, TXGBE_PSR_FC_FLT_RW, fcfltrw); + + spin_unlock_bh(&fcoe->lock); + + return 1; + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + txgbe_fcoe_clear_ddp(ddp); + +out_noddp_unmap: + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * txgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O.
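+ *
+ * As a worked example of the alignment rule enforced in the setup loop
+ * above (with bufflen = TXGBE_FCBUFF_MIN = 4096, addresses purely
+ * illustrative): a middle scatterlist chunk mapped at bus address
+ * 0x10001000 with length 0x2000 is accepted (thisoff = 0 and it ends
+ * on a 4 KB boundary), while one mapped at 0x10000800 is rejected,
+ * since only the first chunk may carry a non-zero offset.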
+ * + * Returns : 1 for success and 0 for no ddp + */ +int txgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * txgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int txgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return txgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * txgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: txgbe adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. + */ +int txgbe_fcoe_ddp(struct txgbe_adapter *adapter, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct txgbe_fcoe_ddp *ddp; + struct fc_frame_header *fh; + int rc = -EINVAL, ddp_max; + __le32 fcerr = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCERR); + __le32 ddp_err; + u32 fctl; + u16 xid; + + if (fcerr == cpu_to_le32(TXGBE_FCERR_BADCRC)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* verify header contains at least the FCOE header */ + WARN_ON_ONCE(skb_headlen(skb) < FCOE_HEADER_LEN); + + fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); + + if (skb->protocol == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); + + fctl = ntoh24(fh->fh_f_ctl); + if (fctl & FC_FC_EX_CTX) + xid = ntohs(fh->fh_ox_id); + else + xid = ntohs(fh->fh_rx_id); + + ddp_max = TXGBE_FCOE_DDP_MAX; + + if (xid >= ddp_max) + goto ddp_out; + + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + goto ddp_out; + + ddp_err = txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_FCEOFE | + TXGBE_RXD_ERR_FCERR); + if (ddp_err) + goto ddp_out; + + switch (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_FCSTAT)) { + /* return 0 to bypass going to ULD for DDPed data */ + case cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_DDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + rc = 0; + break; + /* unmap the sg list when FCPRSP is received */ + case cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_FCPRSP): + dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; + ddp->sgc = 0; + fallthrough; + /* if DDP length is present pass it through to ULD */ + case cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NODDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ddp->len) + rc = ddp->len; + break; + /* no match will return as an error */ + case cpu_to_le32(TXGBE_RXD_STAT_FCSTAT_NOMTCH): + default: + break; + } + + /* In target mode, check the last data frame of the sequence. 
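+ * (Illustrative numbers for the xid lookup above: with FC_FC_EX_CTX
+ * clear, a frame whose fh_rx_id is 0x1a3 indexes fcoe->ddp[0x1a3],
+ * which stays inside the TXGBE_FCOE_DDP_MAX = 512 contexts.)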
+ * + * For DDP in target mode, data is already DDPed but the header + * indication of the last data frame could allow us to tell if we + * got all the data and the ULP can send FCP_RSP back. As this is + * not a full fcoe frame, we fill the trailer here so it won't be + * dropped by the ULP stack. + */ + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fctl & FC_FC_END_SEQ) { + struct fcoe_crc_eof *crc; + + skb_linearize(skb); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } +ddp_out: + return rc; +} + +/** + * txgbe_fso - txgbe FCoE Sequence Offload (FSO) + * @tx_ring: tx desc ring + * @first: first tx_buffer structure containing skb, tx_flags, and protocol + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates success, < 0 for error + */ +int txgbe_fso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + struct fc_frame_header *fh; + u32 vlan_macip_lens; + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u8 sof, eof; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* reset the headers to point at fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + + sizeof(struct fcoe_hdr)); + + /* sets up SOF and ORIS */ + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + switch (sof) { + case FC_SOF_I2: + fcoe_sof_eof = TXGBE_TXD_FCOEF_ORIS; + break; + case FC_SOF_I3: + fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF | + TXGBE_TXD_FCOEF_ORIS; + break; + case FC_SOF_N2: + break; + case FC_SOF_N3: + fcoe_sof_eof = TXGBE_TXD_FCOEF_SOF; + break; + default: + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); + return -EINVAL; + } + + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, &eof, 1); + /* sets up EOF and ORIE */ + switch (eof) { + case FC_EOF_N: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N; + break; + case FC_EOF_T: + /* lso needs ORIE */ + if (skb_is_gso(skb)) + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_N | + TXGBE_TXD_FCOEF_ORIE; + else + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_T; + break; + case FC_EOF_NI: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_NI; + break; + case FC_EOF_A: + fcoe_sof_eof |= TXGBE_TXD_FCOEF_EOF_A; + break; + default: + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); + return -EINVAL; + } + + /* sets up PARINC indicating data offset */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + fcoe_sof_eof |= TXGBE_TXD_FCOEF_PARINC; + + /* include trailer in headlen as it is replicated per frame */ + *hdr_len = sizeof(struct fcoe_crc_eof); + + /* hdr_len includes fc_hdr if FCoE LSO is enabled */ + if (skb_is_gso(skb)) { + *hdr_len += skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + /* update gso_segs and bytecount */ + first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= TXGBE_TX_FLAGS_TSO; + } + + /* set flag indicating FCOE to txgbe_tx_map call */ + first->tx_flags |= TXGBE_TX_FLAGS_FCOE | TXGBE_TX_FLAGS_CC; + + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ + mss_l4len_idx = skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_transport_offset(skb) + +
sizeof(struct fc_frame_header); + vlan_macip_lens |= (skb_transport_offset(skb) - 4) + << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + /* write context desc */ + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, + TXGBE_TXD_TUCMD_FCOE, mss_l4len_idx); + + return 0; +} + +static void txgbe_fcoe_dma_pool_free(struct txgbe_fcoe *fcoe, unsigned int cpu) +{ + struct txgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +static int txgbe_fcoe_dma_pool_alloc(struct txgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct txgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + snprintf(pool_name, 32, "txgbe_fcoe_ddp_%d", cpu); + + pool = dma_pool_create(pool_name, dev, TXGBE_FCPTR_MAX, + TXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; +} + +/** + * txgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to txgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void txgbe_configure_fcoe(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; + struct txgbe_hw *hw = &adapter->hw; + int i, fcoe_i; + u32 fcoe_q; + u32 etqf; + int fcreta_size; + + /* Minimal functionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; + + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ + etqf = ETH_P_FCOE | TXGBE_PSR_ETYPE_SWC_FCOE | + TXGBE_PSR_ETYPE_SWC_FILTER_EN; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE), + etqf); + wr32(hw, + TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FCOE), + 0); + + /* leave remaining registers unconfigured if FCoE is disabled */ + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return; + + /* Use one or more Rx queues for FCoE by redirection table */ + fcreta_size = TXGBE_RDB_FCRE_TBL_SIZE; + + for (i = 0; i < fcreta_size; i++) { + fcoe_i = + TXGBE_RDB_FCRE_TBL_RING(fcoe->offset + (i % fcoe->indices)); + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + wr32(hw, TXGBE_RDB_FCRE_TBL(i), fcoe_q); + } + wr32(hw, TXGBE_RDB_FCRE_CTL, TXGBE_RDB_FCRE_CTL_ENA); + + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | TXGBE_PSR_ETYPE_SWC_FILTER_EN; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= VMDQ_P(0) << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + wr32(hw, TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FIP), + etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; + wr32(hw, TXGBE_RDB_ETYPE_CLS(TXGBE_PSR_ETYPE_SWC_FILTER_FIP), + TXGBE_RDB_ETYPE_CLS_QUEUE_EN | + (fcoe_q << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT)); + + /* Configure FCoE Rx control */ + wr32(hw, TXGBE_PSR_FC_CTL, + TXGBE_PSR_FC_CTL_FCCRCBO | + TXGBE_PSR_FC_CTL_FCOEVER(FC_FCOE_VER) | + TXGBE_PSR_FC_CTL_ALLH); +} + +/** + * txgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources + * @adapter: txgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void
txgbe_free_fcoe_ddp_resources(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i, ddp_max; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + ddp_max = TXGBE_FCOE_DDP_MAX; + + for (i = 0; i < ddp_max; i++) + txgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + txgbe_fcoe_dma_pool_free(fcoe, cpu); + + dma_unmap_single(pci_dev_to_dev(adapter->pdev), + fcoe->extra_ddp_buffer_dma, + TXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); + + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * txgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: txgbe adapter + * + * Sets up ddp context resources + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_setup_fcoe_ddp_resources(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = pci_dev_to_dev(adapter->pdev); + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(TXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + dma = dma_map_single(dev, buffer, TXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = txgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + txgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +int txgbe_fcoe_ddp_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct txgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, "failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = TXGBE_FCOE_DDP_MAX - 1; + + return 0; +} + +void txgbe_fcoe_ddp_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; +} + +/** + * txgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in sapphire/amber-lite. 
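+ *
+ * This is normally reached through the FCoE transport rather than
+ * called directly; a hedged sketch of the path:
+ *
+ *	if (netdev->netdev_ops->ndo_fcoe_enable)
+ *		netdev->netdev_ops->ndo_fcoe_enable(netdev);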
+ * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_fcoe_enable(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_fcoe *fcoe = &adapter->fcoe; + + atomic_inc(&fcoe->refcnt); + + if (!(adapter->flags & TXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) + return -EINVAL; + + e_info(drv, "Enabling FCoE offload features.\n"); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Allocate per CPU memory to track DDP pools */ + txgbe_fcoe_ddp_enable(adapter); + + /* enable FCoE and notify stack */ + adapter->flags |= TXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * txgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in sapphire/amber-lite. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int txgbe_fcoe_disable(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; + + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; + + e_info(drv, "Disabling FCoE offload features.\n"); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Free per CPU memory to track DDP pools */ + txgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; + netdev->features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + txgbe_clear_interrupt_scheme(adapter); + txgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +#if IS_ENABLED(CONFIG_DCB) +/** + * txgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE + * @netdev: the corresponding net_device + * + * Finds out the corresponding user priority bitmap from the current + * traffic class that FCoE belongs to. Returns 0 as the invalid user + * priority bitmap to indicate an error. + * + * Returns : 802.1p user priority bitmap for FCoE + */ +u8 txgbe_fcoe_getapp(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return 1 << adapter->fcoe.up; +} +#endif /* CONFIG_DCB */ + +/** + * txgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : txgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
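+ *
+ * Example of the packing done below (values illustrative): with
+ * prefix = 0x2000 and SAN MAC 00:1b:21:aa:bb:cc the result is
+ *
+ *	*wwn = 0x2000001b21aabbcc;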
+ * + * Returns : 0 on success + */ +int txgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + int rc = -EINVAL; + u16 prefix = 0xffff; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_mac_info *mac = &adapter->hw.mac; + + switch (type) { + case NETDEV_FCOE_WWNN: + prefix = mac->wwnn_prefix; + break; + case NETDEV_FCOE_WWPN: + prefix = mac->wwpn_prefix; + break; + default: + break; + } + + if (prefix != 0xffff && + is_valid_ether_addr(mac->san_addr)) { + *wwn = ((u64)prefix << 48) | + ((u64)mac->san_addr[0] << 40) | + ((u64)mac->san_addr[1] << 32) | + ((u64)mac->san_addr[2] << 24) | + ((u64)mac->san_addr[3] << 16) | + ((u64)mac->san_addr[4] << 8) | + ((u64)mac->san_addr[5]); + rc = 0; + } + return rc; +} + +/* txgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter: pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 txgbe_fcoe_get_tc(struct txgbe_adapter *adapter) +{ + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +} +#endif /* CONFIG_FCOE */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h new file mode 100644 index 000000000000..af1353746104 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fcoe.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_FCOE_H_ +#define _TXGBE_FCOE_H_ + +#if IS_ENABLED(CONFIG_FCOE) + +#include +#include + +/* shift bits within STAT for FCSTAT */ +#define TXGBE_RXD_FCSTAT_SHIFT 4 + +/* ddp user buffer */ +#define TXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ +#define TXGBE_FCPTR_ALIGN 16 +#define TXGBE_FCPTR_MAX (TXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define TXGBE_FCBUFF_4KB 0x0 +#define TXGBE_FCBUFF_8KB 0x1 +#define TXGBE_FCBUFF_16KB 0x2 +#define TXGBE_FCBUFF_64KB 0x3 +#define TXGBE_FCBUFF_MAX 65536 /* 64KB max */ +#define TXGBE_FCBUFF_MIN 4096 /* 4KB min */ +#define TXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ + +/* Default user priority to use for FCoE */ +#define TXGBE_FCOE_DEFUP 3 + +/* fcerr */ +#define TXGBE_FCERR_BADCRC 0x00100000 +#define TXGBE_FCERR_EOFSOF 0x00200000 +#define TXGBE_FCERR_NOFIRST 0x00300000 +#define TXGBE_FCERR_OOOSEQ 0x00400000 +#define TXGBE_FCERR_NODMA 0x00500000 +#define TXGBE_FCERR_PKTLOST 0x00600000 + +/* FCoE DDP for target mode */ +#define __TXGBE_FCOE_TARGET 1 + +struct txgbe_fcoe_ddp { + int len; + u32 err; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; +}; + +/* per cpu variables */ +struct txgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; +}; + +struct txgbe_fcoe { + struct txgbe_fcoe_ddp_pool __percpu *ddp_pool; + atomic_t refcnt; + /* spinlock for fcoe */ + spinlock_t lock; + struct txgbe_fcoe_ddp ddp[TXGBE_FCOE_DDP_MAX]; + void *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; + unsigned long mode; + u8 up; + u8 up_set; +}; +#endif /* CONFIG_FCOE */ + +#endif /* _TXGBE_FCOE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c new file mode 100644 index 000000000000..5fc14df3e80c --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -0,0 +1,7705 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
*/ + +#include <linux/compiler_attributes.h> +#include <linux/delay.h> +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe_dcb.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" +#include "txgbe.h" + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 + +#define TXGBE_SP_RAR_ENTRIES 128 +#define TXGBE_SP_MC_TBL_SIZE 128 +#define TXGBE_SP_VFT_TBL_SIZE 128 +#define TXGBE_SP_RX_PB_SIZE 512 + +static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw); +static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw); +static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr); +static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, + u16 *san_mac_offset); + +static s32 txgbe_setup_copper_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); + +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr) +{ + unsigned int port_reg_offset; + u32 data; + + /* Set the LAN port indicator to port_reg_offset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + port_reg_offset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, port_reg_offset, addr); + + /* 2nd, read the data from IDA_DATA register */ + port_reg_offset = TXGBE_ETHPHY_IDA_DATA; + data = rd32(hw, port_reg_offset); + return data; +} + +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr) +{ + unsigned int port_reg_offset; + u32 data; + /* Set the LAN port indicator to port_reg_offset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + port_reg_offset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, port_reg_offset, addr); + + /* 2nd, read the data from IDA_DATA register */ + port_reg_offset = TXGBE_XPCS_IDA_DATA; + data = rd32(hw, port_reg_offset); + + return data; +} + +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int port_reg_offset; + + /* Set the LAN port indicator to port_reg_offset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + port_reg_offset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, port_reg_offset, addr); + + /* 2nd, write the data to IDA_DATA register */ + port_reg_offset = TXGBE_ETHPHY_IDA_DATA; + wr32(hw, port_reg_offset, data); +} + +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int port_reg_offset; + + /* Set the LAN port indicator to port_reg_offset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + port_reg_offset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, port_reg_offset, addr); + + /* 2nd, write the data to IDA_DATA register */ + port_reg_offset = TXGBE_XPCS_IDA_DATA; + wr32(hw, port_reg_offset, data); +} + +/** + * txgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = rd32(hw, TXGBE_RDB_UP2TC); + for (i = 0; i < TXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = TXGBE_RDB_UP2TC_UP_MASK & + (reg >> (i * TXGBE_RDB_UP2TC_UP_SHIFT)); +} + +/** + * txgbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table.
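+ *
+ * For reference, the PCI core offers an equivalent query; this driver
+ * open-codes the capability walk so it can also apply the
+ * TXGBE_REMOVED() surprise-removal check:
+ *
+ *	int nvec = pci_msix_vec_count(adapter->pdev);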
+ **/ +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + max_msix_count = TXGBE_MAX_MSIX_VECTORS_SAPPHIRE; + pos = pci_find_capability(((struct txgbe_adapter *)hw->back)->pdev, PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct txgbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (TXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= TXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * txgbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 txgbe_init_hw(struct txgbe_hw *hw) +{ + s32 status; + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == 0) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + return status; +} + +/** + * txgbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw) +{ + u16 i = 0; + + rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_MPCNT(i)); + + rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, TXGBE_RDB_LXONTXC); + rd32(hw, TXGBE_RDB_LXOFFTXC); + rd32(hw, TXGBE_MAC_LXONRXC); + rd32(hw, TXGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + rd32(hw, TXGBE_RDB_PXONTXC(i)); + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + rd32(hw, TXGBE_MAC_PXONRXC(i)); + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i << 16); + rd32(hw, TXGBE_MAC_PXOFFRXC); + } + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_PXON2OFFCNT(i)); + for (i = 0; i < 128; i++) + wr32(hw, TXGBE_PX_MPRC(i), 0); + + rd32(hw, TXGBE_PX_GPRC); + rd32(hw, TXGBE_PX_GPTC); + rd32(hw, TXGBE_PX_GORC_MSB); + rd32(hw, TXGBE_PX_GOTC_MSB); + + rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RDM_DRP_PKT); + + return 0; +} + +/* txgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. + */ +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) +{ + bool supported = false; + u32 speed; + bool link_up = false; + u8 device_type = hw->subsystem_device_id & 0xF0; + + switch (hw->phy.media_type) { + case txgbe_media_type_fiber_qsfp: + case txgbe_media_type_fiber: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + /* amlite TODO*/ + if (link_up) + supported = speed == TXGBE_LINK_SPEED_1GB_FULL ? 
+ true : false; + else + supported = true; + break; + case txgbe_media_type_backplane: + supported = (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII); + break; + case txgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); + return supported; +} + +/* txgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + */ +s32 txgbe_setup_fc(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + u32 pcap = 0; + u32 value = 0; + u32 pcap_backplane = 0; + + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + return 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "txgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == txgbe_fc_default) + hw->fc.requested_mode = txgbe_fc_full; + + /* Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + + /* The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case txgbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case txgbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + case txgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + /* Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV); + value = (value & ~(TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM)) | pcap; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV, value); + + /* AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. 
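+ * (For reference, the 802.3 Annex 28B pause encoding programmed above:
+ * SYM alone advertises symmetric pause, ASM alone advertises
+ * asymmetric pause toward the partner (Tx pause), and SYM | ASM lets
+ * autoneg resolve to Rx pause or full, which is why txgbe_fc_rx_pause
+ * falls through to the txgbe_fc_full case.)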
+ */ + if (hw->phy.media_type == txgbe_media_type_backplane) { + value = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + value = (value & ~(TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM)) | + pcap_backplane; + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1, value); + + } else if ((hw->phy.media_type == txgbe_media_type_copper) && + (txgbe_device_supports_autoneg_fc(hw))) { + /* avoid fw access phy */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) { + /* Let firmware know the driver has taken over */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + + mdelay(100); + } + + ret_val = txgbe_set_phy_pause_advertisement(hw, pcap_backplane); + + /* Let firmware take over control of h/w */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + } +out: + return ret_val; +} + +/* txgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * Reads the part number string from the EEPROM. + */ +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (!pba_num) + return TXGBE_ERR_INVALID_ARGUMENT; + + ret_val = hw->eeprom.ops.read(hw, + hw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + &data); + if (ret_val) + return ret_val; + + ret_val = hw->eeprom.ops.read(hw, + hw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + &pba_ptr); + if (ret_val) + return ret_val; + + /* if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != TXGBE_PBANUM_PTR_GUARD) { + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) + return TXGBE_ERR_NO_SPACE; + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) + return ret_val; + + if (length == 0xFFFF || length == 0) + return TXGBE_ERR_PBA_SECTION; + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) + return TXGBE_ERR_NO_SPACE; + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) + return ret_val; + + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/* txgbe_get_mac_addr - Generic get MAC address + * @hw: pointer to 
hardware structure + * @mac_addr: Adapter MAC address + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + */ +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/* txgbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * * Stores the PCI bus info (speed, width, type) within the txgbe_hw structure + */ +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status) +{ + /* amlite: TODO */ + if (hw->bus.type == txgbe_bus_type_unknown) + hw->bus.type = txgbe_bus_type_pci_express; + + switch (link_status & TXGBE_PCI_LINK_WIDTH) { + case TXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = PCIE_LNK_X1; + break; + case TXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = PCIE_LNK_X2; + break; + case TXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = PCIE_LNK_X4; + break; + case TXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = PCIE_LNK_X8; + break; + default: + hw->bus.width = PCIE_LNK_WIDTH_UNKNOWN; + break; + } + + switch (link_status & TXGBE_PCI_LINK_SPEED) { + case TXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = PCIE_SPEED_2_5GT; + break; + case TXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = PCIE_SPEED_5_0GT; + break; + case TXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = PCIE_SPEED_8_0GT; + break; + default: + hw->bus.speed = PCI_SPEED_UNKNOWN; + break; + } +} + +/* txgbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the txgbe_hw structure. + */ +s32 txgbe_get_bus_info(struct txgbe_hw *hw) +{ + u16 link_status; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = TXGBE_READ_PCIE_WORD(hw, TXGBE_PCI_LINK_STATUS); + + txgbe_set_pci_config_data(hw, link_status); + + return 0; +} + +/* txgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + */ +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw) +{ + struct txgbe_bus_info *bus = &hw->bus; + u32 reg; + + reg = rd32(hw, TXGBE_CFG_PORT_ST); + bus->lan_id = TXGBE_CFG_PORT_ST_LAN_ID(reg); + + /* check for a port swap */ + reg = rd32(hw, TXGBE_MIS_PWR); + if (TXGBE_MIS_PWR_LAN_ID(reg) == TXGBE_MIS_PWR_LAN_ID_1) + bus->func = 0; + else + bus->func = bus->lan_id; +} + +/** + * txgbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. 
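+ *
+ * A minimal sketch of the intended use from the driver's down path
+ * (illustrative only):
+ *
+ *	hw->mac.ops.stop_adapter(hw);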
+ **/ +s32 txgbe_stop_adapter(struct txgbe_hw *hw) +{ + u16 i; + struct txgbe_adapter *adapter = hw->back; + + /*Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + hw->mac.ops.disable_rx(hw); + + /* Set interrupt mask to stop interrupts from being generated */ + txgbe_intr_disable(hw, TXGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, TXGBE_PX_MISC_IC, 0xffffffff); + wr32(hw, TXGBE_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, TXGBE_PX_TR_CFG(i), + TXGBE_PX_TR_CFG_SWFLSH | TXGBE_PX_TR_CFG_ENABLE, + TXGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, TXGBE_PX_RR_CFG(i), + TXGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + TXGBE_WRITE_FLUSH(hw); + + /* Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + if (!(adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET)) + return txgbe_disable_pcie_master(hw); + else + return 0; +} + +/** + * txgbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, (value & 0xFFFC) | 0x0); + } + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * txgbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, (value & 0xFFFC) | 0x1); + } + + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~(index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * txgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_EEPROM; + u32 timeout = 4000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + /* this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + txgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) + status = 0; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + /* Set the SW EEPROM semaphore bit to request access */ + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, TXGBE_MNG_SW_SM_SM); + + /* If we set the bit successfully then we got + * semaphore. + */ + swsm = rd32(hw, TXGBE_MNG_SW_SM); + if (swsm & TXGBE_MNG_SW_SM_SM) + break; + } + usec_delay(50); + } + + /* Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted, MNG_SW_SM_SM is 0x%08x.\n", + swsm); + txgbe_release_eeprom_semaphore(hw); + status = TXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "Software semaphore SMBI is not granted, MNG_SW_SM_SM is 0x%08x.\n", + swsm); + } + + return status; +} + +/** + * txgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw) +{ + if (txgbe_check_mng_access(hw)) { + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, 0); + wr32m(hw, TXGBE_MIS_SWSM, + TXGBE_MIS_SWSM_SMBI, 0); + TXGBE_WRITE_FLUSH(hw); + } +} + +/** + * txgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. 
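+ *
+ * For example (all illustrative): 01:00:5e:00:00:01 fails the
+ * multicast check below, ff:ff:ff:ff:ff:ff fails the broadcast check,
+ * and 00:00:00:00:00:00 fails the final all-zeros check.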
+ * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 txgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + /* Make sure it is not a multicast address */ + if (TXGBE_IS_MULTICAST(mac_addr)) { + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (TXGBE_IS_BROADCAST(mac_addr)) { + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + status = TXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * txgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + /* select the MAC address */ + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); + + /* setup VMDq pool mapping */ + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, pools >> 32); + + /* HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= TXGBE_PSR_MAC_SWC_AD_H_AV; + + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, rar_low); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * txgbe_clear_rar - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + /* Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); + + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0); + + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + 0); + + return 0; +} + +/** + * txgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. 
Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + /* If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (txgbe_validate_mac_addr(hw->mac.addr) == + TXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + } else { + /* Setup the receive address. */ + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + for (i = 1; i < rar_entries; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, TXGBE_PSR_MC_TBL(i), 0); + + hw->mac.ops.init_uta_tables(hw); + + return 0; +} + +/** + * txgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +static void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + /* Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } +} + +/** + * txgbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
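+ *
+ * For example, with RAR[0] holding the primary address
+ * (rar_used_count == 1), two secondary addresses land in RAR[1] and
+ * RAR[2]; once all num_rar_entries registers are in use, each further
+ * address only increments overflow_promisc and the device is put into
+ * unicast promiscuous mode.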
+ **/
+s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list,
+			      u32 addr_count, txgbe_mc_addr_itr next)
+{
+	u8 *addr;
+	u32 i;
+	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+	u32 uc_addr_in_use;
+	u32 vmdq;
+
+	/* Clear accounting of old secondary address list,
+	 * don't count RAR[0]
+	 */
+	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	/* Zero out the other receive addresses */
+	for (i = 0; i < uc_addr_in_use; i++) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 1 + i);
+		wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0);
+		wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0);
+	}
+
+	/* Add the new addresses */
+	for (i = 0; i < addr_count; i++) {
+		addr = next(hw, &addr_list, &vmdq);
+		txgbe_add_uc_addr(hw, addr, vmdq);
+	}
+
+	if (hw->addr_ctrl.overflow_promisc) {
+		/* enable promisc if not already in overflow or set by user */
+		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			wr32m(hw, TXGBE_PSR_CTL,
+			      TXGBE_PSR_CTL_UPE, TXGBE_PSR_CTL_UPE);
+		}
+	} else {
+		/* only disable if set by overflow, not by user */
+		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			wr32m(hw, TXGBE_PSR_CTL,
+			      TXGBE_PSR_CTL_UPE, 0);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		WARN_ON(1);
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ * txgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: multicast address to hash into the table
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = txgbe_mta_vector(hw, mc_addr);
+	/* The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits. We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value. The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register is determined by the lower 5 bits of
+	 * the value.
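+	 * For example, for the multicast address 01:00:5e:00:00:01 with
+	 * mc_filter_type 0, txgbe_mta_vector() returns 0x010, so
+	 * vector_reg = 0 and vector_bit = 16, i.e. bit 16 of
+	 * mta_shadow[0] is set.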
+ */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * txgbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, txgbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + u32 psrctl; + + /* Set the new number of MC addresses that we + * are being requested to use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) + txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, TXGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_enable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_disable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. 
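+ *
+ * The per-TC water marks are programmed in 1 KB units: for an
+ * illustrative low_water[i] of 10 and high_water[i] of 20, the function
+ * writes FCRTL = 10 << 10 (0x2800) and FCRTH = (20 << 10) | XOFFE, i.e.
+ * XOFF above 20 KB of buffer use and XON again below 10 KB.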
+ **/ +s32 txgbe_fc_enable(struct txgbe_hw *hw) +{ + u32 mflcn_reg = 0; + u32 fccfg_reg = 0; + s32 ret_val = 0; + u32 fcrtl = 0; + u32 fcrth = 0; + u32 reg = 0; + int i = 0; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* Negotiate the fc mode to use */ + txgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~(TXGBE_MAC_RX_FLOW_CTRL_PFCE | + TXGBE_MAC_RX_FLOW_CTRL_RFE); + + fccfg_reg = rd32(hw, TXGBE_RDB_RFCC); + fccfg_reg &= ~(TXGBE_RDB_RFCC_RFCE_802_3X | + TXGBE_RDB_RFCC_RFCE_PRIORITY); + + /* The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case txgbe_fc_none: + /* Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case txgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case txgbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, TXGBE_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10); + + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | + TXGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, TXGBE_RDB_RFCL(i), 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
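+			 * For instance, assuming TXGBE_RDB_PB_SZ(i) reads
+			 * back a 64 KB (0x10000 byte) packet buffer, fcrth
+			 * becomes 0x10000 - 24576 = 40960 bytes.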
+			 */
+			fcrth = rd32(hw, TXGBE_RDB_PB_SZ(i)) - 24576;
+		}
+
+		wr32(hw, TXGBE_RDB_RFCH(i), fcrth);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		wr32(hw, TXGBE_RDB_RFCV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2);
+
+out:
+	return ret_val;
+}
+
+/**
+ * txgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+static s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+	if (!adv_reg || !lp_reg)
+		return TXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+		if (hw->fc.requested_mode == txgbe_fc_full)
+			hw->fc.current_mode = txgbe_fc_full;
+		else
+			hw->fc.current_mode = txgbe_fc_rx_pause;
+	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = txgbe_fc_tx_pause;
+	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = txgbe_fc_rx_pause;
+	} else {
+		hw->fc.current_mode = txgbe_fc_none;
+	}
+	return 0;
+}
+
+/**
+ * txgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to autonegotiation on 1 gig fiber.
+ **/
+static s32 txgbe_fc_autoneg_fiber(struct txgbe_hw *hw)
+{
+	u32 pcs_anadv_reg, pcs_lpab_reg;
+	s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	pcs_anadv_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV);
+	pcs_lpab_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_LP_BABL);
+
+	ret_val = txgbe_negotiate_fc(hw, pcs_anadv_reg,
+				     pcs_lpab_reg,
+				     TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM,
+				     TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM,
+				     TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM,
+				     TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM);
+
+	return ret_val;
+}
+
+/**
+ * txgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 txgbe_fc_autoneg_backplane(struct txgbe_hw *hw)
+{
+	u32 anlp1_reg, autoc_reg;
+	s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	/* Read the 10g AN autoc and LP ability registers and resolve
+	 * local flow control settings accordingly
+	 */
+	autoc_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1);
+	anlp1_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1);
+
+	ret_val = txgbe_negotiate_fc(hw, autoc_reg,
+				     anlp1_reg, TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM,
+				     TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM,
+				     TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM,
+				     TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM);
+
+	return ret_val;
+}
+
+/**
+ * txgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
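+ *
+ * The resolution in txgbe_negotiate_fc() above follows the usual IEEE
+ * pause resolution: both sides symmetric -> full (or rx_pause unless
+ * full was requested); local asymmetric only with partner sym+asym ->
+ * tx_pause; local sym+asym with partner asym only -> rx_pause;
+ * anything else -> none.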
+ **/ +static s32 txgbe_fc_autoneg_copper(struct txgbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + txgbe_get_phy_advertised_pause(hw, &technology_ability_reg); + txgbe_get_lp_advertised_pause(hw, &lp_technology_ability_reg); + + return txgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE); +} + +/** + * txgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void txgbe_fc_autoneg(struct txgbe_hw *hw) +{ + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up = 0; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(TXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case txgbe_media_type_fiber: + if (speed == TXGBE_LINK_SPEED_1GB_FULL) + ret_val = txgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case txgbe_media_type_backplane: + ret_val = txgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case txgbe_media_type_copper: + if (txgbe_device_supports_autoneg_fc(hw)) + ret_val = txgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * txgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. TXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. 
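+ *
+ * Each poll below sleeps 100 us, so the worst-case wait is roughly
+ * TXGBE_PCI_MASTER_DISABLE_TIMEOUT / 10 milliseconds.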
+ **/ +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw) +{ + s32 status = 0; + u32 i; + struct txgbe_adapter *adapter = hw->back; + unsigned int num_vfs = adapter->num_vfs; + u16 dev_ctl; + u32 vf_bme_clear = 0; + u16 vid = 0; + u16 cmd = 0; + u32 reg32 = 0; + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(((struct txgbe_adapter *)hw->back)->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING)) || + TXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* BME disable handshake will not be finished if any VF BME is 0 */ + for (i = 0; i < num_vfs; i++) { + struct pci_dev *vfdev = NULL; + + if (!adapter->vfinfo) + break; + vfdev = adapter->vfinfo[i].vfdev; + if (!vfdev) + continue; + pci_read_config_word(vfdev, 0x4, &dev_ctl); + if ((dev_ctl & 0x4) == 0) { + vf_bme_clear = 1; + break; + } + } + + /* Poll for master request bit to clear */ + for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING))) + goto out; + } + + if (!vf_bme_clear) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = TXGBE_ERR_MASTER_REQUESTS_PENDING; + + /* print out PCI configuration space value */ + txgbe_print_tx_hang_status(adapter); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCI VID is 0x%x\n", vid); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &cmd); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCI COMMAND value is 0x%x.\n", cmd); + + reg32 = rd32(hw, 0x10000); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "read 0x10000 value is 0x%08x\n", reg32); + } + +out: + return status; +} + +/** + * txgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (txgbe_get_eeprom_semaphore(hw)) + return TXGBE_ERR_SWFW_SYNC; + + if (txgbe_check_mng_access(hw)) { + gssr = rd32(hw, TXGBE_MNG_SWFW_SYNC); + if (gssr & (fwmask | swmask)) { + /* Resource is currently in use by FW or SW */ + txgbe_release_eeprom_semaphore(hw); + msec_delay(5); + } else { + gssr |= swmask; + wr32(hw, TXGBE_MNG_SWFW_SYNC, gssr); + txgbe_release_eeprom_semaphore(hw); + return 0; + } + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + txgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return TXGBE_ERR_SWFW_SYNC; +} + +/** + * txgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + txgbe_get_eeprom_semaphore(hw); + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_SWFW_SYNC, mask, 0); + + txgbe_release_eeprom_semaphore(hw); +} + +/** + * txgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive 
data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECRX_POLL 40
+	int i;
+	int secrxreg;
+
+	wr32m(hw, TXGBE_RSC_CTL,
+	      TXGBE_RSC_CTL_RX_DIS, TXGBE_RSC_CTL_RX_DIS);
+	for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) {
+		secrxreg = rd32(hw, TXGBE_RSC_ST);
+		if (!(secrxreg & TXGBE_RSC_ST_RSEC_RDY))
+			usec_delay(1000);
+		else
+			break;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw)
+{
+	wr32m(hw, TXGBE_RSC_CTL,
+	      TXGBE_RSC_CTL_RX_DIS, 0);
+	TXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ * txgbe_disable_sec_tx_path - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block.
+ **/
+s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	wr32m(hw, TXGBE_TSC_CTL,
+	      TXGBE_TSC_CTL_TX_DIS, TXGBE_TSC_CTL_TX_DIS);
+	for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = rd32(hw, TXGBE_TSC_ST);
+		if (!(sectxreg & TXGBE_TSC_ST_SECTX_RDY))
+			usec_delay(1000);
+		else
+			break;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_enable_sec_tx_path - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw)
+{
+	wr32m(hw, TXGBE_TSC_CTL,
+	      TXGBE_TSC_CTL_TX_DIS, 0);
+	TXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+static s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw,
+					 u16 *san_mac_offset)
+{
+	s32 ret_val;
+
+	/* First read the EEPROM pointer to see if the MAC addresses are
+	 * available.
+	 */
+	ret_val = hw->eeprom.ops.read(hw,
+				      hw->eeprom.sw_region_offset + TXGBE_SAN_MAC_ADDR_PTR,
+				      san_mac_offset);
+	if (ret_val) {
+		ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE,
+			      "eeprom at offset %d failed",
+			      TXGBE_SAN_MAC_ADDR_PTR);
+	}
+
+	return ret_val;
+}
+
+/**
+ * txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+{
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+	s32 ret_val;
+
+	/* First read the EEPROM pointer to see if the MAC addresses are
+	 * available. If they're not, no point in calling set_lan_id() here.
+	 */
+	ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		goto san_mac_addr_out;
+
+	/* apply the port offset to the address offset */
+	(hw->bus.func) ?
(san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+		(san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+	for (i = 0; i < 3; i++) {
+		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+					      &san_mac_data);
+		if (ret_val) {
+			ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE,
+				      "eeprom read at offset %d failed",
+				      san_mac_offset);
+			goto san_mac_addr_out;
+		}
+		san_mac_addr[i * 2] = (u8)(san_mac_data);
+		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+		san_mac_offset++;
+	}
+	return 0;
+
+san_mac_addr_out:
+	/* No addresses available in this EEPROM. It's not an
+	 * error though, so just wipe the local address and return.
+	 */
+	for (i = 0; i < 6; i++)
+		san_mac_addr[i] = 0xFF;
+	return 0;
+}
+
+/**
+ * txgbe_set_san_mac_addr - Write the SAN MAC address to the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Write a SAN MAC address to the EEPROM.
+ **/
+s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+{
+	s32 ret_val;
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+
+	/* Look for SAN mac address pointer. If not defined, return */
+	ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		return TXGBE_ERR_NO_SAN_ADDR_PTR;
+
+	/* Apply the port offset to the address offset */
+	(hw->bus.func) ? (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+		(san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+	for (i = 0; i < 3; i++) {
+		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+		san_mac_data |= (u16)(san_mac_addr[i * 2]);
+		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+		san_mac_offset++;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the rar that it is already in; adds to the pool list
+ **/
+s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+	u32 rar;
+	u32 rar_low, rar_high;
+	u32 addr_low, addr_high;
+
+	/* swap bytes for HW little endian */
+	addr_low = addr[5] | (addr[4] << 8)
+			   | (addr[3] << 16)
+			   | (addr[2] << 24);
+	addr_high = addr[1] | (addr[0] << 8);
+
+	/* Either find the mac_id in rar or find the first empty space.
+	 * rar_highwater points to just after the highest currently used
+	 * rar in order to shorten the search. It grows when we add a new
+	 * rar to the top.
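+	 * For example, with rar_highwater == 3: a hole at RAR[1] is
+	 * reused for a new address; if RAR[0..2] are all valid and none
+	 * matches, the address is written to RAR[3] and rar_highwater
+	 * becomes 4.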
+	 */
+	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+		rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H);
+
+		if ((TXGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0 &&
+		    first_empty_rar == NO_EMPTY_RAR_FOUND) {
+			first_empty_rar = rar;
+		} else if ((rar_high & 0xFFFF) == addr_high) {
+			rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L);
+			if (rar_low == addr_low)
+				break;    /* found it already in the rars */
+		}
+	}
+
+	if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+		/* stick it into first empty RAR slot we found */
+		rar = first_empty_rar;
+		hw->mac.ops.set_rar(hw, rar, addr, vmdq,
+				    TXGBE_PSR_MAC_SWC_AD_H_AV);
+	} else if (rar == hw->mac.rar_highwater) {
+		/* add it to the top of the list and inc the highwater mark */
+		hw->mac.ops.set_rar(hw, rar, addr, vmdq,
+				    TXGBE_PSR_MAC_SWC_AD_H_AV);
+		hw->mac.rar_highwater++;
+	} else if (rar >= hw->mac.num_rar_entries) {
+		return TXGBE_ERR_INVALID_MAC_ADDR;
+	}
+
+	return rar;
+}
+
+/**
+ * txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 mpsar_lo, mpsar_hi;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", rar);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+	mpsar_lo = rd32(hw, TXGBE_PSR_MAC_SWC_VM_L);
+	mpsar_hi = rd32(hw, TXGBE_PSR_MAC_SWC_VM_H);
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		goto done;
+
+	if (!mpsar_lo && !mpsar_hi)
+		goto done;
+
+	/* was that the last pool using this rar? */
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		hw->mac.ops.clear_rar(hw, rar);
+done:
+	return 0;
+}
+
+/**
+ * txgbe_set_vmdq - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @pool: VMDq pool index
+ **/
+s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 pool)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(TXGBE_ERROR_ARGUMENT,
+			      "RAR index %d is out of range.\n", rar);
+		return TXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be invoked in IOV mode. In IOV mode, the
+ * default pool is the next pool after the number of VFs advertised,
+ * not pool 0.
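+ * For example (illustrative), with 7 VFs advertised the PF default pool
+ * is 7, and txgbe_set_vmdq_san_mac(hw, 7) sets bit 7 of
+ * TXGBE_PSR_MAC_SWC_VM_L for the SAN MAC RAR.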
+ * The MPSAR table needs to be updated for the SAN MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq)
+{
+	u32 rar = hw->mac.san_mac_rar_index;
+
+	wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar);
+	if (vmdq < 32) {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 1 << vmdq);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0);
+	} else {
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0);
+		wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 1 << (vmdq - 32));
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_init_uta_tables - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
+{
+	int i;
+
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_PSR_UC_TBL(i), 0);
+
+	return 0;
+}
+
+/**
+ * txgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ **/
+s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan)
+{
+	u32 bits = 0;
+	u32 first_empty_slot = 0;
+	s32 regindex;
+
+	/* short cut the special case */
+	if (vlan == 0)
+		return 0;
+
+	/* Search for the vlan id in the VLVF entries. Save off the first empty
+	 * slot found along the way
+	 */
+	for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) {
+		wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex);
+		bits = rd32(hw, TXGBE_PSR_VLAN_SWC);
+		if (!bits && !(first_empty_slot))
+			first_empty_slot = regindex;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	/* If regindex is less than TXGBE_PSR_VLAN_SWC_ENTRIES, then we found
+	 * the vlan in the VLVF. Else use the first empty VLVF register for
+	 * this vlan id.
+	 */
+	if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) {
+		if (first_empty_slot) {
+			regindex = first_empty_slot;
+		} else {
+			ERROR_REPORT1(TXGBE_ERROR_SOFTWARE,
+				      "No space in VLVF.\n");
+			regindex = TXGBE_ERR_NO_SPACE;
+		}
+	}
+
+	return regindex;
+}
+
+/**
+ * txgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
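+ *
+ * For example, VLAN ID 100 maps to regindex = (100 >> 5) & 0x7F = 3 and
+ * bitindex = 100 & 0x1F = 4, i.e. bit 4 of VLAN table register 3.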
+ **/ +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = 0; + bool vfta_changed = false; + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call txgbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = txgbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * txgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, TXGBE_CFG_PORT_CTL); + if (vt & TXGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits; + + vlvf_index = txgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, TXGBE_PSR_VLAN_SWC, + (TXGBE_PSR_VLAN_SWC_VIEN | vlan)); + if (!vlan_on && !vfta_changed) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
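+				 * For example, if pools 0 and 3 both
+				 * joined VLAN 10 and only pool 0 leaves,
+				 * the bit for pool 3 keeps "bits"
+				 * non-zero, so the VFTA entry for VLAN 10
+				 * is left set.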
+				 */
+				*vfta_changed = false;
+			}
+		} else {
+			wr32(hw, TXGBE_PSR_VLAN_SWC, 0);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 txgbe_clear_vfta(struct txgbe_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++) {
+		wr32(hw, TXGBE_PSR_VLAN_TBL(offset), 0);
+		/* errata 5 */
+		hw->mac.vft_shadow[offset] = 0;
+	}
+
+	for (offset = 0; offset < TXGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
+		wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, offset);
+		wr32(hw, TXGBE_PSR_VLAN_SWC, 0);
+		wr32(hw, TXGBE_PSR_VLAN_SWC_VM_L, 0);
+		wr32(hw, TXGBE_PSR_VLAN_SWC_VM_H, 0);
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check support for the alternative WWNN/WWPN prefixes.
+ **/
+s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+			 u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	offset = hw->eeprom.sw_region_offset + TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+		goto wwn_prefix_err;
+
+	if (alt_san_mac_blk_offset == 0 ||
+	    alt_san_mac_blk_offset == 0xFFFF)
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, &caps))
+		goto wwn_prefix_err;
+	if (!(caps & TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+		ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", offset);
+	}
+
+	offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+		goto wwn_prefix_err;
+
+wwn_prefix_out:
+	return 0;
+
+wwn_prefix_err:
+	ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", offset);
+	return 0;
+}
+
+/**
+ * txgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ **/
+void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf)
+{
+	u64 pfvfspoof = 0;
+
+	if (enable) {
+		/* The PF should be allowed to spoof so that it can support
+		 * emulation mode NICs. Do not set the bits assigned to the PF.
+		 * Remaining pools belong to the PF so they do not need to have
+		 * anti-spoofing enabled.
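+		 * For example, with pf == 8, (1 << pf) - 1 == 0xFF below
+		 * enables MAC anti-spoofing for VF pools 0-7 only.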
+ */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, TXGBE_TDM_MAC_AS_L, + pfvfspoof & 0xffffffff); + wr32(hw, TXGBE_TDM_MAC_AS_H, pfvfspoof >> 32); + } else { + wr32(hw, TXGBE_TDM_MAC_AS_L, 0); + wr32(hw, TXGBE_TDM_MAC_AS_H, 0); + } +} + +/** + * txgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_VLAN_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_VLAN_AS_H, pfvfspoof); + } +} + +/** + * txgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_ETYPE_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_ETYPE_AS_H, pfvfspoof); + } +} + +/** + * txgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps) +{ + hw->eeprom.ops.read(hw, hw->eeprom.sw_region_offset + TXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * txgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 txgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (0 - sum); +} + +/** + * txgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return TXGBE_ERR_HOST_INTERFACE_COMMAND. 
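+ *
+ * A typical caller (e.g. txgbe_reset_hostif() below) points @buffer at a
+ * structure prefixed with struct txgbe_hic_hdr, passes the structure size
+ * as @length and TXGBE_HI_COMMAND_TIMEOUT as @timeout, and sets
+ * @return_data so the FW response status can be checked.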
+ **/ +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + struct txgbe_hic_hdr *send_hdr = (struct txgbe_hic_hdr *)buffer; + u32 hdr_size = sizeof(struct txgbe_hic_hdr); + struct txgbe_hic_hdr *recv_hdr; + u32 buf[64] = {}; + u32 hicr, i, bi; + s32 status = 0; + u32 dword_len; + u16 buf_len; + u8 send_cmd; + + if (length == 0 || length > TXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure buffersize=%d.\n", length); + return TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + if (hw->mac.type == txgbe_mac_sp) + if (hw->mac.ops.acquire_swfw_sync(hw, TXGBE_MNG_SWFW_SYNC_SW_MB) != 0) + return TXGBE_ERR_SWFW_SYNC; + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer length failure, not aligned to dword"); + status = TXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + dword_len = length >> 2; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + WARN_ON(in_interrupt()); + while (test_and_set_bit(__TXGBE_SWFW_BUSY, &adapter->state)) { + timeout--; + if (!timeout) + return TXGBE_ERR_TIMEOUT; + usleep_range(1000, 2000); + } + + /* index to unique seq id for each mbox message */ + send_hdr->cksum_or_index.index = adapter->swfw_index; + send_cmd = send_hdr->cmd; + + /* write data to SW-FW mbox array */ + for (i = 0; i < dword_len; i++) { + wr32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_AML_MNG_MBOX_SW2FW, i); + } + + /* amlite: generate interrupt to notify FW */ + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, 0); + wr32m(hw, TXGBE_AML_MNG_MBOX_CTL_SW2FW, + TXGBE_AML_MNG_MBOX_NOTIFY, TXGBE_AML_MNG_MBOX_NOTIFY); + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* polling reply from FW */ + timeout = 50; + do { + timeout--; + usleep_range(1000, 2000); + + /* read hdr */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + /* check hdr */ + recv_hdr = (struct txgbe_hic_hdr *)buffer; + + if (recv_hdr->cmd == send_cmd && + recv_hdr->cksum_or_index.index == adapter->swfw_index) + break; + + } while (timeout); + + if (!timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Polling from FW messages timeout, cmd is 0x%x, index is %d\n", + send_cmd, adapter->swfw_index); + status = TXGBE_ERR_TIMEOUT; + goto rel_out; + } + + if (!return_data) + goto rel_out; + + /* If there is any thing in data position pull it in */ + buf_len = recv_hdr->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Buffer not large enough for reply message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + for (; bi <= dword_len; bi++) { + buffer[bi] = rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } + } else if (hw->mac.type == txgbe_mac_sp) { + /* legacy sw-fw mbox + * The device driver writes the relevant command block + * into the ram area. 
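+		 * It then sets TXGBE_MNG_MBOX_CTL_SWRDY to signal the ARC
+		 * that a command is pending and polls
+		 * TXGBE_MNG_MBOX_CTL_FWRDY for completion, as done below.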
+ */ + for (i = 0; i < dword_len; i++) { + if (txgbe_check_mng_access(hw)) { + wr32a(hw, TXGBE_MNG_MBOX, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (txgbe_check_mng_access(hw)) { + wr32m(hw, TXGBE_MNG_MBOX_CTL, + TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + hicr = rd32(hw, TXGBE_MNG_MBOX_CTL); + if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + buf[0] = rd32(hw, TXGBE_MNG_MBOX); + + if ((buf[0] & 0xff0000) >> 16 == 0x80) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "It's unknown cmd.\n"); + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "write value:\n"); + for (i = 0; i < dword_len; i++) + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buffer[i]); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "read value:\n"); + for (i = 0; i < dword_len; i++) + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buf[i]); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + } + +rel_out: + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* index++, index replace txgbe_hic_hdr.checksum */ + adapter->swfw_index = send_hdr->cksum_or_index.index == TXGBE_HIC_HDR_INDEX_MAX ? + 0 : send_hdr->cksum_or_index.index + 1; + + clear_bit(__TXGBE_SWFW_BUSY, &adapter->state); + } else { + hw->mac.ops.release_swfw_sync(hw, TXGBE_MNG_SWFW_SYNC_SW_MB); + } + + return status; +} + +/** + * txgbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. 
On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct txgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + if (hw->mac.type == txgbe_mac_sp) { + fw_cmd.hdr.cksum_or_index.checksum = 0; + fw_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + } + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * txgbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_reset_hostif(struct txgbe_hw *hw) +{ + struct txgbe_hic_reset reset_cmd; + int i; + s32 status = 0; + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + if (hw->mac.type == txgbe_mac_sp) { + reset_cmd.hdr.cksum_or_index.checksum = 0; + reset_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + } + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + hw->link_status = TXGBE_LINK_STATUS_NONE; + } else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + break; + } + + return status; +} + +static u16 txgbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct txgbe_hic_upg_start start_cmd; + struct txgbe_hic_upg_write write_cmd; + struct txgbe_hic_upg_verify verify_cmd; + u32 offset; + s32 status = 0; + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + if (hw->mac.type == txgbe_mac_sp) { + start_cmd.hdr.cksum_or_index.checksum = 0; + start_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + } + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = 
txgbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + TXGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + } else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = txgbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = txgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + TXGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + } else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case TXGBE_MODULE_EEPROM: + verify_cmd.action_flag = TXGBE_RELOAD_EEPROM; + break; + case TXGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = TXGBE_RESET_FIRMWARE; + break; + case TXGBE_MODULE_HARDWARE: + verify_cmd.action_flag = TXGBE_RESET_LAN; + break; + default: + return status; + } + + if (hw->mac.type == txgbe_mac_sp) + verify_cmd.hdr.cksum_or_index.checksum = txgbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + status = txgbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + TXGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + return status; +} + +/* cmd_addr is used for some special command: + * 1. to be sector address, when implemented erase sector command + * 2. 
to be flash address when implemented read, write flash address + */ +static int txgbe_fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0, time_out = 0; + + cmd_val = (cmd << SPI_CLK_CMD_OFFSET) | (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr; + wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val); + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (time_out == SPI_TIME_OUT_VALUE) + return -ETIMEDOUT; + + time_out = time_out + 1; + usleep_range(50, 100); + } + + return 0; +} + +static int fmgr_usr_cmd_op(struct txgbe_hw *hw, u32 usr_cmd) +{ + u8 status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = txgbe_fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +static int txgbe_flash_erase_chip(struct txgbe_hw *hw) +{ + return txgbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); +} + +static int txgbe_flash_erase_sector(struct txgbe_hw *hw, u32 sec_addr) +{ + return txgbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); +} + +static int txgbe_flash_write_dword(struct txgbe_hw *hw, u32 addr, u32 dword) +{ + int status = 0; + u32 data; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = txgbe_fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + if (status) + return status; + + txgbe_flash_read_dword(hw, addr, &data); + if (dword != data) + return -EIO; + + return 0; +} + +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data) +{ + int ret = 0; + + ret = txgbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(hw, SPI_H_DAT_REG_ADDR); + + return ret; +} + +static int txgbe_flash_write_unlock(struct txgbe_hw *hw) +{ + int status; + struct txgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + + return status; +} + +int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + u32 mac_addr0_dword0_t, mac_addr0_dword1_t, mac_addr1_dword0_t, mac_addr1_dword1_t; + u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + struct txgbe_adapter *adapter = hw->back; + u8 status = 0, skip = 0, flash_vendor = 0; + u32 sector_num = 0, read_data = 0, i = 0; + u32 sn[24]; + char sn_str[40]; + u8 sn_is_str = true; + u8 *vpd_tend = NULL; + u32 curadr = 0; + u32 vpdadr = 0; + u8 id_str_len, pn_str_len, sn_str_len, rv_str_len; + u32 mac_addr0_dword0_addr, mac_addr0_dword1_addr; + u32 mac_addr1_dword0_addr, mac_addr1_dword1_addr; + u16 subsystem_device_id; + u16 device_id; + u16 vpd_ro_len; + u32 chksum = 0; + u32 upgrade_check = 0x0; + int err = 0; + + if (hw->mac.type == txgbe_mac_sp) { + upgrade_check = PRB_CTL; + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } else { + upgrade_check = PRB_SCRATCH; + if (data[0x3000] == 0x25 && data[0x3001] == 0x20) { + subsystem_device_id = data[0x302c] << 8 | data[0x302d]; + device_id = data[0x302e] << 8 | data[0x302f]; + } else { + subsystem_device_id = data[0xfffdc] << 8 | data[0xfffdd]; + device_id = data[0xfffde] << 8 | data[0xfffdf]; + } + } + + read_data = rd32(hw, upgrade_check); + if (read_data & 0x80000000) { + e_info(drv, "The flash has been successfully upgraded once, please reboot to make it work.\n"); + return 
+
+	/* check sub_id */
+	e_info(drv, "Checking sub_id .......\n");
+	e_info(drv, "The card's sub_id : %04x\n", hw->subsystem_device_id);
+	e_info(drv, "The image's sub_id : %04x\n", subsystem_device_id);
+
+	if ((hw->subsystem_device_id & 0xfff) == (subsystem_device_id & 0xfff)) {
+		e_info(drv, "The image matches the card\n");
+	} else if (hw->subsystem_device_id == 0xffff) {
+		e_info(drv, "update anyway\n");
+	} else {
+		e_err(drv, "====The image does not match the card====\n");
+		e_err(drv, "====Please check your image====\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* check dev_id */
+	e_info(drv, "Checking dev_id .......\n");
+	e_info(drv, "The image's dev_id : %04x\n", device_id);
+	e_info(drv, "The card's dev_id : %04x\n", hw->device_id);
+	if (!((hw->device_id & 0xfff0) == (device_id & 0xfff0)) &&
+	    !(hw->device_id == 0xffff)) {
+		e_err(drv, "====The image does not match the card====\n");
+		e_err(drv, "====Please check your image====\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* unlock flash write protect */
+	wr32(hw, TXGBE_SPI_CMDCFG0, 0x9f050206);
+	wr32(hw, 0x10194, 0x9f050206);
+
+	msleep(1000);
+
+	switch (hw->mac.type) {
+	case txgbe_mac_sp:
+		mac_addr0_dword0_addr = MAC_ADDR0_WORD0_OFFSET_1G;
+		mac_addr0_dword1_addr = MAC_ADDR0_WORD1_OFFSET_1G;
+		mac_addr1_dword0_addr = MAC_ADDR1_WORD0_OFFSET_1G;
+		mac_addr1_dword1_addr = MAC_ADDR1_WORD1_OFFSET_1G;
+		break;
+	case txgbe_mac_aml:
+	case txgbe_mac_aml40:
+		mac_addr0_dword0_addr = AMLITE_MAC_ADDR0_WORD0_OFFSET;
+		mac_addr0_dword1_addr = AMLITE_MAC_ADDR0_WORD1_OFFSET;
+		mac_addr1_dword0_addr = AMLITE_MAC_ADDR1_WORD0_OFFSET;
+		mac_addr1_dword1_addr = AMLITE_MAC_ADDR1_WORD1_OFFSET;
+		break;
+	default:
+		e_err(drv, "====Unknown mac type====\n");
+		return -EOPNOTSUPP;
+	}
+
+	txgbe_flash_read_dword(hw, mac_addr0_dword0_addr, &mac_addr0_dword0_t);
+	txgbe_flash_read_dword(hw, mac_addr0_dword1_addr, &mac_addr0_dword1_t);
+	mac_addr0_dword1_t = mac_addr0_dword1_t & U16_MAX;
+	txgbe_flash_read_dword(hw, mac_addr1_dword0_addr, &mac_addr1_dword0_t);
+	txgbe_flash_read_dword(hw, mac_addr1_dword1_addr, &mac_addr1_dword1_t);
+	mac_addr1_dword1_t = mac_addr1_dword1_t & U16_MAX;
+
+	for (i = 0; i < 24; i++)
+		txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, &sn[i]);
+
+	if (sn[23] == U32_MAX)
+		sn_is_str = false;
+
+	txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, &serial_num_dword0_t);
+	txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, &serial_num_dword1_t);
+	txgbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, &serial_num_dword2_t);
+	e_info(drv, "Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t);
+	e_info(drv, "     MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t);
+
+	status = fmgr_usr_cmd_op(hw, 0x6);  /* write enable */
+	status = fmgr_usr_cmd_op(hw, 0x98); /* global protection un-lock */
+	txgbe_flash_write_unlock(hw);
+	msleep(1000);
+
+	/* rebuild vpd */
+	vpd_tend = kcalloc(256, sizeof(u8), GFP_KERNEL);
+	if (!vpd_tend)
+		return -ENOMEM;
+
+	memset(vpd_tend, 0xff, 256 * sizeof(u8));
+
+	curadr = TXGBE_VPD_OFFSET + 1;
+	id_str_len = data[curadr] | data[curadr + 1] << 8;
+	curadr += (7 + id_str_len);
+	pn_str_len = data[curadr];
+	curadr += 1 + pn_str_len;
+
+	for (i = 0; i < curadr - TXGBE_VPD_OFFSET; i++)
+		vpd_tend[i] = data[TXGBE_VPD_OFFSET + i];
+
+	memset(sn_str, 0x0, sizeof(sn_str));
+	if (sn_is_str) {
+		for (i = 0; i < 24; i++)
+			sn_str[i] = sn[23 - i];
+
+		sn_str_len = strlen(sn_str);
+	} else {
+		sn_str_len = 0x12;
+		sprintf(sn_str, "%02x%08x%08x",
+			(serial_num_dword2_t & 0xff),
+			serial_num_dword1_t, serial_num_dword0_t);
+	}
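+
+	/* The code below splices this card's SN keyword into the image's VPD:
+	 * keep the ID string and PN keyword from the new image, write "SN"
+	 * plus the serial number, then terminate the read-only section with
+	 * an "RV" keyword whose first data byte is chosen so that all bytes
+	 * from the start of the VPD structure through that byte sum to zero
+	 * (mod 256), followed by the 0x78 small-resource end tag.
+	 */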
+	vpdadr = curadr - TXGBE_VPD_OFFSET;
+
+	if (data[curadr] == 'S' && data[curadr + 1] == 'N') {
+		if (data[curadr + 2]) {
+			for (i = sn_str_len; i < data[curadr + 2]; i++)
+				sn_str[i] = 0x20;
+			sn_str_len = data[curadr + 2];
+		}
+		curadr += 3 + data[curadr + 2];
+		rv_str_len = data[2 + curadr];
+	} else {
+		rv_str_len = data[2 + curadr];
+	}
+
+	vpd_tend[vpdadr] = 'S';
+	vpd_tend[vpdadr + 1] = 'N';
+	vpd_tend[vpdadr + 2] = sn_str_len;
+
+	for (i = 0; i < sn_str_len; i++)
+		vpd_tend[vpdadr + 3 + i] = sn_str[i];
+
+	vpdadr = vpdadr + 3 + sn_str_len;
+
+	for (i = 0; i < 3; i++)
+		vpd_tend[vpdadr + i] = data[curadr + i];
+
+	vpdadr += 3;
+	for (i = 0; i < rv_str_len; i++)
+		vpd_tend[vpdadr + i] = 0x0;
+
+	vpdadr += rv_str_len;
+	vpd_ro_len = pn_str_len + sn_str_len + rv_str_len + 9;
+	vpd_tend[4 + id_str_len] = vpd_ro_len & 0xff;
+	vpd_tend[5 + id_str_len] = (vpd_ro_len >> 8) & 0xff;
+
+	for (i = 0; i < vpdadr; i++)
+		chksum += vpd_tend[i];
+	chksum = ~(chksum & 0xff) + 1;
+	vpd_tend[vpdadr - rv_str_len] = chksum;
+	vpd_tend[vpdadr] = 0x78;
+
+	if (flash_vendor == 1) {
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720);
+		for (i = 0; i < 8; i++) {
+			txgbe_flash_erase_sector(hw, i * 128);
+			msleep(20);
+		}
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8);
+	}
+
+	sector_num = size / SPI_SECTOR_SIZE;
+	/* Winbond flash: the erase chip command is okay, but erase sector doesn't work */
+	if (flash_vendor == 2) {
+		status = txgbe_flash_erase_chip(hw);
+		e_err(drv, "Erase chip command, return status = %d\n", status);
+		msleep(1000);
+	} else {
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720);
+		for (i = 0; i < sector_num; i++) {
+			status = txgbe_flash_erase_sector(hw, i * SPI_SECTOR_SIZE);
+			if (status)
+				e_err(drv, "Erase sector[%2d] command, return status = %d\n",
+				      i, status);
+			msleep(50);
+		}
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8);
+	}
+
+	/* Program the image file in dwords */
+	for (i = 0; i < size / 4; i++) {
+		read_data = data[4 * i + 3] << 24 |
+			    data[4 * i + 2] << 16 |
+			    data[4 * i + 1] << 8 | data[4 * i];
+		read_data = __le32_to_cpu(read_data);
+		skip = ((i * 4 == mac_addr0_dword0_addr) ||
+			(i * 4 == mac_addr0_dword1_addr) ||
+			(i * 4 == mac_addr1_dword0_addr) ||
+			(i * 4 == mac_addr1_dword1_addr) ||
+			(i * 4 >= PRODUCT_SERIAL_NUM_OFFSET_1G &&
+			 i * 4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 92) ||
+			(i * 4 >= TXGBE_VPD_OFFSET && i * 4 < TXGBE_VPD_END) ||
+			(i * 4 == 0x15c));
+		if (read_data != U32_MAX && !skip) {
+			status = txgbe_flash_write_dword(hw, i * 4, read_data);
+			if (status) {
+				e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x failed!\n",
+				      read_data, i * 4);
+				txgbe_flash_read_dword(hw, i * 4, &read_data);
+				e_err(drv, "       Read data from Flash is: 0x%08x\n",
+				      read_data);
+				err = -EBUSY;
+				goto err_exit;
+			}
+		}
+	}
+
+	for (i = 0; i < 256 / 4; i++) {
+		read_data = vpd_tend[4 * i + 3] << 24 |
+			    vpd_tend[4 * i + 2] << 16 |
+			    vpd_tend[4 * i + 1] << 8 |
+			    vpd_tend[4 * i];
+		read_data = __le32_to_cpu(read_data);
+		if (read_data != U32_MAX) {
+			status = txgbe_flash_write_dword(hw,
+							 TXGBE_VPD_OFFSET + i * 4, read_data);
+			if (status) {
+				e_err(drv, "ERROR: Program 0x%08x @addr: 0x%08x failed!\n",
+				      read_data, TXGBE_VPD_OFFSET + i * 4);
+				txgbe_flash_read_dword(hw, TXGBE_VPD_OFFSET + i * 4, &read_data);
+				e_err(drv, "       Read data from Flash is: 0x%08x\n",
+				      read_data);
+				err = -EBUSY;
+				goto err_exit;
+			}
+		}
+	}
+
+	txgbe_flash_write_dword(hw, mac_addr0_dword0_addr,
+				mac_addr0_dword0_t);
+	txgbe_flash_write_dword(hw,
mac_addr0_dword1_addr, + (mac_addr0_dword1_t | 0x80000000));//lan0 + txgbe_flash_write_dword(hw, mac_addr1_dword0_addr, + mac_addr1_dword0_t); + txgbe_flash_write_dword(hw, mac_addr1_dword1_addr, + (mac_addr1_dword1_t | 0x80000000));//lan1 + if (sn_is_str) { + for (i = 0; i < 24; i++) + txgbe_flash_write_dword(hw, + PRODUCT_SERIAL_NUM_OFFSET_1G + 4 * i, sn[i]); + } else { + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, + serial_num_dword0_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, + serial_num_dword1_t); + txgbe_flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, + serial_num_dword2_t); + } + + wr32(hw, upgrade_check, rd32(hw, upgrade_check) | 0x80000000); + +err_exit: + kfree(vpd_tend); + return err; +} + +/** + * txgbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* txgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= TXGBE_RDB_PB_SZ_SHIFT; + for (; i < (num_pb / 2); i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + fallthrough; + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << TXGBE_RDB_PB_SZ_SHIFT; + for (; i < num_pb; i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
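+	 * With num_pb TCs, the maximum Tx packet buffer is split evenly and
+	 * the per-TC TDM threshold below is that share in KB minus
+	 * TXGBE_TXPKT_SIZE_MAX (e.g. num_pb = 8 gives each TC 1/8 of
+	 * TXGBE_TDB_PB_SZ_MAX).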
*/ + txpktsize = TXGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + wr32(hw, TXGBE_TDB_PB_SZ(i), txpktsize); + wr32(hw, TXGBE_TDM_PB_THRE(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < TXGBE_MAX_PB; i++) { + wr32(hw, TXGBE_RDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDM_PB_THRE(i), 0); + } +} + +/** + * txgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) +{ + s64 tsv; + int i = 0; + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + u32 data_code; + int temp_data, temp_fraction; + struct txgbe_adapter *adapter = hw->back; + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0001); + + while (1) { + data_code = rd32(hw, TXGBE_AML_TS_STS); + if ((data_code & TXGBE_AML_TS_STS_VLD) != 0) + break; + usleep_range(1000, 2000); + if (i++ > PHYINIT_TIMEOUT) { + e_info(drv, "ERROR: Wait 0x1033c Timeout!!!\n"); + return -1; + } + } + + data_code = data_code & 0xFFF; + temp_data = 419400 + 2205 * (data_code * 1000 / 4094 - 500); + + //Change double Temperature to int + tsv = temp_data / 10000; + temp_fraction = temp_data - (tsv * 10000); + if (temp_fraction >= 5000) + tsv += 1; + + data->sensor.temp = (s16)tsv; + } else { + tsv = (s64)(rd32(hw, TXGBE_TS_ST) & + TXGBE_TS_ST_DATA_OUT_MASK); + + tsv = tsv < 1200 ? tsv : 1200; + tsv = -(48380 << 8) / 1000 + + div64_s64(tsv * (31020 << 8), 100000) + - div64_s64(tsv * tsv * (18201 << 8), 100000000) + + div64_s64(tsv * tsv * tsv * (81542 << 8), 1000000000000) + - div64_s64(tsv * tsv * tsv * tsv * (16743 << 8), 1000000000000000); + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + + for (i = 0; i < 100; i++) { + tsv = (s64)rd32(hw, TXGBE_TS_ST); + if (tsv >> 16 != 0x1) { + usleep_range(1000, 2000); + continue; + } else { + tsv = tsv & TXGBE_TS_ST_DATA_OUT_MASK; + tsv = tsv < 1200 ? 
tsv : 1200; + tsv = -(48380 << 8) / 1000 + + div64_s64(tsv * (31020 << 8), 100000) - + div64_s64(tsv * tsv * (18201 << 8), 100000000) + + div64_s64(tsv * tsv * tsv * (81542 << 8), + 1000000000000) - + div64_s64(tsv * tsv * tsv * tsv * (16743 << 8), + 1000000000000000); + + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + break; + } + } + } + + return 0; +} + +/** + * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +{ + s32 status = 0; + + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct txgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + data->sensor.alarm_thresh = 100; + data->sensor.dalarm_thresh = 90; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_AML_TS_ENA, 0x0); + wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO); + wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI); + + wr32(hw, TXGBE_AML_INTR_HIGH_EN, TXGBE_AML_INTR_EN_HI); + wr32(hw, TXGBE_AML_INTR_LOW_EN, TXGBE_AML_INTR_EN_LO); + + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_EVAL_MODE_MASK, 0x10); + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_ALARM_THRE_MASK, 0x186a0000); + wr32m(hw, TXGBE_AML_TS_CTL1, TXGBE_AML_DALARM_THRE_MASK, 0x16f60); + wr32(hw, TXGBE_AML_TS_ENA, 0x1); + } else { + wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(hw, TXGBE_TS_INT_EN, + TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN); + wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA); + + wr32(hw, TXGBE_TS_ALARM_THRE, 677); + wr32(hw, TXGBE_TS_DALARM_THRE, 614); + } + return status; +} + +void txgbe_disable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + if (rxctrl & TXGBE_RDB_PB_CTL_RXEN) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + if (pfdtxgswc & TXGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~TXGBE_RDB_PB_CTL_RXEN; + wr32(hw, TXGBE_RDB_PB_CTL, rxctrl); + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + /* disable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, 0); + } + } +} + +void txgbe_enable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + + wr32m(hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + pfdtxgswc |= TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +/** + * txgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + */ +bool txgbe_mng_present(struct txgbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, TXGBE_MIS_ST); + return fwsm & TXGBE_MIS_ST_MNG_INIT_DN; +} + +bool txgbe_check_mng_access(struct txgbe_hw *hw) +{ + bool ret = false; + u32 rst_delay; + u32 i; + + struct txgbe_adapter *adapter = hw->back; + + if (!txgbe_mng_present(hw)) + return false; + if (adapter->hw.revision_id != 
TXGBE_SP_MPW) + return true; + if (!(adapter->flags2 & TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) + return true; + + rst_delay = (rd32(&adapter->hw, TXGBE_MIS_RST_ST) & + TXGBE_MIS_RST_ST_RST_INIT) >> + TXGBE_MIS_RST_ST_RST_INI_SHIFT; + for (i = 0; i < rst_delay + 2; i++) { + if (!(adapter->flags2 & TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) { + ret = true; + break; + } + msleep(100); + } + return ret; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + struct txgbe_adapter *adapter = hw->back; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, + &link_speed, &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & TXGBE_LINK_SPEED_25GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_25GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_speed == TXGBE_LINK_SPEED_25GB_FULL && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = hw->mac.ops.setup_mac_link(hw, + TXGBE_LINK_SPEED_25GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_speed == TXGBE_LINK_SPEED_10GB_FULL && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = hw->mac.ops.setup_mac_link(hw, + TXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. 
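+		 * The five 100 ms polls below add up to that 500 ms budget.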
+ */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) { + u32 curr_autoneg = 2; + + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL && + link_up && + adapter->autoneg == curr_autoneg) + goto out; + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = hw->mac.ops.setup_mac_link(hw, + TXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_40GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_40GB_FULL; + + if (speed & TXGBE_LINK_SPEED_25GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_25GB_FULL; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < TXGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, TXGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) + break; + msleep(200); + } + if (i == TXGBE_MAX_FLASH_LOAD_POLL_TIME) + err = TXGBE_ERR_FLASH_LOADING_FAILED; + } + return err; +} + +/* The txgbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
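+ *
+ * For example, ptype 0x24 in the table below decodes to mac = IP,
+ * ip = IPV4, proto = TCP and layer = PAY4: a plain, untunneled
+ * IPv4/TCP packet.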
+ * + * Typical work flow: + * + * IF NOT txgbe_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF txgbe_ptype_lookup[ptype].mac == TXGBE_DEC_PTYPE_MAC_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum txgbe_l2_ptypes to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define TXGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ TXGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ TXGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ TXGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ TXGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ TXGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ TXGBE_DEC_PTYPE_LAYER_##layer } + +#define TXGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +struct txgbe_dec_ptype txgbe_ptype_lookup[256] = { + TXGBE_UKN(0x00), + TXGBE_UKN(0x01), + TXGBE_UKN(0x02), + TXGBE_UKN(0x03), + TXGBE_UKN(0x04), + TXGBE_UKN(0x05), + TXGBE_UKN(0x06), + TXGBE_UKN(0x07), + TXGBE_UKN(0x08), + TXGBE_UKN(0x09), + TXGBE_UKN(0x0A), + TXGBE_UKN(0x0B), + TXGBE_UKN(0x0C), + TXGBE_UKN(0x0D), + TXGBE_UKN(0x0E), + TXGBE_UKN(0x0F), + + /* L2: mac */ + TXGBE_UKN(0x10), + TXGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + TXGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + TXGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + TXGBE_UKN(0x20), + TXGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + TXGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x26), + TXGBE_UKN(0x27), + TXGBE_UKN(0x28), + TXGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + TXGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x2E), + TXGBE_UKN(0x2F), + + /* L2: fcoe */ + TXGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x35), + TXGBE_UKN(0x36), + TXGBE_UKN(0x37), + TXGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x3D), + TXGBE_UKN(0x3E), + TXGBE_UKN(0x3F), + + TXGBE_UKN(0x40), + TXGBE_UKN(0x41), + TXGBE_UKN(0x42), + 
TXGBE_UKN(0x43), + TXGBE_UKN(0x44), + TXGBE_UKN(0x45), + TXGBE_UKN(0x46), + TXGBE_UKN(0x47), + TXGBE_UKN(0x48), + TXGBE_UKN(0x49), + TXGBE_UKN(0x4A), + TXGBE_UKN(0x4B), + TXGBE_UKN(0x4C), + TXGBE_UKN(0x4D), + TXGBE_UKN(0x4E), + TXGBE_UKN(0x4F), + TXGBE_UKN(0x50), + TXGBE_UKN(0x51), + TXGBE_UKN(0x52), + TXGBE_UKN(0x53), + TXGBE_UKN(0x54), + TXGBE_UKN(0x55), + TXGBE_UKN(0x56), + TXGBE_UKN(0x57), + TXGBE_UKN(0x58), + TXGBE_UKN(0x59), + TXGBE_UKN(0x5A), + TXGBE_UKN(0x5B), + TXGBE_UKN(0x5C), + TXGBE_UKN(0x5D), + TXGBE_UKN(0x5E), + TXGBE_UKN(0x5F), + TXGBE_UKN(0x60), + TXGBE_UKN(0x61), + TXGBE_UKN(0x62), + TXGBE_UKN(0x63), + TXGBE_UKN(0x64), + TXGBE_UKN(0x65), + TXGBE_UKN(0x66), + TXGBE_UKN(0x67), + TXGBE_UKN(0x68), + TXGBE_UKN(0x69), + TXGBE_UKN(0x6A), + TXGBE_UKN(0x6B), + TXGBE_UKN(0x6C), + TXGBE_UKN(0x6D), + TXGBE_UKN(0x6E), + TXGBE_UKN(0x6F), + TXGBE_UKN(0x70), + TXGBE_UKN(0x71), + TXGBE_UKN(0x72), + TXGBE_UKN(0x73), + TXGBE_UKN(0x74), + TXGBE_UKN(0x75), + TXGBE_UKN(0x76), + TXGBE_UKN(0x77), + TXGBE_UKN(0x78), + TXGBE_UKN(0x79), + TXGBE_UKN(0x7A), + TXGBE_UKN(0x7B), + TXGBE_UKN(0x7C), + TXGBE_UKN(0x7D), + TXGBE_UKN(0x7E), + TXGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + TXGBE_UKN(0x80), + TXGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0x86), + TXGBE_UKN(0x87), + TXGBE_UKN(0x88), + TXGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0x8E), + TXGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + TXGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0x96), + TXGBE_UKN(0x97), + TXGBE_UKN(0x98), + TXGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0x9E), + TXGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xA6), + TXGBE_UKN(0xA7), + TXGBE_UKN(0xA8), + TXGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xAE), + TXGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, 
TCP, PAY4), + TXGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xB6), + TXGBE_UKN(0xB7), + TXGBE_UKN(0xB8), + TXGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xBE), + TXGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + TXGBE_UKN(0xC0), + TXGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0xC6), + TXGBE_UKN(0xC7), + TXGBE_UKN(0xC8), + TXGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0xCE), + TXGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + TXGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0xD6), + TXGBE_UKN(0xD7), + TXGBE_UKN(0xD8), + TXGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0xDE), + TXGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xE6), + TXGBE_UKN(0xE7), + TXGBE_UKN(0xE8), + TXGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xEE), + TXGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + TXGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xF6), + TXGBE_UKN(0xF7), + TXGBE_UKN(0xF8), + TXGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xFE), + TXGBE_UKN(0xFF), +}; + +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + /* enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + 
txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = txgbe_setup_mac_link_sp; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link_sp; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + /* Identify the PHY or SFP module */ + ret_val = hw->phy.ops.identify(hw); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops_sp(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper) { + hw->phy.type = txgbe_phy_xaui; + if ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + mac->ops.setup_link = txgbe_setup_copper_link; + mac->ops.get_link_capabilities = + txgbe_get_copper_link_capabilities; + } + } + +init_phy_ops_out: + return ret_val; +} + +static s32 txgbe_setup_sfp_modules_sp(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + txgbe_init_mac_link_ops_sp(hw); + + return ret_val; +} + +/** + * txgbe_init_ops_sp - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for sapphire. + * Does not touch the hardware. + **/ + +static s32 txgbe_init_ops_sp(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + ret_val = txgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = txgbe_init_phy_ops_sp; + + /* MAC */ + mac->ops.get_media_type = txgbe_get_media_type_sp; + mac->ops.setup_sfp = txgbe_setup_sfp_modules_sp; + + /* LINK */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities_sp; + mac->ops.setup_link = txgbe_setup_mac_link_sp; + mac->ops.check_link = txgbe_check_mac_link_sp; + + return ret_val; +} + +static void txgbe_set_mac_type(struct txgbe_hw *hw) +{ + switch (hw->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + hw->mac.type = txgbe_mac_sp; + break; + case TXGBE_DEV_ID_AML: + case TXGBE_DEV_ID_AML5025: + case TXGBE_DEV_ID_AML5125: + hw->mac.type = txgbe_mac_aml; + break; + case TXGBE_DEV_ID_AML5040: + case TXGBE_DEV_ID_AML5140: + hw->mac.type = txgbe_mac_aml40; + break; + default: + hw->mac.type = txgbe_mac_unknown; + break; + } +} + +/** + * txgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The txgbe_hw structure should be + * memset to 0 prior to calling this function. 
The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +int txgbe_init_shared_code(struct txgbe_hw *hw) +{ + s32 status; + + txgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case txgbe_mac_sp: + status = txgbe_init_ops_sp(hw); + break; + case txgbe_mac_aml: + status = txgbe_init_ops_aml(hw); + break; + case txgbe_mac_aml40: + status = txgbe_init_ops_aml40(hw); + break; + default: + status = TXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + +s32 txgbe_init_ops_generic(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + struct txgbe_flash_info *flash = &hw->flash; + s32 ret_val = 0; + + /* PHY */ + phy->ops.reset = txgbe_reset_phy; + phy->ops.read_reg = txgbe_read_phy_reg; + phy->ops.write_reg = txgbe_write_phy_reg; + phy->ops.read_reg_mdi = txgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = txgbe_write_phy_reg_mdi; + phy->ops.setup_link = txgbe_setup_phy_link; + phy->ops.setup_link_speed = txgbe_setup_phy_link_speed; + phy->ops.get_firmware_version = txgbe_get_phy_firmware_version; + phy->ops.read_i2c_byte = txgbe_read_i2c_byte; + phy->ops.write_i2c_byte = txgbe_write_i2c_byte; + phy->ops.read_i2c_sff8472 = txgbe_read_i2c_sff8472; + phy->ops.read_i2c_sff8636 = txgbe_read_i2c_sff8636; + phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom; + phy->ops.read_i2c_sfp_phy = txgbe_read_i2c_sfp_phy; + phy->ops.write_i2c_eeprom = txgbe_write_i2c_eeprom; + phy->ops.identify_sfp = txgbe_identify_module; + phy->sfp_type = txgbe_sfp_type_unknown; + phy->ops.check_overtemp = txgbe_tn_check_overtemp; + phy->ops.identify = txgbe_identify_phy; + + /* MAC */ + mac->ops.init_hw = txgbe_init_hw; + mac->ops.clear_hw_cntrs = txgbe_clear_hw_cntrs; + mac->ops.get_mac_addr = txgbe_get_mac_addr; + mac->ops.stop_adapter = txgbe_stop_adapter; + mac->ops.get_bus_info = txgbe_get_bus_info; + mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = txgbe_release_swfw_sync; + mac->ops.reset_hw = txgbe_reset_hw; + mac->ops.get_media_type = NULL; + mac->ops.disable_sec_rx_path = txgbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = txgbe_enable_sec_rx_path; + mac->ops.disable_sec_tx_path = txgbe_disable_sec_tx_path; + mac->ops.enable_sec_tx_path = txgbe_enable_sec_tx_path; + mac->ops.enable_rx_dma = txgbe_enable_rx_dma; + mac->ops.start_hw = txgbe_start_hw; + mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr; + mac->ops.set_san_mac_addr = txgbe_set_san_mac_addr; + mac->ops.get_device_caps = txgbe_get_device_caps; + mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix; + mac->ops.setup_eee = txgbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = txgbe_led_on; + mac->ops.led_off = txgbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = txgbe_set_rar; + mac->ops.clear_rar = txgbe_clear_rar; + mac->ops.init_rx_addrs = txgbe_init_rx_addrs; + mac->ops.update_uc_addr_list = txgbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = txgbe_update_mc_addr_list; + mac->ops.enable_mc = txgbe_enable_mc; + mac->ops.disable_mc = txgbe_disable_mc; + mac->ops.enable_rx = txgbe_enable_rx; + mac->ops.disable_rx = txgbe_disable_rx; + mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = txgbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta = 
txgbe_set_vfta; + mac->ops.set_vlvf = txgbe_set_vlvf; + mac->ops.clear_vfta = txgbe_clear_vfta; + mac->ops.init_uta_tables = txgbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = txgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = txgbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + txgbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = txgbe_fc_enable; + mac->ops.setup_fc = txgbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.check_link = NULL; + mac->ops.setup_rxpba = txgbe_set_rxpba; + mac->mcft_size = TXGBE_SP_MC_TBL_SIZE; + mac->vft_size = TXGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = TXGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = TXGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = txgbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, TXGBE_MIS_ST) & + TXGBE_MIS_ST_MNG_INIT_DN) ? true : false; + + hw->mbx.ops.init_params = txgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = txgbe_init_eeprom_params; + eeprom->ops.calc_checksum = txgbe_calc_eeprom_checksum; + eeprom->ops.read = txgbe_read_ee_hostif; + eeprom->ops.read_buffer = txgbe_read_ee_hostif_buffer; + eeprom->ops.write = txgbe_write_ee_hostif; + eeprom->ops.write_buffer = txgbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = txgbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = txgbe_validate_eeprom_checksum; + + /* FLASH */ + flash->ops.init_params = txgbe_init_flash_params; + flash->ops.read_buffer = txgbe_read_flash_buffer; + flash->ops.write_buffer = txgbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = txgbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + txgbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + txgbe_init_thermal_sensor_thresh; + + mac->ops.get_rtrup2tc = txgbe_dcb_get_rtrup2tc; + + return ret_val; +} + +/** + * txgbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl; + u32 sr_an_mmd_adv_reg2; + + if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } else if (hw->dac_sfp) { + *autoneg = true; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) { + /* Check if 1G SFP module. 
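+		 * These 1G-only modules cannot run at 10G, so advertise
+		 * 1G full duplex only and leave autoneg enabled.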
*/ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + /* SFP */ + else if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + } + /* XAUI */ + else if ((hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper) && + ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_SFI_XAUI)) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_T; + } + /* SGMII */ + else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | + TXGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_T | + TXGBE_PHYSICAL_LAYER_100BASE_TX; + /* MAC XAUI */ + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + /* MAC SGMII */ + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else { + /* Determine link capabilities based on the stored value, + * which represents EEPROM defaults. If value has not + * been stored, use the current register values. + */ + if (hw->mac.orig_link_settings_stored) { + sr_pcs_ctl = hw->mac.orig_sr_pcs_ctl2; + sr_pma_mmd_ctl1 = hw->mac.orig_sr_pma_mmd_ctl1; + sr_an_mmd_ctl = hw->mac.orig_sr_an_mmd_ctl; + sr_an_mmd_adv_reg2 = hw->mac.orig_sr_an_mmd_adv_reg2; + } else { + sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, + TXGBE_SR_PMA_MMD_CTL1); + sr_an_mmd_ctl = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_CTL); + sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + } + + if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 1G or KX - no backplane auto-negotiation */ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 10 GbE serial link (KR -no backplane auto-negotiation) */ + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + } else if ((sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE)) { + /* KX/KX4/KR backplane auto-negotiation enable */ + *speed = TXGBE_LINK_SPEED_UNKNOWN; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + hw->phy.link_mode = 
TXGBE_PHYSICAL_LAYER_10GBASE_KR |
+					    TXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+					    TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		} else {
+			status = TXGBE_ERR_LINK_SETUP;
+			goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
+/**
+ * txgbe_get_media_type_sp - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw)
+{
+	enum txgbe_media_type media_type;
+	u8 device_type = hw->subsystem_device_id & 0xF0;
+
+	/* Detect if there is a copper PHY attached. */
+	switch (hw->phy.type) {
+	case txgbe_phy_cu_unknown:
+	case txgbe_phy_tn:
+		media_type = txgbe_media_type_copper;
+		goto out;
+	default:
+		break;
+	}
+
+	switch (device_type) {
+	case TXGBE_ID_MAC_XAUI:
+	case TXGBE_ID_MAC_SGMII:
+	case TXGBE_ID_KR_KX_KX4:
+		/* Default device ID is mezzanine card KX/KX4 */
+		media_type = txgbe_media_type_backplane;
+		break;
+	case TXGBE_ID_SFP:
+		media_type = txgbe_media_type_fiber;
+		break;
+	case TXGBE_ID_XAUI:
+	case TXGBE_ID_SGMII:
+		media_type = txgbe_media_type_copper;
+		break;
+	case TXGBE_ID_SFI_XAUI:
+		if (hw->bus.lan_id == 0)
+			media_type = txgbe_media_type_fiber;
+		else
+			media_type = txgbe_media_type_copper;
+		break;
+	default:
+		media_type = txgbe_media_type_unknown;
+		break;
+	}
+out:
+	return media_type;
+}
+
+/**
+ * txgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR);
+
+	if (!((hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) ||
+	      (hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber_qsfp)))
+		return;
+	/* Blocked by MNG FW so bail */
+	if (txgbe_check_reset_blocked(hw))
+		return;
+
+	if (txgbe_close_notify(hw)) {
+		/* over write led when ifconfig down */
+		if (hw->mac.type == txgbe_mac_aml40)
+			hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_40G |
+					    TXGBE_AMLITE_LED_LINK_ACTIVE);
+		else if (hw->mac.type == txgbe_mac_aml)
+			hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP | TXGBE_AMLITE_LED_LINK_25G |
+					    TXGBE_AMLITE_LED_LINK_10G | TXGBE_AMLITE_LED_LINK_ACTIVE);
+		else
+			hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP |
+					    TXGBE_LED_LINK_10G | TXGBE_LED_LINK_1G |
+					    TXGBE_LED_LINK_ACTIVE);
+	}
+
+	/* Disable Tx laser; allow 100us to go dark per spec */
+	if (hw->mac.type == txgbe_mac_aml40) {
+		wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1);
+		esdp_reg &= ~TXGBE_GPIO_DR_1;
+	} else if (hw->mac.type == txgbe_mac_aml) {
+		esdp_reg |= TXGBE_GPIO_DR_1;
+	} else {
+		esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0;
+	}
+
+	wr32(hw, TXGBE_GPIO_DR, esdp_reg);
+	TXGBE_WRITE_FLUSH(hw);
+	usec_delay(100);
+}
+
+/**
+ * txgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
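+ * On these boards the GPIO data bits DR_0/DR_1 appear to drive the
+ * module's TX_DISABLE input, so the writes below that clear those bits
+ * (or, on aml40, set DR_1) turn the transmitter back on.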
+ **/
+void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	if (!((hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) ||
+	      (hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber_qsfp)))
+		return;
+	if (txgbe_open_notify(hw))
+		/* recover led configure when ifconfig up */
+		wr32(hw, TXGBE_CFG_LED_CTL, 0);
+
+	/* Enable Tx laser; allow 100ms to light up */
+	if (hw->mac.type == txgbe_mac_aml40) {
+		wr32m(hw, TXGBE_GPIO_DDR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1);
+		wr32m(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1, TXGBE_GPIO_DR_1);
+	} else {
+		wr32m(hw, TXGBE_GPIO_DR,
+		      TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0);
+	}
+	TXGBE_WRITE_FLUSH(hw);
+	msec_delay(100);
+}
+
+/**
+ * txgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the Tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+	if (!((hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) ||
+	      (hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber_qsfp)))
+		return;
+
+	/* Blocked by MNG FW so bail */
+	if (txgbe_check_reset_blocked(hw))
+		return;
+
+	if (hw->mac.autotry_restart) {
+		txgbe_disable_tx_laser_multispeed_fiber(hw);
+		txgbe_enable_tx_laser_multispeed_fiber(hw);
+		hw->mac.autotry_restart = false;
+	}
+}
+
+/**
+ * txgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via RS0/RS1 rate select pins.
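+ * Per the usual SFF-8431 rate-select convention, driving RS0/RS1 high
+ * typically requests the full (10G) signalling rate and driving them low
+ * the reduced (1G) rate, which matches the GPIO_DR_4/GPIO_DR_5 handling
+ * below.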
+ */ +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed) +{ + u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); + + switch (speed) { + case TXGBE_LINK_SPEED_25GB_FULL: + /*amlite TODO*/ + break; + case TXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~(TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4); + break; + default: + return; + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_5 | TXGBE_GPIO_DDR_4 | + TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_0); + + wr32(hw, TXGBE_GPIO_DR, esdp_reg); + + TXGBE_WRITE_FLUSH(hw); +} + +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) +{ + u32 value; + struct txgbe_adapter *adapter = hw->back; + + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002); + + /* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */ + /* for sgmii direct link, set to 0x010c (mac sgmii mode) */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII || + hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x010c); + else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_SGMII || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0105); + + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0200); + + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + value = (value & ~0x1200) | (0x1 << 9); + if (adapter->autoneg) + value |= (0x1 << 12); + + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value); + + return 0; +} + +int txgbe_enable_rx_adapter(struct txgbe_hw *hw) +{ + int ret = 0; + u32 value; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value |= BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + value = 0; + ret = read_poll_timeout(txgbe_rd32_epcs, value, (value & BIT(11)), 1000, + 200000, false, hw, TXGBE_PHY_RX_AD_ACK); + if (ret) + return -ETIMEDOUT; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value &= ~BIT(12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + return 0; +} + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + usleep_range(10000, 20000); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + txgbe_wr32_epcs(hw, 0x78002, 0x0); + txgbe_wr32_epcs(hw, 0x78001, 0x7); + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x1); + + /* 2. Disable xpcs AN-73 */ + if (adapter->backplane_an == 1) { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + /* bit8:CA_TX_EQ bit7:an_preset bit6:TX_EQ_OVR_RIDE */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value &= ~0x40; + value |= BIT(8); + txgbe_wr32_epcs(hw, 0x18037, value); + } else { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, 0x0); + } + + /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register */ + /* Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR); + + /* 4. 
Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register */ + /* Bit[12:8](RX_VREF_CTRL) = 5'hF (default: 5'h11) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, + 0xCF00); + + /* 5. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register + * Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5](CTLE_POLE_0) = 3'h2 + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, + 0x774A); + + /* 6. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register + * Bit[2:0](LOS_TRSHLD_0) = 3'h4 (default: 3) + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, + 0x0004); + /* 7. Initialize the mode by setting VR XS or PCS MMD Digital + * Control1 Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, + 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if (adapter->ffe_set == TXGBE_BP_M_KR) { + e_info(hw, "Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6) | adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } +out: + return status; +} + +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX4) + goto out; + + e_dev_info("It is set to kx4.\n"); + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + usleep_range(10000, 20000); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + /* 2. 
Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2501); + /* Reset rx lane0-3 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + txgbe_wr32_ephy(hw, 0x1105, 0x4001); + txgbe_wr32_ephy(hw, 0x1205, 0x4001); + txgbe_wr32_ephy(hw, 0x1305, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x250A); + TXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b1 SS13: 10G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G); + + value = (0xf5f0 & ~0x7F0) | (0x5 << 8) | (0x7 << 5) | 0xF0; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + else + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00); + + for (i = 0; i < 4; i++) { + if (i == 0) + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + else + value = (0xff06 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7777; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0xFFF) | (0x1 << 9) | (0x1 << 6) | (0x1 << 3) | 0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 0 Register Bit[7:0] = 8'd40 MPLLA_MULTIPLIER */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 3 Register Bit[10:0] = 11'd86 MPLLA_BANDWIDTH */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 0 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_0 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 1 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 2 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_2 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 3 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Reference 0 Register Bit[5:0] = 6'd34 VCO_REF_LD_0/1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration 
Reference 1 Register Bit[5:0] = 6'd34 VCO_REF_LD_2/3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE */ + /* Enable Register Bit[7:0] = 8'd0 AFE_EN_0/3_1, DFE_EN_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx */ + /* Equalization Control 4 Register Bit[3:0] = 4'd0 CONT_ADAPT_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x00F0); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate */ + /* Control Register Bit[14:12], Bit[10:8], Bit[6:4], Bit[2:0], + * all rates to 3'b010 TX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate */ + /* Control Register Bit[13:12], Bit[9:8], Bit[5:4], Bit[1:0], + * all rates to 2'b10 RX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General */ + /* Control 2 Register Bit[15:8] = 2'b01 TX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + 0x5500); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General */ + /* Control 2 Register Bit[15:8] = 2'b01 RX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + 0x5500); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 + * MPLLA_DIV16P5_CLK_EN=0, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + + txgbe_wr32_epcs(hw, 0x1f0000, 0x0); + txgbe_wr32_epcs(hw, 0x1f8001, 0x0); + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0); + + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX4; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if (adapter->ffe_set == TXGBE_BP_M_KX4) { + e_dev_info("Set KX4 TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6) | adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + +out: + hw->mac.ops.enable_sec_tx_path(hw); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; +} + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg) +{ + u32 i; + s32 status = 0; + u32 wdata = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_MAC_SGMII) + goto out; + + e_dev_info("It is set to kx. speed =0x%x\n", speed); + + /* 1. 
Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + usleep_range(10000, 20000); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x240A); + TXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + + /* Set the eth change_mode bit first in mis_rst register */ + /* for corresponding LAN port */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G); + + /* Set SR MII MMD Control Register to corresponding speed: {Bit[6], + * Bit[13]}=[2'b00,2'b01,2'b10]->[10M,100M,1G] + */ + if (speed == TXGBE_LINK_SPEED_100_FULL) + wdata = 0x2100; + else if (speed == TXGBE_LINK_SPEED_1GB_FULL) + wdata = 0x0140; + else if (speed == TXGBE_LINK_SPEED_10_FULL) + wdata = 0x0100; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, + wdata); + + value = (0xf5f0 & ~0x710) | (0x5 << 8) | 0x10; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + + for (i = 0; i < 4; i++) { + if (i) + value = 0xff06; + else + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX); + + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control 3 + * Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, 0x549); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + 
txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2929); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE Enable + * Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + /* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */ + /* Set to 8bit MII (required in 10M/100M SGMII) */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, + 0x0100); + + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if (adapter->ffe_set == TXGBE_BP_M_KX) { + e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + +out: + hw->mac.ops.enable_sec_tx_path(hw); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; +} + +static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, + u32 speed) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* Set the module link speed */ + hw->mac.ops.set_rate_select_speed(hw, speed); + + /* 1. 
Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + usleep_range(10000, 20000); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + + /* 2. Disable xpcs AN-73 */ + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + + if (hw->revision_id != TXGBE_SP_MPW) { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x243A); + TXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0); + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + value = value | 0x2000; + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, value); + /* @. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL0 Register Bit[7:0] = 8'd33 + * MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0021); + /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register + * Bit[10:0](MPLLA_BANDWIDTH) = 11'd0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x700) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + /* 4.Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register Bit[12:8](RX_VREF_CTRL) + * = 5'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_LD0 Register Bit[12:0] = 13'd1353 + * VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0549); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_REF0 Register Bit[5:0] = 6'd41 + * VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x0029); + /* @. Set VR_XS_PMA_Gen5_12G_TX_RATE_CTRL Register Bit[2:0] = 3'b000 + * TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0); + /* @. Set VR_XS_PMA_Gen5_12G_RX_RATE_CTRL Register Bit[2:0] = 3'b000 + * RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0); + /* @. Set VR_XS_PMA_Gen5_12G_TX_GENCTRL2 Register Bit[9:8] = 2'b11 + * TX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0300); + /* @. Set VR_XS_PMA_Gen5_12G_RX_GENCTRL2 Register Bit[9:8] = 2'b11 + * RX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0300); + /* @. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL2 Register Bit[10:8] = 3'b110 + * MPLLA_DIV16P5_CLK_EN=1, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0600); + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register + * Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5] + * (CTLE_POLE_0) = 3'h2, Bit[4:0](CTLE_BOOST_0) = 4'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + + } else { + /* 7. 
Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | (2 << 5) | 0x05; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0014); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x11; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } else { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0xBE); + /* 9. Set VR_MII_Gen5_12G_AFE_DFE_EN_CTRL Register Bit[4](DFE_EN_0) = + * 1'b0, Bit[0](AFE_EN_0) = 1'b0 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value = value & ~0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + } else { + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0x1); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, 0x0000); + /* Set SR MII MMD Control Register to corresponding speed: */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, 0x0140); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x710) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (24 << 8) | 4; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | 16 | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + } else { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | 0x7706; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + /* 8. 
Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd00 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + /* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL3 Register Bit[2:0] LOS_TRSHLD_0 = 4 */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3); + value = (value & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY + * MPLLA Control 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0020); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 3 Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0x0046); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0540); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x002A); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE + * Enable Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA + * Control 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0200); + /* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0100); + } + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + + if (adapter->ffe_set == TXGBE_BP_M_SFI) { + e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } +out: + hw->mac.ops.enable_sec_tx_path(hw); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + TXGBE_MAC_RX_CFG_RE); + + return status; +} + +/** + * txgbe_setup_mac_link - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 txgbe_setup_mac_link_sp(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = 0; + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + bool link_up = false; + u32 curr_autoneg = 2; + + /* Check to see if speed passed in is supported. */ + status = hw->mac.ops.get_link_capabilities(hw, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (!(((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || + hw->dac_sfp)) { + status = hw->mac.ops.check_link(hw, + &link_speed, &link_up, false); + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + + if (status != 0) + goto out; + + if (link_speed == speed && link_up && + !(speed == TXGBE_LINK_SPEED_1GB_FULL && + adapter->autoneg != curr_autoneg)) + goto out; + } + + if ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4 || + hw->dac_sfp) { + txgbe_set_link_to_kr(hw, autoneg); + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_XAUI) || + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SGMII || + ((hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_MAC_SGMII) || + (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper && + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_SFI_XAUI)) { + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + txgbe_set_link_to_kx4(hw, 0); + } else { + txgbe_set_link_to_kx(hw, speed, 0); + txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= speed; + } + } else if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_fiber) { + if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP && + (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1))) { + txgbe_set_link_to_sfi(hw, speed); + if (speed == TXGBE_LINK_SPEED_1GB_FULL) { + txgbe_setup_fc(hw); + txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + } + } + } + +out: + return status; +} + +/** + * txgbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
+ **/
+static s32 txgbe_setup_copper_link(struct txgbe_hw *hw,
+				   u32 speed,
+				   bool autoneg_wait_to_complete)
+{
+	s32 status;
+	u32 link_speed;
+
+	/* Setup the PHY according to input speed */
+	link_speed = hw->phy.ops.setup_link_speed(hw, speed,
+						  autoneg_wait_to_complete);
+
+	if (link_speed != TXGBE_LINK_SPEED_UNKNOWN)
+		status = txgbe_setup_mac_link_sp(hw, link_speed, autoneg_wait_to_complete);
+	else
+		status = 0;
+
+	return status;
+}
+
+int txgbe_reconfig_mac(struct txgbe_hw *hw)
+{
+	u32 mac_wdg_timeout;
+	u32 mac_flow_ctrl;
+
+	mac_wdg_timeout = rd32(hw, TXGBE_MAC_WDG_TIMEOUT);
+	mac_flow_ctrl = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL);
+
+	if (hw->bus.lan_id == 0)
+		wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN0_MAC_RST);
+	else if (hw->bus.lan_id == 1)
+		wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_LAN1_MAC_RST);
+
+	/* wait for mac rst complete */
+	usec_delay(1500);
+	wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD,
+	      TXGBE_LINK_BOTH_PCS_MAC);
+
+	/* receive packets that size > 2048 */
+	wr32m(hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE);
+
+	/* clear counters on read */
+	wr32m(hw, TXGBE_MMC_CONTROL,
+	      TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD);
+
+	wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL,
+	      TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE);
+
+	wr32(hw, TXGBE_MAC_PKT_FLT,
+	     TXGBE_MAC_PKT_FLT_PR);
+
+	wr32(hw, TXGBE_MAC_WDG_TIMEOUT, mac_wdg_timeout);
+	wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mac_flow_ctrl);
+
+	return 0;
+}
+
+static int txgbe_reset_misc(struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter = hw->back;
+	u32 value;
+	int err;
+	int i;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		if (!(rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK)) {
+			err = hw->mac.ops.setup_link(hw, TXGBE_LINK_SPEED_40GB_FULL, false);
+			if (err) {
+				e_dev_info("%s setup phy failed\n", __func__);
+				return err;
+			}
+		}
+	} else if (hw->mac.type == txgbe_mac_aml) {
+		if ((rd32(hw, TXGBE_EPHY_STAT) & TXGBE_EPHY_STAT_PPL_LOCK)
+		    != TXGBE_EPHY_STAT_PPL_LOCK) {
+			err = hw->mac.ops.setup_link(hw, TXGBE_LINK_SPEED_AMLITE_AUTONEG, false);
+			if (err) {
+				e_dev_info("%s setup phy failed\n", __func__);
+				return err;
+			}
+		}
+	} else {
+		value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+		if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X)
+			hw->link_status = TXGBE_LINK_STATUS_NONE;
+	}
+
+	/* receive packets that size > 2048 */
+	wr32m(hw, TXGBE_MAC_RX_CFG,
+	      TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE);
+
+	/* clear counters on read */
+	wr32m(hw, TXGBE_MMC_CONTROL,
+	      TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD);
+
+	wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL,
+	      TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE);
+
+	wr32(hw, TXGBE_MAC_PKT_FLT,
+	     TXGBE_MAC_PKT_FLT_PR);
+
+	wr32m(hw, TXGBE_MIS_RST_ST,
+	      TXGBE_MIS_RST_ST_RST_INIT, 0xA00);
+
+	wr32(hw, TXGBE_PSR_LAN_FLEX_SEL, 0);
+	for (i = 0; i < 16; i++) {
+		wr32(hw, TXGBE_PSR_LAN_FLEX_DW_L(i), 0);
+		wr32(hw, TXGBE_PSR_LAN_FLEX_DW_H(i), 0);
+		wr32(hw, TXGBE_PSR_LAN_FLEX_MSK(i), 0);
+	}
+
+	/* set pause frame dst mac addr */
+	wr32(hw, TXGBE_RDB_PFCMACDAL, 0xC2000001);
+	wr32(hw, TXGBE_RDB_PFCMACDAH, 0x0180);
+
+	txgbe_init_thermal_sensor_thresh(hw);
+
+	return 0;
+}
+
+/**
+ * txgbe_reset_hw - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+s32 txgbe_reset_hw(struct txgbe_hw *hw)
+{
+	u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2;
+	u32 curr_sr_an_mmd_ctl = 0, curr_sr_an_mmd_adv_reg2 = 0;
+	u32 curr_sr_pcs_ctl = 0, curr_sr_pma_mmd_ctl1 = 0;
+	struct txgbe_adapter *adapter = hw->back;
+	u32 curr_vr_xs_or_pcs_mmd_digi_ctl1 = 0;
+	u32 vr_xs_or_pcs_mmd_digi_ctl1;
+	u32 reset_status = 0;
+	u32 rst_delay = 0;
+	u32 reset = 0;
+	s32 status;
+	u32 value;
+	u32 i;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != 0)
+		goto reset_hw_out;
+
+	/* Identify PHY and related function pointers */
+	status = hw->phy.ops.init(hw);
+
+	if (status == TXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	if (hw->mac.type == txgbe_mac_sp) {
+		/* remember internal phy regs from before we reset */
+		curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+		curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+		curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+		curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw,
+							  TXGBE_SR_AN_MMD_ADV_REG2);
+		curr_vr_xs_or_pcs_mmd_digi_ctl1 =
+			txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
+	}
+
+	/* Issue global reset to the MAC. Needs to be SW reset if link is up.
+	 * If link reset is used when link is up, it might reset the PHY when
+	 * mng is using it. If link is down or the flag to force full link
+	 * reset is set, then perform link reset.
+	 */
+	if (hw->force_full_reset) {
+		rst_delay = (rd32(hw, TXGBE_MIS_RST_ST) &
+			     TXGBE_MIS_RST_ST_RST_INIT) >>
+			     TXGBE_MIS_RST_ST_RST_INI_SHIFT;
+		if (hw->reset_type == TXGBE_SW_RESET) {
+			for (i = 0; i < rst_delay + 20; i++) {
+				reset_status =
+					rd32(hw, TXGBE_MIS_RST_ST);
+				if (!(reset_status &
+				      TXGBE_MIS_RST_ST_DEV_RST_ST_MASK))
+					break;
+				msleep(100);
+			}
+
+			if (reset_status & TXGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
+				status = TXGBE_ERR_RESET_FAILED;
+				goto reset_hw_out;
+			}
+			status = txgbe_check_flash_load(hw,
+							TXGBE_SPI_ILDR_STATUS_SW_RESET);
+			if (status != 0)
+				goto reset_hw_out;
+			/* errata 7 */
+			if (txgbe_mng_present(hw) &&
+			    hw->revision_id == TXGBE_SP_MPW) {
+				struct txgbe_adapter *adapter =
+					(struct txgbe_adapter *)hw->back;
+				adapter->flags2 &=
+					~TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED;
+			}
+		} else if (hw->reset_type == TXGBE_GLOBAL_RESET) {
+			struct txgbe_adapter *adapter =
+				(struct txgbe_adapter *)hw->back;
+			msleep(100 * rst_delay + 2000);
+			pci_restore_state(adapter->pdev);
+			pci_save_state(adapter->pdev);
+			pci_wake_from_d3(adapter->pdev, false);
+		}
+	} else {
+		if (hw->bus.lan_id == 0)
+			reset = TXGBE_MIS_RST_LAN0_RST;
+		else
+			reset = TXGBE_MIS_RST_LAN1_RST;
+
+		wr32(hw, TXGBE_MIS_RST,
+		     reset | rd32(hw, TXGBE_MIS_RST));
+		TXGBE_WRITE_FLUSH(hw);
+		usec_delay(10);
+
+		if (hw->bus.lan_id == 0) {
+			status = txgbe_check_flash_load(hw,
+							TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST);
+		} else {
+			status = txgbe_check_flash_load(hw,
+							TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST);
+		}
+		if (status != 0)
+			goto reset_hw_out;
+	}
+
+	status = txgbe_reset_misc(hw);
+	if (status != 0)
+		goto reset_hw_out;
+
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+		wr32(hw, TXGBE_LINKUP_FILTER, TXGBE_LINKUP_FILTER_TIME);
+		wr32m(hw, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_LINK_STS_MOD,
+		      TXGBE_LINK_BOTH_PCS_MAC);
+		/* amlite: bme */
+		wr32(hw, PX_PF_BME, 0x1);
+		/* amlite: rdm_rsc_ctl_free_ctl set to 1 */
+		wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+		      TXGBE_RDM_RSC_CTL_FREE_CTL);
+		adapter->an_done = false;
+		adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO;
+	} else {
+		/* Store the original AUTOC/AUTOC2 values if they have not been
+		 * stored off yet. Otherwise restore the stored original
+		 * values since the reset operation sets back to defaults.
+		 */
+		sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2);
+		sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1);
+		sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL);
+		sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2);
+		vr_xs_or_pcs_mmd_digi_ctl1 =
+			txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1);
+
+		if (!hw->mac.orig_link_settings_stored) {
+			hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl;
+			hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1;
+			hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl;
+			hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2;
+			hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+				vr_xs_or_pcs_mmd_digi_ctl1;
+			hw->mac.orig_link_settings_stored = true;
+		} else {
+			/* If MNG FW is running on a multi-speed device that
+			 * doesn't autoneg without driver support we need to
+			 * leave LMS in the state it was before we MAC reset.
+			 * Likewise if we support WoL we don't want to change
+			 * the LMS state.
+			 */
+			hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl;
+			hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1;
+			hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl;
+			hw->mac.orig_sr_an_mmd_adv_reg2 =
+				curr_sr_an_mmd_adv_reg2;
+			hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 =
+				curr_vr_xs_or_pcs_mmd_digi_ctl1;
+		}
+	}
+
+	/* make sure phy power is up */
+	msleep(100);
+	if (hw->mac.type == txgbe_mac_sp) {
+		/* a temporary solution for setting to sfi */
+		if (adapter->ffe_set == TXGBE_BP_M_SFI) {
+			e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n",
+				   adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+			value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+			value = (value & ~0x7F) | adapter->ffe_post | (1 << 6);
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+		}
+
+		if (adapter->ffe_set == TXGBE_BP_M_KR) {
+			e_info(hw, "Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n",
+			       adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+			value = (0x1804 & ~0x3F3F);
+			value |= adapter->ffe_main << 8 | adapter->ffe_pre;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+			value = (0x50 & ~0x7F) | (1 << 6) | adapter->ffe_post;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+		}
+
+		if (adapter->ffe_set == TXGBE_BP_M_KX) {
+			e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n",
+				   adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post);
+
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+			value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre;
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+			value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+			value = (value & ~0x7F) | adapter->ffe_post | (1 << 6);
+			txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+		}
+	}
+
+	/* Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table. Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+ */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+	/* Store the permanent SAN mac address */
+	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (txgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+				    hw->mac.san_addr, 0, TXGBE_PSR_MAC_SWC_AD_H_AV);
+
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+				   &hw->mac.wwpn_prefix);
+
+	pci_set_master(((struct txgbe_adapter *)hw->back)->pdev);
+
+reset_hw_out:
+	return status;
+}
+
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+	int i;
+
+	for (i = 0; i < TXGBE_RDB_FDIR_CMD_CMD_POLL; i++) {
+		*fdircmd = rd32(hw, TXGBE_RDB_FDIR_CMD);
+		if (!(*fdircmd & TXGBE_RDB_FDIR_CMD_CMD_MASK))
+			return 0;
+		usec_delay(10);
+	}
+
+	return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+	s32 err;
+	int i;
+	u32 fdirctrl = rd32(hw, TXGBE_RDB_FDIR_CTL);
+	u32 fdircmd;
+
+	fdirctrl &= ~TXGBE_RDB_FDIR_CTL_INIT_DONE;
+
+	/* Before starting reinitialization process,
+	 * FDIRCMD.CMD must be zero.
+	 */
+	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err)
+		return err;
+
+	wr32(hw, TXGBE_RDB_FDIR_FREE, 0);
+	TXGBE_WRITE_FLUSH(hw);
+	/* sapphire/amber-lite adapters flow director init flow cannot be
+	 * restarted. Work around the sapphire/amber-lite
+	 * silicon errata by performing the following steps
+	 * before re-writing the FDIRCTRL control register with the same value.
+	 * - write 1 to bit 8 of FDIRCMD register &
+	 * - write 0 to bit 8 of FDIRCMD register
+	 */
+	wr32m(hw, TXGBE_RDB_FDIR_CMD,
+	      TXGBE_RDB_FDIR_CMD_CLEARHT, TXGBE_RDB_FDIR_CMD_CLEARHT);
+	TXGBE_WRITE_FLUSH(hw);
+	wr32m(hw, TXGBE_RDB_FDIR_CMD,
+	      TXGBE_RDB_FDIR_CMD_CLEARHT, 0);
+	TXGBE_WRITE_FLUSH(hw);
+	/* Clear FDIR Hash register to clear any leftover hashes
+	 * waiting to be programmed.
+ */
+	wr32(hw, TXGBE_RDB_FDIR_HASH, 0x00);
+	TXGBE_WRITE_FLUSH(hw);
+
+	wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl);
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* Poll init-done after we write FDIRCTRL register */
+	for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (rd32(hw, TXGBE_RDB_FDIR_CTL) &
+		    TXGBE_RDB_FDIR_CTL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+	if (i >= TXGBE_FDIR_INIT_DONE_POLL)
+		return TXGBE_ERR_FDIR_REINIT_FAILED;
+
+	/* Clear FDIR statistics registers (read to clear) */
+	rd32(hw, TXGBE_RDB_FDIR_USE_ST);
+	rd32(hw, TXGBE_RDB_FDIR_FAIL_ST);
+	rd32(hw, TXGBE_RDB_FDIR_MATCH);
+	rd32(hw, TXGBE_RDB_FDIR_MISS);
+	rd32(hw, TXGBE_RDB_FDIR_LEN);
+
+	return 0;
+}
+
+/**
+ * txgbe_fdir_enable - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void txgbe_fdir_enable(struct txgbe_hw *hw, u32 fdirctrl)
+{
+	int i;
+
+	/* Prime the keys for hashing */
+	wr32(hw, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+	wr32(hw, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/* Poll init-done after we write the register. Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 * Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl);
+	TXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < TXGBE_RDB_FDIR_INIT_DONE_POLL; i++) {
+		if (rd32(hw, TXGBE_RDB_FDIR_CTL) &
+		    TXGBE_RDB_FDIR_CTL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+}
+
+/**
+ * txgbe_init_fdir_signature - Initialize Flow Director sig filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *            contains just the value of the Rx packet buffer allocation
+ **/
+s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl)
+{
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back;
+
+	int i = VMDQ_P(0) / 4;
+	int j = VMDQ_P(0) % 4;
+	u32 flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i),
+			 ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK |
+			    TXGBE_RDB_FDIR_FLEX_CFG_MSK |
+			    TXGBE_RDB_FDIR_FLEX_CFG_OFST) <<
+			   (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j)));
+
+	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+		 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) <<
+		(TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+	wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex);
+
+	/* Continue setup of fdirctrl register bits:
+	 * Move the flexible bytes to use the ethertype - shift 6 words
+	 * Set the maximum length per hash bucket to 0xA filters
+	 * Send interrupt when 64 filters are left
+	 */
+	fdirctrl |= (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) |
+		    (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) |
+		    (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	txgbe_fdir_enable(hw, fdirctrl);
+
+	if (hw->revision_id == TXGBE_SP_MPW)
+		wr32m(hw, TXGBE_PX_RR_CFG(0),
+		      TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC);
+
+	return 0;
+}
+
+/**
+ * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *            contains just the value of the Rx packet buffer allocation
+ * @cloud_mode: true - cloud mode, false - other mode
+ **/
+s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl,
+			    bool cloud_mode)
+{
+	/* Continue setup of fdirctrl register bits:
+	 * Turn perfect match filtering on
+	 * Report hash in RSS field of Rx wb descriptor
+	 * Initialize the drop queue
+	 * Move the flexible bytes to use the ethertype - shift 6 words
+	 * Set the maximum length per hash bucket to 0xA filters
+	 * Send interrupt when 64 (0x4 * 16) filters are left
+	 */
+	fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
+		    (TXGBE_RDB_FDIR_DROP_QUEUE <<
+		     TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT) |
+		    (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) |
+		    (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) |
+		    (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	txgbe_fdir_enable(hw, fdirctrl);
+
+	if (hw->revision_id == TXGBE_SP_MPW) {
+		if (((struct txgbe_adapter *)hw->back)->num_rx_queues >
+		    TXGBE_RDB_FDIR_DROP_QUEUE)
+			/* errata 1: disable RSC of drop ring */
+			wr32m(hw,
+			      TXGBE_PX_RR_CFG(TXGBE_RDB_FDIR_DROP_QUEUE),
+			      TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC);
+	}
+	return 0;
+}
+
+/* These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define TXGBE_ATR_COMMON_HASH_KEY \
+	(TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
+#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ * txgbe_atr_compute_sig_hash - Compute the signature hash
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
+			       union txgbe_atr_hash_dword common)
+{
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = TXGBE_NTOHL(input.dword);
+
+	/* generate common hash dword */
+	hi_hash_dword = TXGBE_NTOHL(common.dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+	/* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the VLAN until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	TXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= TXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= TXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
+}
+
+/**
+ * txgbe_fdir_add_signature_filter - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw,
+				    union txgbe_atr_hash_dword input,
+				    union txgbe_atr_hash_dword common,
+				    u8 queue)
+{
+	u32 fdirhashcmd = 0;
+	u8 flow_type;
+	u32 fdircmd;
+	s32 err;
+
+	/* Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 * fifth is FDIRCMD.TUNNEL_FILTER
+	 */
+	flow_type = input.formatted.flow_type;
+	switch (flow_type) {
+	case TXGBE_ATR_FLOW_TYPE_TCPV4:
+	case TXGBE_ATR_FLOW_TYPE_UDPV4:
+	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case TXGBE_ATR_FLOW_TYPE_TCPV6:
+	case TXGBE_ATR_FLOW_TYPE_UDPV6:
+	case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		return TXGBE_ERR_CONFIG;
+	}
+
+	/* configure FDIRCMD register */
+	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+	fdircmd |= (u32)flow_type << TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT;
+
+	fdirhashcmd |= txgbe_atr_compute_sig_hash(input, common);
+	fdirhashcmd |= 0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT;
+	wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
+
+	wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd);
+
+	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+				    union txgbe_atr_input *input_mask)
+{
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 bucket_hash = 0;
+	u32 hi_dword = 0;
+	u32 i = 0;
+
+	/* Apply masks to input data */
+	for (i = 0; i < 11; i++)
+		input->dword_stream[i] &= input_mask->dword_stream[i];
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = TXGBE_NTOHL(input->dword_stream[0]);
+
+	/* generate common hash dword */
+	for (i = 1; i <= 10; i++)
+		hi_dword ^= input->dword_stream[i];
+	hi_hash_dword = TXGBE_NTOHL(hi_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	TXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+	/* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the VLAN until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	for (i = 1; i <= 15; i++)
+		TXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+	/* Limit hash to 13 bits since max bucket count is 8K.
+	 * Store result at the end of the input stream.
+	 */
+	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * txgbe_get_fdirtcpm - generate a TCP port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+static u32 txgbe_get_fdirtcpm(union txgbe_atr_input *input_mask)
+{
+	u32 mask = TXGBE_NTOHS(input_mask->formatted.dst_port);
+
+	mask <<= TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT;
+	mask |= TXGBE_NTOHS(input_mask->formatted.src_port);
+
+	return mask;
+}
+
+/* These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define TXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define TXGBE_WRITE_REG_BE32(a, reg, value) \
+	wr32((a), (reg), TXGBE_STORE_AS_BE32(TXGBE_NTOHL(value)))
+
+#define TXGBE_STORE_AS_BE16(_value) \
+	TXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw,
+			      union txgbe_atr_input *input_mask,
+			      bool cloud_mode)
+{
+	/* mask IPv6 since it is currently not supported */
+	u32 fdirm = 0;
+	u32 fdirtcpm;
+	u32 flex = 0;
+	int i, j;
+#ifdef CONFIG_PCI_IOV
+	struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back;
+#endif
+
+	/* Program the relevant mask registers. If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field. Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well. L4type
+	 * cannot be masked out in this implementation.
+	 *
+	 * This also assumes IPv4 only. IPv6 masking isn't supported at this
+	 * point in time.
+	 */
+
+	/* verify bucket hash is cleared on hash generation */
+	if (input_mask->formatted.bkt_hash)
+		e_info(drv, "bucket hash should always be 0 in mask\n");
+
+	/* Program FDIRM and verify partial masks */
+	switch (input_mask->formatted.vm_pool & 0x7F) {
+	case 0x0:
+		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
+		fallthrough;
+	case 0x7F:
+		break;
+	default:
+		return TXGBE_ERR_CONFIG;
+	}
+
+	switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
+	case 0x0:
+		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
+		if (input_mask->formatted.dst_port ||
+		    input_mask->formatted.src_port)
+			return TXGBE_ERR_CONFIG;
+		fallthrough;
+	case TXGBE_ATR_L4TYPE_MASK:
+		break;
+	default:
+		return TXGBE_ERR_CONFIG;
+	}
+
+	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+	wr32(hw, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
+
+	i = VMDQ_P(0) / 4;
+	j = VMDQ_P(0) % 4;
+	flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i),
+		     ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK |
+			TXGBE_RDB_FDIR_FLEX_CFG_MSK |
+			TXGBE_RDB_FDIR_FLEX_CFG_OFST) <<
+		       (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j)));
+	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+		 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) <<
+		(TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+
+	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+	case 0x0000:
+		flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK <<
+			(TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+		fallthrough;
+	case 0xFFFF:
+		break;
+	default:
+		return TXGBE_ERR_CONFIG;
+	}
+	wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex);
+
+	/* store the TCP/UDP port masks,
+	 * bit reversed from port layout
+	 */
+	fdirtcpm = txgbe_get_fdirtcpm(input_mask);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	wr32(hw, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
+	wr32(hw, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
+	wr32(hw, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);
+
+	/* store source and destination IP masks (little-endian) */
+	wr32(hw, TXGBE_RDB_FDIR_SA4_MSK,
+	     TXGBE_NTOHL(~input_mask->formatted.src_ip[0]));
+	wr32(hw, TXGBE_RDB_FDIR_DA4_MSK,
+	     TXGBE_NTOHL(~input_mask->formatted.dst_ip[0]));
+	return 0;
+}
+
+s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw,
+				    union txgbe_atr_input *input,
+				    u16 soft_id, u8 queue,
+				    bool cloud_mode)
+{
+	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+	s32 err;
+
+	if (!cloud_mode) {
+		/* currently IPv6 is not supported, must be programmed with 0 */
+		wr32(hw, TXGBE_RDB_FDIR_IP6(2),
+		     TXGBE_NTOHL(input->formatted.src_ip[0]));
+		wr32(hw, TXGBE_RDB_FDIR_IP6(1),
+		     TXGBE_NTOHL(input->formatted.src_ip[1]));
+		wr32(hw, TXGBE_RDB_FDIR_IP6(0),
+		     TXGBE_NTOHL(input->formatted.src_ip[2]));
+
+		/* record the source address (little-endian) */
+		wr32(hw, TXGBE_RDB_FDIR_SA,
+		     TXGBE_NTOHL(input->formatted.src_ip[0]));
+
+		/* record the first 32 bits of the destination address
+		 * (little-endian)
+		 */
+		wr32(hw, TXGBE_RDB_FDIR_DA,
+		     TXGBE_NTOHL(input->formatted.dst_ip[0]));
+
+		/* record source and destination port (little-endian) */
+		fdirport = TXGBE_NTOHS(input->formatted.dst_port);
+		fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
+		fdirport |= TXGBE_NTOHS(input->formatted.src_port);
+		wr32(hw, TXGBE_RDB_FDIR_PORT, fdirport);
+	}
+
+	/* record packet type and flex_bytes (little-endian) */
+	fdirvlan = TXGBE_NTOHS(input->formatted.flex_bytes);
+	fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT;
+
+	fdirvlan |= TXGBE_NTOHS(input->formatted.vlan_id);
+	wr32(hw, TXGBE_RDB_FDIR_FLEX, fdirvlan);
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash |
+		   0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT;
+	fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT;
+	wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+	/* flush all previous writes to make certain registers are
+	 * programmed prior to issuing the command
+	 */
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* configure FDIRCMD register */
+	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+	if (queue == TXGBE_RDB_FDIR_DROP_QUEUE)
+		fdircmd |= TXGBE_RDB_FDIR_CMD_DROP;
+	fdircmd |= input->formatted.flow_type <<
+		   TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT;
+	fdircmd |= (u32)input->formatted.vm_pool <<
+		   TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT;
+
+	wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd);
+	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw,
+				    union txgbe_atr_input *input,
+				    u16 soft_id)
+{
+	u32 fdirhash;
+	u32 fdircmd;
+	s32 err;
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash;
+	fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT;
+	wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+	/* flush hash to HW */
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* Query if filter is present */
+	wr32(hw, TXGBE_RDB_FDIR_CMD,
+	     TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT);
+
+	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err)
+		return err;
+
+	/* if filter exists in hardware then remove it */
+	if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) {
+		wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash);
+		TXGBE_WRITE_FLUSH(hw);
+		wr32(hw, TXGBE_RDB_FDIR_CMD,
+		     TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW);
+	}
+
+	return 0;
+}
+
+/**
+ * txgbe_start_hw - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/ +s32 txgbe_start_hw(struct txgbe_hw *hw) +{ + int ret_val = 0; + u32 i; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + TXGBE_WRITE_FLUSH(hw); + /* Setup flow control */ + ret_val = hw->mac.ops.setup_fc(hw); + + if (hw->mac.type == txgbe_mac_sp) { + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32(hw, TXGBE_TDM_RP_IDX, i); + wr32(hw, TXGBE_TDM_RP_RATE, 0); + } + } + TXGBE_WRITE_FLUSH(hw); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ret_val; +} + +/** + * txgbe_identify_phy - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 txgbe_identify_phy(struct txgbe_hw *hw) +{ + /* Detect PHY if not unknown - returns success if already detected. */ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + enum txgbe_media_type media_type; + + /* avoid fw access phy */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) { + /* Let firmware know the driver has taken over */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + mdelay(100); + } + + if (!hw->phy.phy_semaphore_mask) + hw->phy.phy_semaphore_mask = TXGBE_MNG_SWFW_SYNC_SW_PHY; + + media_type = hw->mac.ops.get_media_type(hw); + if (media_type == txgbe_media_type_copper) { + status = txgbe_init_external_phy(hw); + if (status != 0) + return status; + + txgbe_get_phy_id(hw); + hw->phy.type = txgbe_get_phy_type_from_id(hw); + status = 0; + } else if (media_type == txgbe_media_type_fiber || + media_type == txgbe_media_type_fiber_qsfp) { + status = txgbe_identify_module(hw); + } else { + hw->phy.type = txgbe_phy_none; + status = 0; + } + + /* Let firmware take over control of h/w */ + if (((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) && + ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == txgbe_phy_sfp_unsupported) + return TXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles) +{ + int status; + struct txgbe_hic_set_pps pps_cmd; + int i; + + pps_cmd.hdr.cmd = FW_PPS_SET_CMD; + pps_cmd.hdr.buf_len = FW_PPS_SET_LEN; + pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + pps_cmd.lan_id = hw->bus.lan_id; + pps_cmd.enable = enable; + pps_cmd.nsec = nsec; + pps_cmd.cycles = cycles; + pps_cmd.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&pps_cmd, + sizeof(pps_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + usleep_range(1000, 2000); + if (status != 0) + continue; + + if (pps_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + break; + } + + 
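+	/* The loop above is the stock host-interface retry pattern: issue
+	 * the command, give firmware roughly 1-2 ms, and accept only
+	 * FW_CEM_RESP_STATUS_SUCCESS. A hedged sketch of the same shape for
+	 * any prepared command buffer ("buf" and "len" are illustrative
+	 * names, not driver symbols):
+	 *
+	 *	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+	 *		status = txgbe_host_interface_command(hw, buf, len,
+	 *				TXGBE_HI_COMMAND_TIMEOUT, true);
+	 *		if (status == 0)
+	 *			break;
+	 *	}
+	 */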
return status;
+}
+
+/**
+ * txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire/amber-lite
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit for sapphire/amber-lite
+ **/
+s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval)
+{
+	hw->mac.ops.disable_sec_rx_path(hw);
+
+	if (regval & TXGBE_RDB_PB_CTL_RXEN)
+		hw->mac.ops.enable_rx(hw);
+	else
+		hw->mac.ops.disable_rx(hw);
+
+	hw->mac.ops.enable_sec_rx_path(hw);
+
+	return 0;
+}
+
+/**
+ * txgbe_init_flash_params - Initialize flash params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the flash parameters txgbe_flash_info within the
+ * txgbe_hw struct in order to set up flash access.
+ **/
+s32 txgbe_init_flash_params(struct txgbe_hw *hw)
+{
+	struct txgbe_flash_info *flash = &hw->flash;
+	u32 eec;
+
+	eec = 0x1000000;
+	flash->semaphore_delay = 10;
+	flash->dword_size = (eec >> 2);
+	flash->address_bits = 24;
+
+	return 0;
+}
+
+/**
+ * txgbe_read_flash_buffer - Read FLASH dword(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of dword in EEPROM to read
+ * @dwords: number of dwords
+ * @data: dword(s) read from the EEPROM
+ *
+ * Retrieves 32 bit dword(s) read from EEPROM
+ **/
+s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset,
+			    u32 dwords, u32 *data)
+{
+	s32 status = 0;
+	u32 i;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (!dwords || offset + dwords >= hw->flash.dword_size) {
+		status = TXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments");
+		return status;
+	}
+
+	for (i = 0; i < dwords; i++) {
+		/* issue a read-dword command, then collect the data once
+		 * the operation completes
+		 */
+		wr32(hw, TXGBE_SPI_CMD,
+		     TXGBE_SPI_CMD_ADDR(offset + i) |
+		     TXGBE_SPI_CMD_CMD(0x1));
+
+		status = po32m(hw, TXGBE_SPI_STATUS,
+			       TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE,
+			       TXGBE_SPI_TIMEOUT, 0);
+		if (status)
+			break;
+
+		data[i] = rd32(hw, TXGBE_SPI_DATA);
+	}
+
+	return status;
+}
+
+/**
+ * txgbe_write_flash_buffer - Write FLASH dword(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of dword in EEPROM to write
+ * @dwords: number of dwords
+ * @data: dword(s) to write to the EEPROM
+ *
+ **/
+s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset,
+			     u32 dwords, u32 *data)
+{
+	s32 status = 0;
+	u32 i;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (!dwords || offset + dwords >= hw->flash.dword_size) {
+		status = TXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments");
+		return status;
+	}
+
+	for (i = 0; i < dwords; i++) {
+		/* load the data register, then issue a write-dword command
+		 * and wait for completion
+		 */
+		wr32(hw, TXGBE_SPI_DATA, data[i]);
+		wr32(hw, TXGBE_SPI_CMD,
+		     TXGBE_SPI_CMD_ADDR(offset + i) |
+		     TXGBE_SPI_CMD_CMD(0x0));
+
+		status = po32m(hw, TXGBE_SPI_STATUS,
+			       TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE,
+			       TXGBE_SPI_TIMEOUT, 0);
+		if (status != 0)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ * txgbe_init_eeprom_params - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters txgbe_eeprom_info within the
+ * txgbe_hw struct in order to set up EEPROM access.
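+ *
+ * For reference, the flash buffer helpers above are used roughly as
+ * follows (a sketch; "buf" is caller storage and 0x100 an arbitrary
+ * dword offset):
+ *
+ *	u32 buf[4];
+ *
+ *	if (!txgbe_read_flash_buffer(hw, 0x100, 4, buf))
+ *		txgbe_write_flash_buffer(hw, 0x100, 4, buf);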
+ **/ +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw) +{ + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + s32 status = 0; + u16 data; + + if (eeprom->type == txgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = txgbe_eeprom_none; + + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = txgbe_flash; + + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + } + } + + status = hw->eeprom.ops.read(hw, TXGBE_SW_REGION_PTR, + &data); + if (status) + return status; + + eeprom->sw_region_offset = data >> 1; + + return status; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +static s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct txgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + + if (txgbe_check_mng_access(hw)) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + *data = (u16)rd32a(hw, TXGBE_AML_MNG_MBOX_FW2SW, + FW_NVM_DATA_OFFSET); + else if (hw->mac.type == txgbe_mac_sp) + *data = (u16)rd32a(hw, TXGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return 0; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_read_ee_hostif_data(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct txgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 reg; + u32 i; + u32 value = 0; + + /* Take semaphore for the entire operation. 
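+	 * The acquire/release bracket is the same one used by the other
+	 * hostif EEPROM helpers in this file, i.e. (sketch):
+	 *
+	 *	if (hw->mac.ops.acquire_swfw_sync(hw,
+	 *				TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
+	 *		... hostif work ...
+	 *		hw->mac.ops.release_swfw_sync(hw,
+	 *				TXGBE_MNG_SWFW_SYNC_SW_FLASH);
+	 *	}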
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status) + return status; + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = TXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = TXGBE_CPU_TO_BE16(words_to_read * 2); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, + false); + + if (status) + goto out; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + reg = TXGBE_AML_MNG_MBOX_FW2SW; + else + reg = TXGBE_MNG_MBOX; + + for (i = 0; i < words_to_read; i++) { + if (txgbe_check_mng_access(hw)) { + value = rd32(hw, reg + (FW_NVM_DATA_OFFSET << 2) + 2 * i); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + +/** + * txgbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +static s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +s32 txgbe_close_notify(struct txgbe_hw *hw) +{ + int tmp; + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_DW_CLOSE_NOTIFY; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = 0; + buffer.address = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + 500, false); + if (status) + return status; + + if (txgbe_check_mng_access(hw)) { + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); + if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) + status = 0; + else + status = TXGBE_ERR_EEPROM_CHECKSUM; + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +s32 txgbe_open_notify(struct txgbe_hw *hw) +{ + int tmp; + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_DW_OPEN_NOTIFY; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = 0; + buffer.address 
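+	/* Note on the hostif read unpacking above: each 32-bit mailbox
+	 * dword carries two EEPROM words; e.g. a read value of 0xBBBBAAAA
+	 * stores data[n] = 0xAAAA and then, after "value >>= 16",
+	 * data[n + 1] = 0xBBBB.
+	 */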
= 0; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + 500, false); + if (status) + return status; + + if (txgbe_check_mng_access(hw)) { + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); + if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) + status = 0; + else + status = TXGBE_ERR_EEPROM_CHECKSUM; + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +/** + * txgbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_write_ee_hostif_data(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + u16 i = 0; + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) + goto out; + + for (i = 0; i < words; i++) { + status = txgbe_write_ee_hostif_data(hw, offset + i, + data[i]); + if (status != 0) + break; + } + + hw->mac.ops.release_swfw_sync(hw, TXGBE_MNG_SWFW_SYNC_SW_FLASH); +out: + + return status; +} + +/** + * txgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + eeprom_ptrs = vmalloc(TXGBE_EEPROM_LAST_WORD * sizeof(u16)); + if (!eeprom_ptrs) + return TXGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = txgbe_read_ee_hostif_buffer(hw, 0, + TXGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) + return status; + + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < TXGBE_EEPROM_LAST_WORD) + return TXGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + if ((i > (TXGBE_SHOWROM_I2C_PTR / 2)) && (i < (TXGBE_SHOWROM_I2C_END / 2))) + local_buffer[i] = 0xffff; + if (i != hw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + } + + checksum = (u16)TXGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (s32)checksum; +} + +/** + * txgbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. 
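+ *
+ * The checksum convention used by txgbe_calc_eeprom_checksum() above:
+ * sum every word in the region except the checksum word itself, then
+ * store the complement so the whole region sums to TXGBE_EEPROM_SUM,
+ * i.e.:
+ *
+ *	checksum = (u16)TXGBE_EEPROM_SUM - sum_of_all_other_words;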
+ **/ +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = txgbe_read_ee_hostif(hw, 0, &checksum); + if (status) + return status; + + status = txgbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_write_ee_hostif(hw, TXGBE_EEPROM_CHECKSUM, + checksum); + + return status; +} + +/** + * txgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) + return status; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + TXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = TXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * txgbe_update_flash - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 
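+ *
+ * A typical commit sequence pairs the helpers above (a sketch):
+ *
+ *	txgbe_write_ee_hostif(hw, offset, data);
+ *	txgbe_update_eeprom_checksum(hw);
+ *	txgbe_update_flash(hw);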
+ **/ +s32 txgbe_update_flash(struct txgbe_hw *hw) +{ + s32 status = 0; + union txgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + + if (hw->mac.type == txgbe_mac_sp) + buffer.req.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * txgbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u16 value = 0; + u32 i; + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } else { + *link_up = true; + } + + if (*link_up) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (!(links_reg & TXGBE_CFG_PORT_ST_LINK_UP)) { + *link_up = false; + } else { + *link_up = true; + break; + } + } + msleep(100); + } + } else { + if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } else { + *link_up = true; + } + if (*link_up) { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) + *link_up = true; + else + *link_up = false; + } + } + + /* sync link status to fw for ocp card */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) + wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, value); + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1) { + *link_up = hw->f2c_mod_status; + + if (*link_up) + /* recover led configure when link up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + else + /* over write led when link down */ + hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP | TXGBE_LED_LINK_10G | + TXGBE_LED_LINK_1G | TXGBE_LED_LINK_ACTIVE); + } + + if (*link_up) { + if (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper && + ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + if ((value & 0xc000) == 0xc000) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + else if ((value & 0xc000) == 0x8000) + *speed = TXGBE_LINK_SPEED_1GB_FULL; + else if ((value & 0xc000) == 0x4000) + *speed = TXGBE_LINK_SPEED_100_FULL; + else if ((value & 0xc000) == 0x0000) + *speed = TXGBE_LINK_SPEED_10_FULL; + } else { + if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) == + TXGBE_CFG_PORT_ST_LINK_10G) + *speed = TXGBE_LINK_SPEED_10GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) == + TXGBE_CFG_PORT_ST_LINK_1G) + *speed = TXGBE_LINK_SPEED_1GB_FULL; + else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) == + TXGBE_CFG_PORT_ST_LINK_100M) + *speed = TXGBE_LINK_SPEED_100_FULL; + else + 
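+			/* MAC-register decode fallback: none of the 10G/1G/
+			 * 100M status bits set means the link trained at
+			 * 10M. The ext-PHY path above reads the same speeds
+			 * from register 3.0x8008 bits 15:14 (0b11 = 10G,
+			 * 0b10 = 1G, 0b01 = 100M, 0b00 = 10M).
+			 */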
*speed = TXGBE_LINK_SPEED_10_FULL; + } + } else { + *speed = TXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * txgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee) +{ + return 0; +} + +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw, u32 open) +{ + int status; + struct txgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf1 - open; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + buffer.func = PCI_FUNC(pdev->devfn); + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + return status; +} + +static int txgbe_hic_get_lldp(struct txgbe_hw *hw) +{ + int status; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf2; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + if (hw->mac.type == txgbe_mac_sp) + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + buffer.func = hw->bus.lan_id; + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + if (buffer.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) + return -1; + else + return (int)buffer.func; +} + +int txgbe_is_lldp(struct txgbe_hw *hw) +{ + u32 tmp = 0, lldp_flash_data = 0, i = 0; + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + status = txgbe_hic_get_lldp(hw); + if (status != -1) { + if (status) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + goto out; + } else { + for (; i < 0x1000 / sizeof(u32); i++) { + status = txgbe_flash_read_dword(hw, TXGBE_LLDP_REG + i * 4, &tmp); + if (status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + } +out: + return 0; +} + +void txgbe_hic_write_autoneg_status(struct txgbe_hw *hw, bool autoneg) +{ + struct txgbe_adapter *adapter = hw->back; + struct txgbe_hic_write_autoneg buffer; + + /* only support sp temporarily */ + if (hw->mac.type != txgbe_mac_sp) + return; + + /* only 0x64e20011 and above 0x20011 support */ + if (adapter->etrack_id != 0x64e20011 && + (adapter->etrack_id & 0xfffff) < 0x20012) + return; + + buffer.hdr.cmd = FW_AN_STA_CMD; + buffer.hdr.buf_len = FW_AN_STA_LEN; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.lan_id = hw->bus.lan_id; + buffer.autoneg = autoneg; + buffer.hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM; + + txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); +} + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate) +{ + struct txgbe_adapter *adapter = hw->back; + int factor_int; + int factor_fra; + int link_speed; + int bcnrc_val; + + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
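+	 *
+	 * Worked example for the amber-lite branch below: with
+	 * link_speed = 10000 Mbps and max_tx_rate = 3000 Mbps, the 5%
+	 * calibration offset gives 3150; factor_int = 10000 / 3150 = 3 and
+	 * the remainder (10000 % 3150) is converted by txgbe_frac_to_bi()
+	 * into a 14-bit binary fraction.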
+ */ + + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = txgbe_link_mbps(adapter); + max_tx_rate = max_tx_rate * 105 / 100; //necessary offset by test + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + if (max_tx_rate > link_speed) { + factor_int = 1; + factor_fra = 0; + } + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, queue); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + + wr32(hw, TXGBE_TDM_RP_IDX, queue); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } +} + +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag) +{ + int status; + struct txgbe_led_active_set buffer; + + buffer.hdr.cmd = 0xf8; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.active_flag = active_flag; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + + return 0; +} + +bool txgbe_is_backplane(struct txgbe_hw *hw) +{ + return hw->mac.ops.get_media_type(hw) == txgbe_media_type_backplane ? + true : false; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h new file mode 100644 index 000000000000..04485f679696 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_HW_H_ +#define _TXGBE_HW_H_ + +#define TXGBE_EMC_INTERNAL_DATA 0x00 +#define TXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define TXGBE_EMC_DIODE1_DATA 0x01 +#define TXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define TXGBE_EMC_DIODE2_DATA 0x23 +#define TXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define TXGBE_EMC_DIODE3_DATA 0x2A +#define TXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +#define SPI_CLK_DIV 2 + +#define SPI_CMD_ERASE_CHIP 4 // SPI erase chip command +#define SPI_CMD_ERASE_SECTOR 3 // SPI erase sector command +#define SPI_CMD_WRITE_DWORD 0 // SPI write a dword command +#define SPI_CMD_READ_DWORD 1 // SPI read a dword command +#define SPI_CMD_USER_CMD 5 // SPI user command +#define SPI_CLK_CMD_OFFSET 28 // SPI command field offset in Command register +#define SPI_CLK_DIV_OFFSET 25 // SPI clock divide field offset in Command register + +#define SPI_TIME_OUT_VALUE 10000 +#define SPI_SECTOR_SIZE (4 * 1024) // FLASH sector size is 64KB +#define SPI_H_CMD_REG_ADDR 0x10104 // SPI Command register address +#define SPI_H_DAT_REG_ADDR 0x10108 // SPI Data register address +#define SPI_H_STA_REG_ADDR 0x1010c // SPI Status register address +#define SPI_H_USR_CMD_REG_ADDR 0x10110 // SPI User Command register address +#define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1 +#define MISC_RST_REG_ADDR 0x1000c // Misc reset register address +#define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read +#define PRB_CTL 0x10200 // used to check whether has been upgraded +#define PRB_SCRATCH 0x10230 // used to check whether has been upgraded + +#define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH +#define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014 +#define MAC_ADDR1_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN1, stored in external FLASH +#define MAC_ADDR1_WORD1_OFFSET_1G 0x0070014 + +#define AMLITE_MAC_ADDR0_WORD0_OFFSET 0x00f010c // MAC Address for LAN0, stored in external FLASH +#define AMLITE_MAC_ADDR0_WORD1_OFFSET 0x00f0114 +#define AMLITE_MAC_ADDR1_WORD0_OFFSET 0x00f020c // MAC Address for LAN1, stored in external FLASH +#define AMLITE_MAC_ADDR1_WORD1_OFFSET 0x00f0214 + +#define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 +#define TXGBE_VPD_OFFSET 0x500 +#define TXGBE_VPD_END 0x600 + +struct txgbe_hic_read_cab { + union txgbe_hic_hdr2 hdr; + union { + u8 d8[252]; + u16 d16[126]; + u32 d32[63]; + } dbuf; +}; + +/** + * Packet Type decoding + **/ +/* txgbe_dec_ptype.mac: outer mac */ +enum txgbe_dec_ptype_mac { + TXGBE_DEC_PTYPE_MAC_IP = 0, + TXGBE_DEC_PTYPE_MAC_L2 = 2, + TXGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* txgbe_dec_ptype.[e]ip: outer&encaped ip */ +#define TXGBE_DEC_PTYPE_IP_FRAG (0x4) +enum txgbe_dec_ptype_ip { + TXGBE_DEC_PTYPE_IP_NONE = 0, + TXGBE_DEC_PTYPE_IP_IPV4 = 1, + TXGBE_DEC_PTYPE_IP_IPV6 = 2, + TXGBE_DEC_PTYPE_IP_FGV4 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4), + TXGBE_DEC_PTYPE_IP_FGV6 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6), +}; + +/* txgbe_dec_ptype.etype: encaped type */ +enum txgbe_dec_ptype_etype { + TXGBE_DEC_PTYPE_ETYPE_NONE = 0, + TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + TXGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + TXGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* txgbe_dec_ptype.proto: payload proto */ +enum txgbe_dec_ptype_prot { + TXGBE_DEC_PTYPE_PROT_NONE = 0, + TXGBE_DEC_PTYPE_PROT_UDP = 1, + TXGBE_DEC_PTYPE_PROT_TCP = 2, + TXGBE_DEC_PTYPE_PROT_SCTP = 3, + TXGBE_DEC_PTYPE_PROT_ICMP = 4, + TXGBE_DEC_PTYPE_PROT_TS = 5, /* 
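+							  (IEEE 1588); see
+ * the note below.
+ *
+ * Note: the SPI_* constants above assemble into one command word; a
+ * hedged sketch (field placement per the offsets above, not a quoted
+ * driver snippet; "byte_addr" is illustrative):
+ *
+ *	u32 cmd = (SPI_CMD_READ_DWORD << SPI_CLK_CMD_OFFSET) |
+ *		  (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | byte_addr;
+ *	wr32(hw, SPI_H_CMD_REG_ADDR, cmd);
+ *
+ * PROT_TS marks a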
time sync */ +}; + +/* txgbe_dec_ptype.layer: payload layer */ +enum txgbe_dec_ptype_layer { + TXGBE_DEC_PTYPE_LAYER_NONE = 0, + TXGBE_DEC_PTYPE_LAYER_PAY2 = 1, + TXGBE_DEC_PTYPE_LAYER_PAY3 = 2, + TXGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct txgbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; + +extern struct txgbe_dec_ptype txgbe_ptype_lookup[256]; + +s32 txgbe_check_mac_link_sp(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete); +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); + +void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map); +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw); +s32 txgbe_init_hw(struct txgbe_hw *hw); +s32 txgbe_start_hw(struct txgbe_hw *hw); +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw); +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr); +s32 txgbe_get_bus_info(struct txgbe_hw *hw); +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status); +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw); +s32 txgbe_stop_adapter(struct txgbe_hw *hw); + +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index); +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index); + +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index); +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw); +s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + txgbe_mc_addr_itr func, bool clear); +s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr func); +s32 txgbe_enable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw); +s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw); + +#ifndef read_poll_timeout +#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ + } \ + (cond) ? 
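+	/* Typical use of this fallback (a sketch; rd32 takes (hw, reg)):  \
+	 *								\
+	 *	u32 val;						\
+	 *	int err = read_poll_timeout(rd32, val,			\
+	 *			val & TXGBE_SPI_STATUS_OPDONE,		\
+	 *			100, 10000, false,			\
+	 *			hw, TXGBE_SPI_STATUS);			\
+	 */								\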
0 : -ETIMEDOUT; \ +}) +#endif + +s32 txgbe_fc_enable(struct txgbe_hw *hw); +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw); +void txgbe_fc_autoneg(struct txgbe_hw *hw); +s32 txgbe_setup_fc(struct txgbe_hw *hw); + +s32 txgbe_validate_mac_addr(u8 *mac_addr); +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask); +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask); +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw); + +s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); +s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); + +s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq); +s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq); +s32 txgbe_init_uta_tables(struct txgbe_hw *hw); +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 txgbe_clear_vfta(struct txgbe_hw *hw); +s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan); + +s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf); +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf); +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf); +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps); +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +s32 txgbe_reset_hostif(struct txgbe_hw *hw); +u8 txgbe_calculate_checksum(u8 *buffer, u32 length); +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void txgbe_clear_tx_pending(struct txgbe_hw *hw); +bool txgbe_mng_present(struct txgbe_hw *hw); +bool txgbe_check_mng_access(struct txgbe_hw *hw); + +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw); +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw); +void txgbe_enable_rx(struct txgbe_hw *hw); +void txgbe_disable_rx(struct txgbe_hw *hw); +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit); + +/* @txgbe_api.h */ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw); +s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl); +s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue); +s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw, + union txgbe_atr_input *input_mask, bool cloud_mode); +s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id); +s32 txgbe_fdir_add_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + union txgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *mask); +u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword 
common); + +s32 txgbe_get_link_capabilities_sp(struct txgbe_hw *hw, + u32 *speed, bool *autoneg); +enum txgbe_media_type txgbe_get_media_type_sp(struct txgbe_hw *hw); +void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed); +int txgbe_init_shared_code(struct txgbe_hw *hw); +s32 txgbe_setup_mac_link_sp(struct txgbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void txgbe_init_mac_link_ops_sp(struct txgbe_hw *hw); +s32 txgbe_reset_hw(struct txgbe_hw *hw); +s32 txgbe_identify_phy(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops_sp(struct txgbe_hw *hw); +s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval); +s32 txgbe_init_ops_generic(struct txgbe_hw *hw); +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee); +int txgbe_reconfig_mac(struct txgbe_hw *hw); + +s32 txgbe_init_flash_params(struct txgbe_hw *hw); +s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +s32 txgbe_read_eeprom(struct txgbe_hw *hw, + u16 offset, u16 *data); +s32 txgbe_read_eeprom_buffer(struct txgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw); +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val); +s32 txgbe_update_flash(struct txgbe_hw *hw); +int txgbe_upgrade_flash(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size); +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data); +s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size); + +s32 txgbe_close_notify(struct txgbe_hw *hw); +s32 txgbe_open_notify(struct txgbe_hw *hw); + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg); +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg); + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg); +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data); +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw, u32 open); +int txgbe_is_lldp(struct txgbe_hw *hw); +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw); +int txgbe_set_pps(struct txgbe_hw *hw, bool enable, u64 nsec, u64 cycles); +void txgbe_hic_write_autoneg_status(struct txgbe_hw *hw, bool autoneg); +int txgbe_enable_rx_adapter(struct txgbe_hw *hw); + +s32 txgbe_init_ops_aml(struct txgbe_hw *hw); +s32 txgbe_init_ops_aml40(struct txgbe_hw *hw); + +void txgbe_set_queue_rate_limit(struct txgbe_hw *hw, int queue, u16 max_tx_rate); + +int txgbe_hic_notify_led_active(struct txgbe_hw *hw, int active_flag); +bool txgbe_is_backplane(struct txgbe_hw *hw); + +#endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c new file mode 100644 index 000000000000..eaef1b86bf8f --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c @@ -0,0 +1,1208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe.h" +#include "txgbe_sriov.h" + +/** + * txgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool txgbe_cache_ring_dcb_vmdq(struct txgbe_adapter *adapter) +{ +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* CONFIG_FCOE */ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & TXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = txgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } +#endif /* CONFIG_FCOE */ + + return true; +} + +/* txgbe_get_first_reg_idx - Return first register index associated with ring */ +static void txgbe_get_first_reg_idx(struct txgbe_adapter *adapter, u8 tc, + u16 *tx, u16 *rx) +{ + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + if (num_tcs > 4) { + /* TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 
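+						  worked example: with
+				 * 4 TCs the Rx bases are 0, 32, 64, 96
+				 * and for tc < 2 the Tx bases are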
0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } +} + +/** + * txgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static bool txgbe_cache_ring_dcb(struct txgbe_adapter *adapter) +{ + int tc, offset, rss_i, i; + u16 tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + txgbe_get_first_reg_idx(adapter, (u8)tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = (u8)tc; + adapter->rx_ring[offset + i]->dcb_tc = (u8)tc; + } + } + + return true; +} + +/** + * txgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. + * + **/ +static bool txgbe_cache_ring_vmdq(struct txgbe_adapter *adapter) +{ +#if IS_ENABLED(CONFIG_FCOE) + struct txgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && i > fcoe->offset) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; +#endif + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && i > fcoe->offset) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; +#endif + + return true; +} + +/** + * txgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. 
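+ *
+ * In the VMDq mappings above the pool stride is
+ * __ALIGN_MASK(1, ~vmdq->mask), i.e. the number of queues per pool; for
+ * example with a 4-queue pool mask the stride is 4, so pool p owns
+ * register indices p * 4 .. p * 4 + 3 (a worked example of the existing
+ * math, not new behavior).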
+ * + **/ +static bool txgbe_cache_ring_rss(struct txgbe_adapter *adapter) +{ + int i, reg_idx; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) + adapter->xdp_ring[i]->reg_idx = reg_idx; + + return true; +} + +/** + * txgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void txgbe_cache_ring_register(struct txgbe_adapter *adapter) +{ + if (txgbe_cache_ring_dcb_vmdq(adapter)) + return; + + if (txgbe_cache_ring_dcb(adapter)) + return; + + if (txgbe_cache_ring_vmdq(adapter)) + return; + + txgbe_cache_ring_rss(adapter); +} + +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + +/** + * txgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool txgbe_set_dcb_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; +#endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = TXGBE_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = TXGBE_VMDQ_4Q_MASK; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; +#endif /* CONFIG_FCOE */ + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = TXGBE_RSS_DISABLED_MASK; + + adapter->queues_per_pool = tcs;/*maybe same to num_rx_queues_per_pool*/ + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + if 
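+	/* Pool sizing above, worked: with tcs = 8 this allows up to
+	 * 16 pools of 8 queues (TXGBE_VMDQ_8Q_MASK); with tcs <= 4, up to
+	 * 32 pools of 4 queues. Total Tx/Rx queue counts become
+	 * vmdq_i * tcs.
+	 */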
(adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FcoE TC */ + fcoe->indices = 1; + fcoe->offset = txgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } +#endif /* CONFIG_FCOE */ + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, (u8)i, 1, i); + + return true; +} + +/** + * txgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. + * + **/ +static bool txgbe_set_dcb_queues(struct txgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *f; + u16 rss_i, rss_m, i; + u16 tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = TXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = TXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when DCB is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + u8 tc = txgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } +#endif /* CONFIG_FCOE */ + + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, (u8)i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +static u16 txgbe_xdp_queues(struct txgbe_adapter *adapter) +{ + u16 queues = min_t(int, MAX_XDP_QUEUES, nr_cpu_ids); + + return adapter->xdp_prog ? queues : 0; +} + +/** + * txgbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
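+ *
+ * For the DCB sizing in txgbe_set_dcb_queues() above, a worked example:
+ * with 4 TCs and dev->num_tx_queues = 64, rss_i = min(64 / 4, 16) = 16,
+ * so each TC gets 16 queue pairs and 64 queues are used in total.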
+ * + **/ +static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = TXGBE_RSS_DISABLED_MASK; +#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; +#endif + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, TXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool + */ + if (vmdq_i > 32) { + vmdq_m = TXGBE_VMDQ_2Q_MASK; + rss_m = TXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = TXGBE_VMDQ_4Q_MASK; + rss_m = TXGBE_RSS_4Q_MASK; + /* We can support 4, 2, or 1 queues */ + rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; + } + +#if IS_ENABLED(CONFIG_FCOE) + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); +#endif + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i;/*maybe same to num_rx_queues_per_pool*/ + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + adapter->num_xdp_queues = 0; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. + */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } +#endif /* CONFIG_FCOE */ + + return true; +} + +/** + * txgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
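+ *
+ * The VMDq split in txgbe_set_vmdq_queues() above, worked: asking for
+ * more than 32 pools selects TXGBE_VMDQ_2Q_MASK and caps RSS at 2
+ * queues per pool; 32 or fewer pools select TXGBE_VMDQ_4Q_MASK with
+ * 4, 2 or 1 RSS queues per pool depending on the RSS limit.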
+ *
+ **/
+static bool txgbe_set_rss_queues(struct txgbe_adapter *adapter)
+{
+ struct txgbe_ring_feature *f;
+ u16 rss_i;
+
+ /* set mask for 64 queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = f->limit;
+
+ f->indices = rss_i;
+ f->mask = TXGBE_RSS_64Q_MASK;
+
+ /* disable ATR by default; it will be configured below */
+ adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ /* Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ f = &adapter->ring_feature[RING_F_FDIR];
+
+ f->indices = f->limit;
+ rss_i = f->indices;
+
+ if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
+
+#if IS_ENABLED(CONFIG_FCOE)
+ /* FCoE can exist on the same rings as standard network traffic
+ * however it is preferred to avoid that if possible. In order
+ * to get the best performance we allocate as many FCoE queues
+ * as we can and we place them at the end of the ring array to
+ * avoid sharing queues with standard RSS on systems with 24 or
+ * more CPUs.
+ */
+ if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) {
+ struct net_device *dev = adapter->netdev;
+ u16 fcoe_i;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ f->indices = min_t(u16, fcoe_i, f->limit);
+ f->offset = fcoe_i - f->indices;
+ rss_i = max_t(u16, fcoe_i, rss_i);
+ }
+#endif /* CONFIG_FCOE */
+
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
+ adapter->num_xdp_queues = txgbe_xdp_queues(adapter);
+ return true;
+}
+
+static void txgbe_set_num_queues(struct txgbe_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->queues_per_pool = 1; /* may be the same as num_rx_queues_per_pool */
+ adapter->num_xdp_queues = 0;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (txgbe_set_dcb_vmdq_queues(adapter))
+ return;
+
+ if (txgbe_set_dcb_queues(adapter))
+ return;
+
+ if (txgbe_set_vmdq_queues(adapter))
+ return;
+
+ txgbe_set_rss_queues(adapter);
+}
+
+/**
+ * txgbe_acquire_msix_vectors - acquire MSI-X vectors
+ * @adapter: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int txgbe_acquire_msix_vectors(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ int i, vectors, vector_threshold;
+
+ if (!(adapter->flags & TXGBE_FLAG_MSIX_CAPABLE))
+ return -EOPNOTSUPP;
+
+ /* We start by asking for one vector per queue pair */
+ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ vectors = max(vectors, adapter->num_xdp_queues);
+
+ /* It is easy to be greedy for MSI-X vectors. However, it really
+ * doesn't do much good if we have a lot more vectors than CPUs. We'll
+ * be somewhat conservative and only ask for (roughly) the same number
+ * of vectors as there are CPUs.
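+ *
+ * For example (illustrative): 16 queue pairs on an 8-CPU host are
+ * trimmed to 8 queue vectors here, and NON_Q_VECTORS is added below
+ * for the Other (Link Status Change, etc.) interrupt:
+ *
+ *	vectors = min_t(int, 16, num_online_cpus()) + NON_Q_VECTORS;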
+ */
+ vectors = min_t(int, vectors, num_online_cpus());
+
+ /* Some vectors are necessary for non-queue interrupts */
+ vectors += NON_Q_VECTORS;
+
+ /* Hardware can only support a maximum of hw->mac.max_msix_vectors.
+ * With features such as RSS and VMDq, we can easily surpass the
+ * number of Rx and Tx descriptor queues supported by our device.
+ * Thus, we cap the maximum in the rare cases where the CPU count also
+ * exceeds our vector limit
+ */
+ vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
+
+ /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
+ * handler, and (2) an Other (Link Status Change, etc.) handler.
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ adapter->msix_entries = kcalloc(vectors,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < vectors; i++)
+ adapter->msix_entries[i].entry = i;
+
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
+ if (vectors < 0) {
+ /* A negative count of allocated vectors indicates an error in
+ * acquiring within the specified range of MSI-X vectors
+ */
+ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
+ vectors);
+
+ adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return vectors;
+ }
+
+ /* we successfully allocated some number of vectors within our
+ * requested range.
+ */
+ adapter->flags |= TXGBE_FLAG_MSIX_ENABLED;
+
+ /* Adjust for only the vectors we'll use, which is the minimum
+ * of max_q_vectors and the number of vectors we were allocated.
+ */
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
+
+ return 0;
+}
+
+static void txgbe_add_ring(struct txgbe_ring *ring,
+ struct txgbe_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+ head->next_update = jiffies + 1;
+}
+
+/**
+ * txgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @xdp_count: total number of XDP rings to allocate
+ * @xdp_idx: index of first XDP ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int xdp_count, unsigned int xdp_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct txgbe_q_vector *q_vector;
+ struct txgbe_ring *ring;
+ int node = -1;
+ int cpu = -1;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+ int ring_count, size;
+
+ /* note this will allocate space for the ring structure as well!
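+ * A single allocation holds the vector and its rings back to back:
+ *
+ *	[txgbe_q_vector | txgbe_ring 0 | ... | txgbe_ring ring_count-1]
+ *
+ * which is why size below adds ring_count ring structures to the
+ * q_vector itself.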
*/ + ring_count = txr_count + rxr_count + xdp_count; + size = sizeof(struct txgbe_q_vector) + + (sizeof(struct txgbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if (tcs <= 1 && !(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + txgbe_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = TXGBE_ITR_ADAPTIVE_MAX_USECS | + TXGBE_ITR_ADAPTIVE_LATENCY; + + /* initialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = TXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = TXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + clear_ring_xdp(ring); + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + spin_lock_init(&ring->tx_lock); + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx++; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + txgbe_add_ring(ring, &q_vector->rx); + +#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED) { + struct txgbe_ring_feature *f; + + f = 
&adapter->ring_feature[RING_F_FCOE]; + + if (rxr_idx >= f->offset && + rxr_idx < f->offset + f->indices) { + set_bit(__TXGBE_RX_FCOE, &ring->state); + } + } +#endif /* CONFIG_FCOE */ + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * txgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } + + if (static_key_enabled((struct static_key *)&txgbe_xdp_locking_key)) + static_branch_dec(&txgbe_xdp_locking_key); + + txgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + + netif_napi_del(&q_vector->napi); + + kfree_rcu(q_vector, rcu); +} + +/** + * txgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int txgbe_alloc_q_vectors(struct txgbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int xdp_remaining = adapter->num_xdp_queues; + unsigned int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { + for (; rxr_remaining; v_idx++) { + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); + + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + xqpv, xdp_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + xdp_remaining -= xqpv; + rxr_idx++; + txr_idx++; + xdp_idx += xqpv; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_xdp_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * txgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. 
In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void txgbe_free_q_vectors(struct txgbe_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_xdp_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ txgbe_free_q_vector(adapter, v_idx);
+}
+
+void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter)
+{
+ if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+ adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) {
+ adapter->flags &= ~TXGBE_FLAG_MSI_ENABLED;
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * txgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter)
+{
+ int err;
+
+ /* We will try to get MSI-X interrupts first */
+ if (!txgbe_acquire_msix_vectors(adapter))
+ return;
+
+ /* At this point, we do not have MSI-X capabilities. We need to
+ * reconfigure or disable various features which require MSI-X
+ * capability.
+ */
+
+ /* Disable DCB unless we only have a single traffic class */
+ if (netdev_get_num_tc(adapter->netdev) > 1) {
+ e_dev_warn("Number of DCB TCs exceeds number of available queues.\n");
+ netdev_reset_tc(adapter->netdev);
+
+ adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ }
+
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+ /* Disable VMDq support */
+ e_dev_warn("Disabling VMDq support\n");
+ adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED;
+
+#ifdef CONFIG_PCI_IOV
+ /* Disable SR-IOV support */
+ e_dev_warn("Disabling SR-IOV support\n");
+ txgbe_disable_sriov(adapter);
+#endif /* CONFIG_PCI_IOV */
+
+ /* Disable RSS */
+ e_dev_warn("Disabling RSS support\n");
+ adapter->ring_feature[RING_F_RSS].limit = 1;
+
+ /* recalculate number of queues now that many features have been
+ * changed or disabled.
+ */
+ txgbe_set_num_queues(adapter);
+ adapter->num_q_vectors = 1;
+
+ if (!(adapter->flags & TXGBE_FLAG_MSI_CAPABLE))
+ return;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
+ else
+ adapter->flags |= TXGBE_FLAG_MSI_ENABLED;
+}
+
+/**
+ * txgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ * - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ * - defined by miscellaneous hardware support/features (RSS, etc.)
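+ *
+ * The resulting order of preference is MSI-X, then MSI, then legacy
+ * INTx (see txgbe_set_interrupt_capability() above): for example, on
+ * a system without MSI-X the driver falls back to a single queue
+ * pair serviced by one MSI or line-based interrupt.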
+ **/ +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + txgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + txgbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = txgbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + txgbe_reset_interrupt_capability(adapter); + return err; + } + + txgbe_cache_ring_register(adapter); + + set_bit(__TXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * txgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter) +{ + txgbe_free_q_vectors(adapter); + txgbe_reset_interrupt_capability(adapter); +} + +void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct txgbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = TXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= TXGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c new file mode 100644 index 000000000000..de872502c8b5 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -0,0 +1,11176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "txgbe_xsk.h" +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_dcb.h" +#include "txgbe_sriov.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe_pcierr.h" +#include "txgbe_bp.h" +#include "txgbe_e56.h" +#include "txgbe_e56_bp.h" + +char txgbe_driver_name[32] = TXGBE_NAME; +static const char txgbe_driver_string[] = + "WangXun RP1000/RP2000/FF50XX PCI Express Network Driver"; + +#define DRV_VERSION __stringify(2.1.1oe) + +const char txgbe_driver_version[32] = DRV_VERSION; +static const char txgbe_copyright[] = + "Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd"; +static const char txgbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated."; +static const char txgbe_underheat_msg[] = + "Network adapter has been started again since the temperature"; + +/* txgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id txgbe_pci_tbl[] = { + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_SP1000), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5025), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5125), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5040), 0}, + { PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_AML5140), 0}, + /* required last entry */ + { .device = 0 } +}; +MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) RP1000/RP2000/FF50XX PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +DEFINE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); +EXPORT_SYMBOL(txgbe_xdp_locking_key); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *txgbe_wq; + +static bool txgbe_is_sfp(struct txgbe_hw *hw); +static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev); +static void txgbe_napi_enable_all(struct txgbe_adapter *adapter); +static void txgbe_napi_disable_all(struct txgbe_adapter *adapter); + +static inline struct txgbe_dec_ptype txgbe_decode_ptype(const u8 ptype) +{ + return txgbe_ptype_lookup[ptype]; +} + +static inline struct txgbe_dec_ptype +decode_rx_desc_ptype(const union txgbe_rx_desc *rx_desc) +{ + return txgbe_decode_ptype(TXGBE_RXD_PKTTYPE(rx_desc)); +} + +void txgbe_print_tx_hang_status(struct txgbe_adapter *adapter) +{ + int pos; + u32 value; + struct pci_dev *pdev = adapter->pdev; + u16 devctl2; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &value); + e_info(probe, "AER Uncorrectable Error Status: 0x%08x\n", value); + txgbe_aer_print_error(adapter, TXGBE_AER_UNCORRECTABLE, value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &value); + e_info(probe, "AER Uncorrectable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &value); + e_info(probe, "AER Uncorrectable Error Severity: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + 
PCI_ERR_COR_STATUS, &value);
+ e_info(probe, "AER Correctable Error Status: 0x%08x\n", value);
+ txgbe_aer_print_error(adapter, TXGBE_AER_CORRECTABLE, value);
+ pci_read_config_dword(pdev, pos + PCI_ERR_COR_MASK, &value);
+ e_info(probe, "AER Correctable Error Mask: 0x%08x\n", value);
+ pci_read_config_dword(pdev, pos + PCI_ERR_CAP, &value);
+ e_info(probe, "AER Capabilities and Control Register: 0x%08x\n", value);
+
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);
+ e_info(probe, "Device Control2 Register: 0x%04x\n", devctl2);
+
+ e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n",
+ rd32(&adapter->hw, TXGBE_TDB_TFCS));
+
+ e_info(tx_err, "tdm_desc_fatal_0: 0x%x\n",
+ rd32(&adapter->hw, 0x180d0));
+ e_info(tx_err, "tdm_desc_fatal_1: 0x%x\n",
+ rd32(&adapter->hw, 0x180d4));
+ e_info(tx_err, "tdm_desc_fatal_2: 0x%x\n",
+ rd32(&adapter->hw, 0x180d8));
+ e_info(tx_err, "tdm_desc_fatal_3: 0x%x\n",
+ rd32(&adapter->hw, 0x180dc));
+ e_info(tx_err, "tdm_desc_nonfatal_0: 0x%x\n",
+ rd32(&adapter->hw, 0x180c0));
+ e_info(tx_err, "tdm_desc_nonfatal_1: 0x%x\n",
+ rd32(&adapter->hw, 0x180c4));
+ e_info(tx_err, "tdm_desc_nonfatal_2: 0x%x\n",
+ rd32(&adapter->hw, 0x180c8));
+ e_info(tx_err, "tdm_desc_nonfatal_3: 0x%x\n",
+ rd32(&adapter->hw, 0x180cc));
+}
+
+static void txgbe_dump_all_ring_desc(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ union txgbe_tx_desc *tx_desc;
+ struct txgbe_ring *tx_ring;
+ int i, j;
+
+ if (!netif_msg_tx_err(adapter))
+ return;
+
+ e_warn(tx_err, "Dump desc base addr\n");
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e_warn(tx_err, "q_%d:0x%x%x\n", i, rd32(hw, TXGBE_PX_TR_BAH(i)),
+ rd32(hw, TXGBE_PX_TR_BAL(i)));
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ for (j = 0; j < tx_ring->count; j++) {
+ tx_desc = TXGBE_TX_DESC(tx_ring, j);
+ if (tx_desc->read.olinfo_status != 0x1)
+ e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n",
+ i, j, tx_desc->read.buffer_addr,
+ tx_desc->read.cmd_type_len,
+ tx_desc->read.olinfo_status);
+ }
+ }
+}
+
+static void txgbe_check_minimum_link(struct txgbe_adapter *adapter,
+ int expected_gts)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev;
+
+ /* Some devices are not connected over PCIe and thus do not negotiate
+ * speed. These devices do not have valid bus info, and thus any report
+ * we generate may not be correct.
+ */
+ if (hw->bus.type == txgbe_bus_type_internal)
+ return;
+
+ pdev = adapter->pdev;
+ pcie_print_link_status(pdev);
+}
+
+/**
+ * txgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
+ **/
+static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
+{
+ struct pci_dev *entry, *pdev = adapter->pdev;
+ int physfns = 0;
+
+ list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
+#ifdef CONFIG_PCI_IOV
+ /* don't count virtual functions */
+ if (entry->is_virtfn)
+ continue;
+#endif
+
+ /* When the devices on the bus don't all match our device ID,
+ * we can't reliably determine the correct number of
+ * functions. This can occur if a function has been direct
+ * attached to a virtual machine using VT-d, for example. In
+ * this case, simply return -1 to indicate this.
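+ *
+ * For example, a typical dual-port adapter exposes two physical
+ * functions with the same vendor/device ID on one bus, so 2 is
+ * returned; a foreign function on the same bus (e.g. one passed
+ * through with VT-d) makes the count unreliable, hence the -1.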
+ */ + if (entry->vendor != pdev->vendor || + entry->device != pdev->device) + return -1; + + physfns++; + } + + return physfns; +} + +void txgbe_service_event_schedule(struct txgbe_adapter *adapter) +{ + if (!test_bit(__TXGBE_DOWN, &adapter->state) && + !test_bit(__TXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__TXGBE_SERVICE_SCHED, &adapter->state)) + queue_work(txgbe_wq, &adapter->service_task); +} + +static void txgbe_service_event_complete(struct txgbe_adapter *adapter) +{ + WARN_ON_ONCE(!test_bit(__TXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); +} + +static void txgbe_remove_adapter(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__TXGBE_SERVICE_INITED, &adapter->state)) + txgbe_service_event_schedule(adapter); +} + +static void txgbe_check_remove(struct txgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned TXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == TXGBE_CFG_PORT_ST) { + txgbe_remove_adapter(hw); + return; + } + value = rd32(hw, TXGBE_CFG_PORT_ST); + if (value == TXGBE_FAILED_READ_REG) + txgbe_remove_adapter(hw); +} + +static u32 txgbe_validate_register_read(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct txgbe_adapter *adapter = hw->back; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + for (i = 0; i < TXGBE_DEAD_READ_RETRIES; ++i) { + value = txgbe_rd32(reg_addr + reg); + if (value != TXGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == TXGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +/** + * txgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or TXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns TXGBE_FAILED_READ_REG (all ones). 
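+ *
+ * A minimal caller-side sketch (handle_removal() is hypothetical):
+ *
+ *	value = txgbe_read_reg(hw, TXGBE_CFG_PORT_ST, false);
+ *	if (value == TXGBE_FAILED_READ_REG)
+ *		handle_removal(hw);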
+ */ +u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + value = txgbe_rd32(reg_addr + reg); + if (unlikely(value == TXGBE_FAILED_READ_REG)) + txgbe_check_remove(hw, reg); + if (unlikely(value == TXGBE_DEAD_READ_REG)) + value = txgbe_validate_register_read(hw, reg, quiet); + return value; +} + +static void txgbe_release_hw_control(struct txgbe_adapter *adapter) +{ + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); +} + +static void txgbe_get_hw_control(struct txgbe_adapter *adapter) +{ + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); +} + +/** + * txgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + struct txgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, TXGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(&adapter->hw, TXGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, TXGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, TXGBE_PX_IVAR(queue >> 1), ivar); + } +} + +void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring, + struct txgbe_tx_buffer *tx_buffer) +{ + if (!ring_is_xdp(ring) && tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; + /* tx_buffer must be completely set up in the transmit path */ +} + +static void txgbe_update_xoff_rx_lfc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if (hw->fc.current_mode != txgbe_fc_full && + hw->fc.current_mode != txgbe_fc_rx_pause) + return; + + data = rd32(hw, TXGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__TXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__TXGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); +} + +static void txgbe_update_xoff_received(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; + int tc; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if 
(adapter->txgbe_ieee_pfc) + pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en); + + if (!(adapter->flags & TXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + txgbe_update_xoff_rx_lfc(adapter); + return; + } + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i << 16); + pxoffrxc = rd32(hw, TXGBE_MAC_PXOFFRXC); + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + tc = tx_ring->dcb_tc; + if (tc <= 7 && xoff[tc]) + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + tc = xdp_ring->dcb_tc; + if (tc <= 7 && xoff[tc]) + clear_bit(__TXGBE_HANG_CHECK_ARMED, &xdp_ring->state); + } +} + +static u64 txgbe_get_tx_completed(struct txgbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 txgbe_get_tx_pending(struct txgbe_ring *ring) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, TXGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, TXGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) +{ + u64 tx_done = txgbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = txgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
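+ *
+ * In other words, this is a two-strike scheme: the first call that
+ * sees no progress only arms the bit (test_and_set_bit() returns the
+ * old value, zero), and only a second consecutive call with the bit
+ * still armed reports the hang.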
+ */
+ if (tx_done_old == tx_done && tx_pending)
+ /* make sure it is true for two checks in a row */
+ return test_and_set_bit(__TXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+
+ return false;
+}
+
+static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter)
+{
+ /* schedule immediate reset if we believe we hung */
+
+ if (adapter->hw.bus.lan_id == 0)
+ adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER;
+ else
+ wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1);
+ txgbe_service_event_schedule(adapter);
+}
+
+/**
+ * txgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void txgbe_tx_timeout_reset(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+ adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED;
+ e_warn(drv, "initiating dma reset due to tx timeout\n");
+ } else {
+ adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+ e_warn(drv, "initiating reset due to tx timeout\n");
+ }
+ txgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * txgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: index of the hanging queue (unused)
+ **/
+static void txgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct txgbe_hw *hw = &adapter->hw;
+ bool tdm_desc_fatal = false;
+ u32 value2 = 0, value3 = 0;
+ bool real_tx_hang = false;
+ u16 pci_cmd = 0;
+ u32 head, tail;
+ u16 vid = 0;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct txgbe_ring *tx_ring = adapter->tx_ring[i];
+
+ if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring))
+ real_tx_hang = true;
+ }
+
+ pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", vid);
+
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", pci_cmd);
+
+ if (hw->mac.type == txgbe_mac_sp) {
+ value2 = rd32(&adapter->hw, 0x10000);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2);
+ value2 = rd32(&adapter->hw, 0x180d0);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", value2);
+ value2 = rd32(&adapter->hw, 0x180d4);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2);
+ value2 = rd32(&adapter->hw, 0x180d8);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2);
+ value2 = rd32(&adapter->hw, 0x180dc);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ head = rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx));
+ tail = rd32(&adapter->hw, TXGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx));
+
+ ERROR_REPORT1(TXGBE_ERROR_POLLING,
+ "tx ring %d next_to_use is %d, next_to_clean is %d\n",
+ i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean);
+ ERROR_REPORT1(TXGBE_ERROR_POLLING,
+ "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail);
+ }
+
+ value2 = rd32(&adapter->hw, TXGBE_PX_IMS(0));
+ value3 = rd32(&adapter->hw, TXGBE_PX_IMS(1));
+ ERROR_REPORT1(TXGBE_ERROR_POLLING,
+ "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n", value2, value3);
+
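+ /* Decide between full PCIe recovery and a plain reset below: a lost
+ * config space read, a TDM descriptor fatal error or a cleared
+ * PCI_COMMAND memory-space enable bit (0x2) all point at the bus
+ * rather than a single queue, so they take the recovery path.
+ */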
+ /* only check pf queue tdm desc error */
+ if ((rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(0)) & 0xffffffff) ||
+ (rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(1)) & 0xffffffff))
+ tdm_desc_fatal = true;
+
+ /* PCIe link loss, tdm desc fatal error, or memory space access disabled */
+ if (vid == TXGBE_FAILED_READ_CFG_WORD ||
+ tdm_desc_fatal ||
+ !(pci_cmd & 0x2)) {
+ txgbe_tx_timeout_dorecovery(adapter);
+ } else {
+ txgbe_print_tx_hang_status(adapter);
+ txgbe_tx_timeout_reset(adapter);
+ }
+}
+
+static inline u16 txgbe_desc_buf_unmapped(struct txgbe_ring *ring, u16 ntc, u16 ntf)
+{
+ return ((ntc >= ntf) ? 0 : ring->count) + ntc - ntf;
+}
+
+/**
+ * txgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: NAPI budget for the current poll
+ **/
+static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector,
+ struct txgbe_ring *tx_ring, int napi_budget)
+{
+ struct txgbe_adapter *adapter = q_vector->adapter;
+ struct txgbe_hw *hw = &adapter->hw;
+ struct txgbe_tx_buffer *tx_buffer;
+ union txgbe_tx_desc *tx_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ unsigned int i = tx_ring->next_to_clean;
+ u16 vid = 0;
+ u32 head = 0;
+ u32 temp = tx_ring->next_to_clean;
+ struct txgbe_tx_buffer *free_tx_buffer;
+ u32 unmapped_descs = 0;
+ unsigned int ntf;
+ bool first_dma;
+ u32 size;
+ int j = 0;
+
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+ head = *tx_ring->headwb_mem;
+
+ if (test_bit(__TXGBE_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = TXGBE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ union txgbe_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+ /* we have caught up to head, no work left to do */
+ if (temp == head)
+ break;
+ else if (head > temp && !(tx_buffer->next_eop >= temp &&
+ (tx_buffer->next_eop < head)))
+ break;
+ else if (!(tx_buffer->next_eop >= temp ||
+ (tx_buffer->next_eop < head)))
+ break;
+ } else {
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD)))
+ break;
+ }
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ if (tx_buffer->skb) {
+ if (!ring_is_xdp(tx_ring) &&
+ !(skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ skb_orphan(tx_buffer->skb);
+ } else {
+ dev_err(tx_ring->dev, "skb is NULL.\n");
+ }
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+ }
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = TXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+
+ first_dma = false;
+ ntf = tx_ring->next_to_free;
+ free_tx_buffer = &tx_ring->tx_buffer_info[ntf];
+ ntf -= tx_ring->count;
+ unmapped_descs = txgbe_desc_buf_unmapped(tx_ring, i, tx_ring->next_to_free);
+ while (unmapped_descs > adapter->desc_reserved) {
+ if (ring_is_xdp(tx_ring)) {
+ if (free_tx_buffer->xdpf) {
+ xdp_return_frame(free_tx_buffer->xdpf);
+ first_dma = true;
+ }
+ } else {
+ if (free_tx_buffer->skb) {
+ dev_consume_skb_any(free_tx_buffer->skb);
+ first_dma = true;
+ }
+ }
+ if (first_dma) {
+ if (dma_unmap_len(free_tx_buffer, len)) {
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(free_tx_buffer, dma),
+ dma_unmap_len(free_tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* clear tx_buffer data */
+ if (ring_is_xdp(tx_ring))
+ free_tx_buffer->xdpf = NULL;
+ else
+ free_tx_buffer->skb = NULL;
+
+ dma_unmap_len_set(free_tx_buffer, len, 0);
+ free_tx_buffer->va = NULL;
+ first_dma = false;
+ } else {
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(free_tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(free_tx_buffer, dma),
+ dma_unmap_len(free_tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(free_tx_buffer, len, 0);
+ free_tx_buffer->va = NULL;
+ }
+ }
+
+ free_tx_buffer++;
+ ntf++;
+ if (unlikely(!ntf)) {
+ ntf -= tx_ring->count;
+ free_tx_buffer = tx_ring->tx_buffer_info;
+ }
+
+ unmapped_descs--;
+ }
+
+ ntf += tx_ring->count;
+ tx_ring->next_to_free = ntf;
+ /* need to update next_to_free before next_to_clean */
+ wmb();
+ tx_ring->next_to_clean = i;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+
+ e_err(drv, "Detected Tx Unit Hang%s\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ ring_is_xdp(tx_ring) ? 
" (XDP)" : "", + tx_ring->queue_index, + rd32(hw, TXGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, TXGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + if (netif_msg_tx_err(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "q_[%d][%d]:0x%llx, 0x%x, 0x%x\n", + tx_ring->reg_idx, j, tx_desc->read.buffer_addr, + tx_desc->read.cmd_type_len, + tx_desc->read.olinfo_status); + } + } + + if (netif_msg_pktdata(adapter)) { + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_err(pktdata, "tx buffer[%d][%d]:\n", + tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", + DUMP_PREFIX_OFFSET, 16, + 1, tx_buffer->va, size, + true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", + tx_ring->queue_index, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", + DUMP_PREFIX_OFFSET, 16, + 1, tx_buffer->skb, + sizeof(struct sk_buff), + true); + } + } + } + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == TXGBE_FAILED_READ_CFG_WORD) + e_info(hw, "pcie link has been lost.\n"); + + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + if (vid == TXGBE_FAILED_READ_CFG_WORD) { + txgbe_tx_timeout_dorecovery(adapter); + } else { + txgbe_print_tx_hang_status(adapter); + txgbe_tx_timeout_reset(adapter); + } + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + if (ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (txgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__TXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +#define TXGBE_RSS_L4_TYPES_MASK \ + ((1ul << TXGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void txgbe_rx_hash(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (TXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+#if IS_ENABLED(CONFIG_FCOE)
+/**
+ * txgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @ring: structure containing ring specific data
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool txgbe_rx_is_fcoe(struct txgbe_ring *ring,
+ union txgbe_rx_desc *rx_desc)
+{
+ u8 ptype = TXGBE_RXD_PKTTYPE(rx_desc);
+
+ return test_bit(__TXGBE_RX_FCOE, &ring->state) &&
+ ptype >= TXGBE_PTYPE_L2_FCOE &&
+ ptype <= TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER;
+}
+#endif /* CONFIG_FCOE */
+
+/**
+ * txgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void txgbe_rx_checksum(struct txgbe_ring *ring,
+ union txgbe_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct txgbe_dec_ptype dptype = decode_rx_desc_ptype(rx_desc);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
+
+ /* Rx csum disabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* if IPv4 header checksum error */
+ if ((txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_IPCS) &&
+ txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_IPE)) ||
+ (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_OUTERIPCS) &&
+ txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_OUTERIPER))) {
+ ring->rx_stats.csum_err++;
+ return;
+ }
+
+ /* L4 checksum offload flag must be set for the below code to work */
+ if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_L4CS))
+ return;
+
+ /* likely incorrect csum if IPv6 Dest Header found */
+ if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP &&
+ txgbe_test_staterr(rx_desc, TXGBE_RXD_IPV6EX))
+ return;
+
+ /* if L4 checksum error */
+ if (txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_TCPE)) {
+ ring->rx_stats.csum_err++;
+ return;
+ }
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
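+ *
+ * For example, for a tunnelled frame (say VXLAN carrying TCP) whose
+ * inner L4 checksum was verified, csum_level becomes 1 so the stack
+ * knows it is the inner rather than the outer checksum that has
+ * been validated.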
+ */ + if (dptype.etype >= TXGBE_DEC_PTYPE_ETYPE_IG) { + skb->csum_level = 1; + skb->encapsulation = 1; + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +static bool txgbe_alloc_mapped_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(txgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, txgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = txgbe_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * txgbe_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static void txgbe_release_rx_desc(struct txgbe_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
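+ *
+ * Without the barrier the device could observe the tail bump from
+ * writel() below before the descriptor writes above it, and fetch
+ * a half-initialized descriptor.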
+ */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * txgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +bool txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return false; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (ring_is_hs_enabled(rx_ring)) { + if (!txgbe_alloc_mapped_skb(rx_ring, bi)) + goto no_buffers; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!txgbe_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->page_dma, + bi->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); + + return false; + +no_buffers: + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + txgbe_release_rx_desc(rx_ring, i); + + return true; +} + +static inline u16 txgbe_get_hlen(struct txgbe_ring __always_unused *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & TXGBE_RXD_HDRBUFLEN_MASK; + + if (hlen > (TXGBE_RX_HDR_SIZE << TXGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= TXGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +static void txgbe_set_rsc_gso_size(struct txgbe_ring __maybe_unused *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + TXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void txgbe_update_rsc_stats(struct txgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!TXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += TXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + txgbe_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + TXGBE_CB(skb)->append_cnt = 0; +} + +static void txgbe_rx_vlan(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u8 idx = 0; + u16 ethertype; + + if ((ring->netdev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_TPID_MASK) >> TXGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + +/** + * txgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * 
@skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +void txgbe_process_skb_fields(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 flags = rx_ring->q_vector->adapter->flags; + + txgbe_update_rsc_stats(rx_ring, skb); + txgbe_rx_hash(rx_ring, rx_desc, skb); + txgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(flags & TXGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_TS))) { + txgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } + + txgbe_rx_vlan(rx_ring, rx_desc, skb); + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +void txgbe_rx_skb(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * txgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool txgbe_is_non_eop(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct txgbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); + + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(TXGBE_RXD_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= TXGBE_RXD_RSCCNT_SHIFT; + TXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= TXGBE_RXD_NEXTP_MASK; + ntc >>= TXGBE_RXD_NEXTP_SHIFT; + } + } + + /* if we are the last buffer then there is nothing else to do */ + if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \ + TXGBE_TXD_RS) + +/** + * txgbe_pull_tail - txgbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an txgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
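+ *
+ * Roughly: eth_get_headlen() sizes the protocol headers in the first
+ * fragment, those bytes are copied into the linear area, and the
+ * fragment's offset and length are advanced by the same amount, so
+ * no data is duplicated and truesize stays accurate.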
+ */
+static void txgbe_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/* it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(skb->dev, va, TXGBE_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * txgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb. The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void txgbe_dma_sync_frag(struct txgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	/* if the page was released unmap it, else just sync our portion */
+	if (unlikely(TXGBE_CB(skb)->page_released)) {
+		dma_unmap_page_attrs(rx_ring->dev, TXGBE_CB(skb)->dma,
+				     txgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     TXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      TXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
+	} else {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      TXGBE_CB(skb)->dma,
+					      skb_frag_off(frag),
+					      skb_frag_size(frag),
+					      DMA_FROM_DEVICE);
+	}
+}
+
+/**
+ * txgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
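+ *
+ * Caller sketch (illustrative, matching the use in txgbe_clean_rx_irq()
+ * below): a true return means the skb was already consumed:
+ *
+ *	if (txgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ *		continue;	(skb was freed; move to the next descriptor)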
+ **/
+bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring,
+			   union txgbe_rx_desc *rx_desc,
+			   struct sk_buff *skb)
+{
+	struct net_device *netdev = rx_ring->netdev;
+
+	if (IS_ERR(skb))
+		return true;
+
+	/* verify that the packet does not have any known errors */
+	if (unlikely(txgbe_test_staterr(rx_desc,
+					TXGBE_RXD_ERR_FRAME_ERR_MASK) &&
+		     !(netdev->features & NETIF_F_RXALL))) {
+		dev_kfree_skb_any(skb);
+		return true;
+	}
+
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		txgbe_pull_tail(skb);
+
+#if IS_ENABLED(CONFIG_FCOE)
+	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
+	if (txgbe_rx_is_fcoe(rx_ring, rx_desc))
+		return false;
+#endif
+
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * txgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring,
+				struct txgbe_rx_buffer *old_buff)
+{
+	struct txgbe_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->page_dma = old_buff->page_dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool txgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * txgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: number of buffer bytes written by hardware
+ *
+ * This function will add the data contained in rx_buffer->page to the skb
+ * by attaching the page as a frag.
+ *
+ * The function then updates the page offset so that the buffer can be
+ * reused by the adapter.
+ **/
+static void txgbe_add_rx_frag(struct txgbe_ring *rx_ring,
+			      struct txgbe_rx_buffer *rx_buffer,
+			      struct sk_buff *skb,
+			      unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = rx_ring->rx_offset ?
+			SKB_DATA_ALIGN(txgbe_rx_offset(rx_ring) + size) :
+			SKB_DATA_ALIGN(size);
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+static unsigned int txgbe_rx_frame_truesize(struct txgbe_ring *rx_ring,
+					    unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = txgbe_rx_pg_size(rx_ring) / 2;
+#else
+	truesize = SKB_DATA_ALIGN(TXGBE_SKB_PAD + size) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+#endif
+	return truesize;
+}
+
+static void txgbe_rx_buffer_flip(struct txgbe_ring *rx_ring,
+				 struct txgbe_rx_buffer *rx_buffer,
+				 unsigned int size)
+{
+	unsigned int truesize = txgbe_rx_frame_truesize(rx_ring, size);
+
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+static bool txgbe_can_reuse_rx_page(struct txgbe_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+	/* avoid re-using remote pages */
+	if (unlikely(txgbe_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+	/* The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define TXGBE_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - TXGBE_RXBUFFER_3K)
+	if (rx_buffer->page_offset > TXGBE_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
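+	 *
+	 * Worked example (illustrative): with pagecnt_bias == 1 and
+	 * page_ref_count(page) == 1, adding USHRT_MAX - 1 page references
+	 * and resetting the bias to USHRT_MAX leaves the outstanding-user
+	 * count (page_ref_count - pagecnt_bias) unchanged at 0 while
+	 * allowing roughly 64K more recycles before the next restock.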
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
+static void txgbe_put_rx_buffer(struct txgbe_ring *rx_ring,
+				struct txgbe_rx_buffer *rx_buffer,
+				struct sk_buff *skb)
+{
+	if (txgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		txgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		if (!IS_ERR(skb) && TXGBE_CB(skb)->dma == rx_buffer->page_dma) {
+			/* the page has been released from the ring */
+			TXGBE_CB(skb)->page_released = true;
+		} else {
+			/* we are not reusing the buffer so unmap it */
+			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma,
+					     txgbe_rx_pg_size(rx_ring),
+					     DMA_FROM_DEVICE,
+					     TXGBE_RX_DMA_ATTR);
+		}
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+
+int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf)
+{
+	struct txgbe_tx_buffer *tx_buffer;
+	union txgbe_tx_desc *tx_desc;
+	u32 len, cmd_type;
+	dma_addr_t dma;
+	u16 i;
+
+	len = xdpf->len;
+
+	if (unlikely(!txgbe_desc_unused(ring)))
+		return TXGBE_XDP_CONSUMED;
+
+	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma))
+		return TXGBE_XDP_CONSUMED;
+
+	/* record the location of the first descriptor for this packet */
+	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+	tx_buffer->bytecount = len;
+	tx_buffer->gso_segs = 1;
+	tx_buffer->protocol = 0;
+
+	i = ring->next_to_use;
+	tx_desc = TXGBE_TX_DESC(ring, i);
+
+	dma_unmap_len_set(tx_buffer, len, len);
+	dma_unmap_addr_set(tx_buffer, dma, dma);
+
+	tx_buffer->xdpf = xdpf;
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	cmd_type = txgbe_tx_cmd_type(tx_buffer->tx_flags);
+	cmd_type |= len | TXGBE_TXD_CMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	tx_desc->read.olinfo_status =
+		cpu_to_le32(len << TXGBE_TXD_PAYLEN_SHIFT);
+
+	/* Avoid any potential race with xdp_xmit and cleanup */
+	smp_wmb();
+	ring->xdp_tx_active++;
+
+	/* set next_to_watch value indicating a packet is present */
+	i++;
+	if (i == ring->count)
+		i = 0;
+
+	tx_buffer->next_to_watch = tx_desc;
+	tx_buffer->next_eop = i;
+	ring->next_to_use = i;
+
+	return TXGBE_XDP_TX;
+}
+
+static struct sk_buff *
+txgbe_run_xdp(struct txgbe_adapter __maybe_unused *adapter,
+	      struct txgbe_ring __maybe_unused *rx_ring,
+	      struct txgbe_rx_buffer __maybe_unused *rx_buffer,
+	      struct xdp_buff __maybe_unused *xdp)
+{
+	int result = TXGBE_XDP_PASS;
+	struct bpf_prog *xdp_prog;
+	struct txgbe_ring *ring;
+	struct xdp_frame *xdpf;
+	int err;
+	u32 act;
+
+	rcu_read_lock();
+	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+	if (!xdp_prog)
+		goto xdp_out;
+
+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		break;
+	case XDP_TX:
+		xdpf = xdp_convert_buff_to_frame(xdp);
+		if (unlikely(!xdpf)) {
+			result = TXGBE_XDP_CONSUMED;
+			break;
+		}
+		ring = adapter->xdp_ring[smp_processor_id() % adapter->num_xdp_queues];
+		if (static_branch_unlikely(&txgbe_xdp_locking_key))
+			spin_lock(&ring->tx_lock);
+		result = txgbe_xmit_xdp_ring(ring, xdpf);
+		if (static_branch_unlikely(&txgbe_xdp_locking_key))
+			spin_unlock(&ring->tx_lock);
+		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+		if (!err)
+			result = TXGBE_XDP_REDIR;
+		else
+			result = TXGBE_XDP_CONSUMED;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+		fallthrough;
+	case XDP_DROP:
+		result = TXGBE_XDP_CONSUMED;
+		break;
+	}
+xdp_out:
+	rcu_read_unlock();
+
+	return ERR_PTR(-result);
+}
+
+static struct txgbe_rx_buffer *txgbe_get_rx_buffer(struct txgbe_ring *rx_ring,
+						   union txgbe_rx_desc *rx_desc,
+						   struct sk_buff **skb,
+						   const unsigned int size)
+{
+	struct txgbe_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;
+
+	/* Delay unmapping of the first packet. It carries the header
+	 * information, HW may still access the header after the writeback.
+	 * Only unmap it when EOP is reached
+	 */
+	if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) {
+		if (!*skb)
+			goto skip_sync;
+	} else {
+		if (*skb)
+			txgbe_dma_sync_frag(rx_ring, *skb);
+	}
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->page_dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+skip_sync:
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static struct sk_buff *txgbe_build_skb(struct txgbe_ring *rx_ring,
+				       struct txgbe_rx_buffer *rx_buffer,
+				       struct xdp_buff *xdp,
+				       union txgbe_rx_desc *rx_desc)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	void *va = xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
+	/* record DMA address if this is the start of a chain of buffers */
+	if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))
+		TXGBE_CB(skb)->dma = rx_buffer->page_dma;
+
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+static struct sk_buff *txgbe_construct_skb(struct txgbe_ring *rx_ring,
+					   struct txgbe_rx_buffer *rx_buffer,
+					   struct xdp_buff *xdp,
+					   union txgbe_rx_desc *rx_desc)
+{
+	unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+	prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+
+	/* Note, we get here by enabling legacy-rx via:
+	 *
+	 *    ethtool --set-priv-flags legacy-rx on
+	 *
+	 * In this mode, we currently get 0 extra XDP headroom as
+	 * opposed to having legacy-rx off, where we process XDP
+	 * packets going to stack via txgbe_build_skb(). The latter
+	 * provides us currently with 192 bytes of headroom.
+	 *
+	 * For txgbe_construct_skb() mode it means that the
+	 * xdp->data_meta will always point to xdp->data, since
+	 * the helper cannot expand the head. Should this ever
+	 * change in future for legacy-rx mode on, then lets also
+	 * add xdp->data_meta handling here.
+	 */
+
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, TXGBE_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	if (size > TXGBE_RX_HDR_SIZE) {
+		if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))
+			TXGBE_CB(skb)->dma = rx_buffer->page_dma;
+
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				xdp->data - page_address(rx_buffer->page),
+				size, truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		memcpy(__skb_put(skb, size),
+		       xdp->data, ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector,
+			      struct txgbe_ring *rx_ring,
+			      int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, xdp_xmit = 0;
+	u16 cleaned_count = txgbe_desc_unused(rx_ring);
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	unsigned int offset = txgbe_rx_offset(rx_ring);
+#if IS_ENABLED(CONFIG_FCOE)
+	int ddp_bytes;
+	unsigned int mss = 0;
+#endif /* CONFIG_FCOE */
+	struct xdp_buff xdp;
+
+	xdp.data_end = NULL;
+	xdp.data = NULL;
+	xdp.rxq = &rx_ring->xdp_rxq;
+
+	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+	while (likely(total_rx_packets < budget)) {
+		union txgbe_rx_desc *rx_desc;
+		struct txgbe_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) {
+			txgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = txgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
+		/* retrieve a buffer from the ring */
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+			xdp.data_meta = xdp.data;
+			xdp.data_hard_start = xdp.data - offset;
+			xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depends on packet size */
+			xdp.frame_sz = txgbe_rx_frame_truesize(rx_ring, size);
+#endif
+			skb = txgbe_run_xdp(adapter, rx_ring, rx_buffer, &xdp);
+		}
+
+		if (IS_ERR(skb)) {
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (TXGBE_XDP_TX | TXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
+				txgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_rx_packets++;
+			total_rx_bytes += size;
+		} else if (skb) {
+			txgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = txgbe_build_skb(rx_ring, rx_buffer,
+					      &xdp, rx_desc);
+		} else {
+			skb = txgbe_construct_skb(rx_ring, rx_buffer,
+						  &xdp, rx_desc);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			break;
+		}
+
+		txgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (txgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (txgbe_cleanup_headers(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		txgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+#if IS_ENABLED(CONFIG_FCOE)
+		/* if ddp, not passing to ULD unless for FCP_RSP or error */
+		if (txgbe_rx_is_fcoe(rx_ring, rx_desc)) {
+			ddp_bytes = txgbe_fcoe_ddp(adapter, rx_desc, skb);
+			/* include DDPed FCoE data */
+			if (ddp_bytes > 0) {
+				if (!mss) {
+					mss = rx_ring->netdev->mtu -
+					      sizeof(struct fcoe_hdr) -
+					      sizeof(struct fc_frame_header) -
+					      sizeof(struct fcoe_crc_eof);
+					if (mss > 512)
+						mss &= ~511;
+				}
+				total_rx_bytes += ddp_bytes;
+				total_rx_packets +=
+					DIV_ROUND_UP(ddp_bytes, mss);
+			}
+			if (!ddp_bytes) {
+				dev_kfree_skb_any(skb);
+				continue;
+			}
+		}
+#endif /* CONFIG_FCOE */
+		txgbe_rx_skb(q_vector, rx_ring, rx_desc, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	if (xdp_xmit & TXGBE_XDP_TX) {
+		struct txgbe_ring *ring = adapter->xdp_ring[smp_processor_id() %
+							    adapter->num_xdp_queues];
+
+		if (static_branch_unlikely(&txgbe_xdp_locking_key))
+			spin_lock(&ring->tx_lock);
+
+		/* Force memory writes to complete before letting h/w know there
+		 * are new descriptors to fetch
+		 */
+		wmb();
+		writel(ring->next_to_use, ring->tail);
+		if (static_branch_unlikely(&txgbe_xdp_locking_key))
+			spin_unlock(&ring->tx_lock);
+	}
+	if (xdp_xmit & TXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return total_rx_packets;
+}
+
+/**
+ * txgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * txgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void txgbe_configure_msix(struct txgbe_adapter *adapter)
+{
+	u16 v_idx;
+
+	/* Populate MSIX to EITR Select */
+	if (adapter->num_vfs >= 32) {
+		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+
+		wr32(&adapter->hw, TXGBE_PX_ITRSEL, eitrsel);
+	} else {
+		wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0);
+	}
+
+	/* Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
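+	 *
+	 * Mapping sketch (illustrative of the loop below): each Rx and
+	 * Tx ring is pointed at its MSI-X vector via
+	 *
+	 *	txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);	(Rx)
+	 *	txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);	(Tx)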
+	 */
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+		struct txgbe_ring *ring;
+
+		txgbe_for_each_ring(ring, q_vector->rx)
+			txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+		txgbe_for_each_ring(ring, q_vector->tx)
+			txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+		txgbe_write_eitr(q_vector);
+	}
+
+	txgbe_set_ivar(adapter, -1, 0, v_idx);
+
+	wr32(&adapter->hw, TXGBE_PX_ITR(v_idx), 1950);
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+static inline bool txgbe_container_is_rx(struct txgbe_q_vector *q_vector,
+					 struct txgbe_ring_container *rc)
+{
+	return &q_vector->rx == rc;
+}
+
+/**
+ * txgbe_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void txgbe_update_itr(struct txgbe_q_vector *q_vector,
+			     struct txgbe_ring_container *ring_container)
+{
+	unsigned int itr = TXGBE_ITR_ADAPTIVE_MIN_USECS |
+			   TXGBE_ITR_ADAPTIVE_LATENCY;
+	unsigned int avg_wire_size, packets, bytes;
+	unsigned long next_update = jiffies;
+
+	/* If we don't have any rings just leave ourselves set for maximum
+	 * possible latency so we take ourselves out of the equation.
+	 */
+	if (!ring_container->ring)
+		return;
+
+	/* If we didn't update within up to 1 - 2 jiffies we can assume
+	 * that either packets are coming in so slow there hasn't been
+	 * any work, or that there is so much work that NAPI is dealing
+	 * with interrupt moderation and we don't need to do anything.
+	 */
+	if (time_after(next_update, ring_container->next_update))
+		goto clear_counts;
+
+	packets = ring_container->total_packets;
+	bytes = ring_container->total_bytes;
+
+	if (txgbe_container_is_rx(q_vector, ring_container)) {
+		/* If Rx and there are 1 to 23 packets and bytes are less than
+		 * 12112 assume insufficient data to use bulk rate limiting
+		 * approach. Instead we will focus on simply trying to target
+		 * receiving 8 times as much data in the next interrupt.
+		 */
+		if (packets && packets < 24 && bytes < 12112) {
+			itr = TXGBE_ITR_ADAPTIVE_LATENCY;
+			avg_wire_size = (bytes + packets * 24) * 2;
+			avg_wire_size = clamp_t(unsigned int,
+						avg_wire_size, 2560, 12800);
+			goto adjust_for_speed;
+		}
+	}
+
+	/* Less than 48 packets we can assume that our current interrupt delay
+	 * is only slightly too low. As such we should increase it by a small
+	 * fixed amount.
+	 */
+	if (packets < 48) {
+		itr = (q_vector->itr >> 2) + TXGBE_ITR_ADAPTIVE_MIN_INC;
+		if (itr > TXGBE_ITR_ADAPTIVE_MAX_USECS)
+			itr = TXGBE_ITR_ADAPTIVE_MAX_USECS;
+
+		/* If sample size is 0 - 7 we should probably switch
+		 * to latency mode instead of trying to control
+		 * things as though we are in bulk.
+		 *
+		 * Otherwise if the number of packets is less than 48
+		 * we should maintain whatever mode we are currently
+		 * in. The range between 8 and 48 is the cross-over
+		 * point between latency and bulk traffic.
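+		 *
+		 * For example (illustrative): 6 packets in the window
+		 * selects latency mode, while 30 packets keeps whatever
+		 * mode is currently set.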
+		 */
+		if (packets < 8)
+			itr += TXGBE_ITR_ADAPTIVE_LATENCY;
+		else
+			itr += ring_container->itr & TXGBE_ITR_ADAPTIVE_LATENCY;
+		goto clear_counts;
+	}
+
+	/* Between 48 and 96 is our "goldilocks" zone where we are working
+	 * out "just right". Just report that our current ITR is good for us.
+	 */
+	if (packets < 96) {
+		itr = q_vector->itr >> 2;
+		goto clear_counts;
+	}
+
+	/* If packet count is 96 or greater we are likely looking at a slight
+	 * overrun of the delay we want. Try halving our delay to see if that
+	 * will cut the number of packets in half per interrupt.
+	 */
+	if (packets < 256) {
+		itr = q_vector->itr >> 3;
+		if (itr < TXGBE_ITR_ADAPTIVE_MIN_USECS)
+			itr = TXGBE_ITR_ADAPTIVE_MIN_USECS;
+		goto clear_counts;
+	}
+
+	/* The paths below assume we are dealing with a bulk ITR since number
+	 * of packets is 256 or greater. We are just going to have to compute
+	 * a value and try to bring the count under control, though for smaller
+	 * packet sizes there isn't much we can do as NAPI polling will likely
+	 * be kicking in sooner rather than later.
+	 */
+	itr = TXGBE_ITR_ADAPTIVE_BULK;
+
+	/* If packet counts are 256 or greater we can assume we have a gross
+	 * overestimation of what the rate should be. Instead of trying to
+	 * fine-tune it just use the formula below to try and dial in an exact
+	 * value given the current packet size of the frame.
+	 */
+	avg_wire_size = bytes / packets;
+
+	/* The following is a crude approximation of:
+	 *  wmem_default / (size + overhead) = desired_pkts_per_int
+	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+	 *
+	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
+	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+	 * formula down to
+	 *
+	 *	 (170 * (size + 24)) / (size + 640) = ITR
+	 *
+	 * We first do some math on the packet size and then finally bitshift
+	 * by 8 after rounding up. We also have to account for PCIe link speed
+	 * difference as ITR scales based on this.
+	 */
+	if (avg_wire_size <= 60) {
+		/* Start at 50k ints/sec */
+		avg_wire_size = 5120;
+	} else if (avg_wire_size <= 316) {
+		/* 50K ints/sec to 16K ints/sec */
+		avg_wire_size *= 40;
+		avg_wire_size += 2720;
+	} else if (avg_wire_size <= 1084) {
+		/* 16K ints/sec to 9.2K ints/sec */
+		avg_wire_size *= 15;
+		avg_wire_size += 11452;
+	} else if (avg_wire_size <= 1980) {
+		/* 9.2K ints/sec to 8K ints/sec */
+		avg_wire_size *= 5;
+		avg_wire_size += 22420;
+	} else {
+		/* plateau at a limit of 8K ints/sec */
+		avg_wire_size = 32256;
+	}
+
+adjust_for_speed:
+	/* Resultant value is 256 times larger than it needs to be. This
+	 * gives us room to adjust the value as needed to either increase
+	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+	 *
+	 * Use addition as we have already recorded the new latency flag
+	 * for the ITR value.
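+	 *
+	 * Worked example (illustrative only, assuming the conventional
+	 * TXGBE_ITR_ADAPTIVE_MIN_INC value of 2 used by similar drivers):
+	 * at 10G with avg_wire_size = 5120 the adjustment is
+	 * DIV_ROUND_UP(5120, 2 * 256) * 2 = 20 ITR units.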
+	 */
+	switch (q_vector->adapter->link_speed) {
+	case TXGBE_LINK_SPEED_25GB_FULL:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    TXGBE_ITR_ADAPTIVE_MIN_INC * 512) *
+		       TXGBE_ITR_ADAPTIVE_MIN_INC;
+		break;
+	case TXGBE_LINK_SPEED_10GB_FULL:
+	case TXGBE_LINK_SPEED_100_FULL:
+	default:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    TXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
+		       TXGBE_ITR_ADAPTIVE_MIN_INC;
+		break;
+	case TXGBE_LINK_SPEED_1GB_FULL:
+	case TXGBE_LINK_SPEED_10_FULL:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    TXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
+		       TXGBE_ITR_ADAPTIVE_MIN_INC;
+		break;
+	}
+
+	/* In the case of a latency specific workload only allow us to
+	 * reduce the ITR by at most 2us. By doing this we should dial
+	 * in so that our number of interrupts is no more than 2x the number
+	 * of packets for the least busy workload. So for example in the case
+	 * of a TCP workload the ack packets being received would set the
+	 * interrupt rate as they are a latency specific workload.
+	 */
+	if ((itr & TXGBE_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr)
+		itr = ring_container->itr - TXGBE_ITR_ADAPTIVE_MIN_INC;
+clear_counts:
+	/* write back value */
+	ring_container->itr = itr;
+
+	/* next update should occur within next jiffy */
+	ring_container->next_update = next_update + 1;
+
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+}
+
+/**
+ * txgbe_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+void txgbe_write_eitr(struct txgbe_q_vector *q_vector)
+{
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	struct txgbe_hw *hw = &adapter->hw;
+	int v_idx = q_vector->v_idx;
+	u32 itr_reg;
+
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		itr_reg = (q_vector->itr >> 3) & TXGBE_AMLITE_MAX_EITR;
+	else
+		itr_reg = q_vector->itr & TXGBE_MAX_EITR;
+	itr_reg |= TXGBE_PX_ITR_CNT_WDIS;
+
+	wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg);
+}
+
+static void txgbe_set_itr(struct txgbe_q_vector *q_vector)
+{
+	u32 new_itr;
+
+	txgbe_update_itr(q_vector, &q_vector->tx);
+	txgbe_update_itr(q_vector, &q_vector->rx);
+
+	/* use the smallest value of new ITR delay calculations */
+	new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
+
+	/* Clear latency flag if set, shift into correct position */
+	new_itr &= TXGBE_ITR_ADAPTIVE_MASK_USECS;
+	new_itr <<= 2;
+
+	if (new_itr != q_vector->itr) {
+		/* save the algorithm value here */
+		q_vector->itr = new_itr;
+
+		txgbe_write_eitr(q_vector);
+	}
+}
+
+/**
+ * txgbe_check_overtemp_subtask - check for over temperature
+ * @adapter: pointer to adapter
+ **/
+static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr = adapter->interrupt_event;
+	s32 temp_state;
+	u16 value = 0;
+
+	if (test_bit(__TXGBE_DOWN, &adapter->state))
+		return;
+	if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+		return;
+
+	/* when the PCI link is lost, skip the overtemp check */
+	pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value);
+	if (value == TXGBE_FAILED_READ_CFG_WORD)
+		return;
+
+	if (!(adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS)) {
+		if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT))
+			return;
+
+		adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT;
+
+		/* Since the warning interrupt is for both ports
+		 * we don't have to check whether this interrupt was
+		 * for our port; and since we may have missed the
+		 * interrupt, we always have to check if we got an LSC.
+		 */
+		if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT))
+			return;
+	}
+
+	temp_state = hw->phy.ops.check_overtemp(hw);
+	if (!temp_state || temp_state == TXGBE_NOT_IMPLEMENTED)
+		return;
+
+	if (temp_state == TXGBE_ERR_UNDERTEMP &&
+	    test_bit(__TXGBE_HANGING, &adapter->state)) {
+		if (hw->mac.type == txgbe_mac_aml ||
+		    hw->mac.type == txgbe_mac_aml40) {
+			adapter->flags3 &= ~TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS;
+			/* re-enable the over_heat msix interrupt */
+			wr32m(&adapter->hw, TXGBE_PX_MISC_IEN, TXGBE_PX_MISC_IEN_OVER_HEAT,
+			      TXGBE_PX_MISC_IEN_OVER_HEAT);
+		}
+		e_crit(drv, "%s\n", txgbe_underheat_msg);
+
+		wr32m(&adapter->hw, TXGBE_RDB_PB_CTL,
+		      TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN);
+		netif_carrier_on(adapter->netdev);
+
+		clear_bit(__TXGBE_HANGING, &adapter->state);
+	} else if (temp_state == TXGBE_ERR_OVERTEMP &&
+		   !test_and_set_bit(__TXGBE_HANGING, &adapter->state)) {
+		if (hw->mac.type == txgbe_mac_aml ||
+		    hw->mac.type == txgbe_mac_aml40)
+			adapter->flags3 |= TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS;
+		e_crit(drv, "%s\n", txgbe_overheat_msg);
+		netif_carrier_off(adapter->netdev);
+
+		wr32m(&adapter->hw, TXGBE_RDB_PB_CTL,
+		      TXGBE_RDB_PB_CTL_RXEN, 0);
+	}
+
+	adapter->interrupt_event = 0;
+}
+
+static void txgbe_check_overtemp_event(struct txgbe_adapter *adapter, u32 eicr)
+{
+	if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+		return;
+
+	if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT))
+		return;
+
+	if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+		adapter->interrupt_event = eicr;
+		adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_EVENT;
+		txgbe_service_event_schedule(adapter);
+	}
+}
+
+static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr_mask = TXGBE_PX_MISC_IC_GPIO;
+	u32 reg;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		if (eicr & eicr_mask) {
+			if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+				wr32(hw, TXGBE_GPIO_INTMASK, 0xFF);
+				reg = rd32(hw, TXGBE_GPIO_INTSTATUS);
+				if (reg & TXGBE_GPIO_INTSTATUS_4) {
+					adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET;
+					wr32(hw, TXGBE_GPIO_EOI,
+					     TXGBE_GPIO_EOI_4);
+					adapter->sfp_poll_time = 0;
+					txgbe_service_event_schedule(adapter);
+				}
+			}
+		}
+	} else if (hw->mac.type == txgbe_mac_sp || hw->mac.type == txgbe_mac_aml) {
+		if (eicr & eicr_mask) {
+			if (!test_bit(__TXGBE_DOWN, &adapter->state)) {
+				wr32(hw, TXGBE_GPIO_INTMASK, 0xFF);
+				reg = rd32(hw, TXGBE_GPIO_INTSTATUS);
+				if (reg & TXGBE_GPIO_INTSTATUS_2) {
+					adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET;
+					wr32(hw, TXGBE_GPIO_EOI,
+					     TXGBE_GPIO_EOI_2);
+					adapter->sfp_poll_time = 0;
+					txgbe_service_event_schedule(adapter);
+				}
+				if (reg & TXGBE_GPIO_INTSTATUS_3) {
+					adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+					wr32(hw, TXGBE_GPIO_EOI,
+					     TXGBE_GPIO_EOI_3);
+					txgbe_service_event_schedule(adapter);
+				}
+
+				if (reg & TXGBE_GPIO_INTSTATUS_6) {
+					wr32(hw, TXGBE_GPIO_EOI,
+					     TXGBE_GPIO_EOI_6);
+					adapter->flags |=
+						TXGBE_FLAG_NEED_LINK_CONFIG;
+					txgbe_service_event_schedule(adapter);
+				}
+			}
+		}
+	}
+}
+
+static void txgbe_check_lsc(struct txgbe_adapter *adapter)
+{
+	adapter->lsc_int++;
+	adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_service_event_schedule(adapter);
+}
+
+static void txgbe_check_phy_event(struct txgbe_adapter *adapter)
+{
+	adapter->flags3 |= TXGBE_FLAG3_PHY_EVENT;
+
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_service_event_schedule(adapter);
+}
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ * @queues: also enable the per-queue interrupts
+ * @flush: flush the register writes when done
+ **/
+void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush)
+{
+	u32 mask = 0;
+	struct txgbe_hw *hw = &adapter->hw;
+	u8 device_type = hw->subsystem_device_id & 0xF0;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		mask = TXGBE_GPIO_INTTYPE_LEVEL_4;
+	} else if (device_type != TXGBE_ID_MAC_XAUI &&
+		   device_type != TXGBE_ID_MAC_SGMII) {
+		mask = TXGBE_GPIO_INTTYPE_LEVEL_2 | TXGBE_GPIO_INTTYPE_LEVEL_3 |
+		       TXGBE_GPIO_INTTYPE_LEVEL_6;
+	}
+	wr32(&adapter->hw, TXGBE_GPIO_INTTYPE_LEVEL, mask);
+
+	/* enable misc interrupt */
+	mask = TXGBE_PX_MISC_IEN_MASK;
+
+	if (hw->mac.type != txgbe_mac_sp)
+		mask &= ~TXGBE_PX_MISC_IEN_ETH_EVENT;
+
+	if (adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+		mask |= TXGBE_PX_MISC_IEN_OVER_HEAT;
+
+	if (adapter->flags3 & TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS)
+		mask &= ~TXGBE_PX_MISC_IEN_OVER_HEAT;
+
+	if ((adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+	    !(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+		mask |= TXGBE_PX_MISC_IEN_FLOW_DIR;
+
+	mask |= TXGBE_PX_MISC_IEN_TIMESYNC;
+
+	if (netif_msg_tx_err(adapter))
+		mask |= TXGBE_PX_MISC_IEN_TXDESC;
+
+	wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask);
+	/* unmask interrupt */
+	txgbe_intr_enable(&adapter->hw, TXGBE_INTR_MISC(adapter));
+	if (queues)
+		txgbe_intr_enable(&adapter->hw, TXGBE_INTR_QALL(adapter));
+
+	/* flush configuration */
+	if (flush)
+		TXGBE_WRITE_FLUSH(&adapter->hw);
+
+	/* enable gpio interrupt */
+	if (hw->mac.type == txgbe_mac_aml40) {
+		mask = TXGBE_GPIO_INTEN_4;
+	} else if (device_type != TXGBE_ID_MAC_XAUI &&
+		   device_type != TXGBE_ID_MAC_SGMII) {
+		mask = TXGBE_GPIO_INTEN_2 | TXGBE_GPIO_INTEN_3 |
+		       TXGBE_GPIO_INTEN_6;
+	}
+	wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask);
+}
+
+static void txgbe_do_lan_reset(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 reset = 0;
+
+	usec_delay(1000);
+	if (hw->bus.lan_id == 0)
+		reset = TXGBE_MIS_RST_LAN0_RST;
+	else
+		reset = TXGBE_MIS_RST_LAN1_RST;
+
+	wr32(hw, TXGBE_MIS_RST,
+	     reset | rd32(hw, TXGBE_MIS_RST));
+	TXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+}
+
+static void txgbe_tx_ring_recovery(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 desc_error[4] = {0, 0, 0, 0};
+	u32 i;
+
+	/* check tdm fatal error */
+	for (i = 0; i < 4; i++) {
+		desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i));
+		if (desc_error[i] != 0) {
+			e_err(drv, "TDM fatal error queue\n");
+			txgbe_tx_timeout_reset(adapter);
+			return;
+		}
+	}
+
+	/* check tdm non-fatal error */
+	for (i = 0; i < 4; i++)
+		desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i));
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (desc_error[i / 32] & (1 << i % 32)) {
+			adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_Q_RESET;
+			e_err(drv, "TDM non-fatal error, queue[%d]", i);
+		}
+	}
+}
+
+static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
+{
+	struct txgbe_adapter *adapter = data;
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr;
+	u32 ecc;
+	u32 value = 0;
+	u16 vid;
+
+	eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+
+	if (eicr & TXGBE_PX_MISC_IC_ETH_AN) {
+		if (adapter->backplane_an)
+			txgbe_check_lsc(adapter);
+	}
+
+	if (hw->mac.type == txgbe_mac_aml ||
+	    hw->mac.type == txgbe_mac_aml40) {
+		if (eicr & TXGBE_PX_MISC_AML_ETH_LK_CHANGE)
+			txgbe_check_lsc(adapter);
+		if (eicr & TXGBE_PX_MISC_AML_ETH_PHY_EVENT)
+			txgbe_check_phy_event(adapter);
+	} else {
+		if (eicr & (TXGBE_PX_MISC_IC_ETH_LK |
+			    TXGBE_PX_MISC_IC_ETH_LKDN |
+			    TXGBE_PX_MISC_IC_ETH_EVENT))
+			txgbe_check_lsc(adapter);
+	}
+
+	if (eicr & TXGBE_PX_MISC_IC_VF_MBOX)
+		txgbe_msg_task(adapter);
+
+	if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) {
+		ERROR_REPORT1(TXGBE_ERROR_POLLING,
+			      "lan id %d, PCIe request error found.\n", hw->bus.lan_id);
+		pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid);
+		if (vid == TXGBE_FAILED_READ_CFG_WORD) {
+			ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCIe link is lost.\n");
+			/* when the PCI link is lost, skip the overtemp check */
+			if (hw->bus.lan_id == 0) {
+				adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER;
+				txgbe_service_event_schedule(adapter);
+			} else {
+				wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1);
+			}
+		} else {
+			adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED;
+		}
+	}
+
+	if (eicr & TXGBE_PX_MISC_IC_INT_ERR) {
+		e_info(link, "Received unrecoverable ECC Err, initiating reset.\n");
+		ecc = rd32(hw, TXGBE_MIS_ST);
+		if (((ecc & TXGBE_MIS_ST_LAN0_ECC) && hw->bus.lan_id == 0) ||
+		    ((ecc & TXGBE_MIS_ST_LAN1_ECC) && hw->bus.lan_id == 1))
+			adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED |
+					   TXGBE_FLAG2_ECC_ERR_RESET;
+
+		txgbe_service_event_schedule(adapter);
+	}
+	if (eicr & TXGBE_PX_MISC_IC_DEV_RST) {
+		value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1);
+		if (!(value & 0x1)) {
+			adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED;
+			txgbe_service_event_schedule(adapter);
+		}
+	}
+	if (eicr & TXGBE_PX_MISC_IC_STALL) {
+		adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED;
+		txgbe_service_event_schedule(adapter);
+	}
+
+	if (eicr & TXGBE_PX_MISC_IC_TXDESC) {
+		txgbe_tx_ring_recovery(adapter);
+		txgbe_service_event_schedule(adapter);
+	}
+
+	/* Handle Flow Director Full threshold interrupt */
+	if (eicr & TXGBE_PX_MISC_IC_FLOW_DIR) {
+		int reinit_count = 0;
+		int i;
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			struct txgbe_ring *ring = adapter->tx_ring[i];
+
+			if (test_and_clear_bit(__TXGBE_TX_FDIR_INIT_DONE,
+					       &ring->state))
+				reinit_count++;
+		}
+		if (reinit_count) {
+			/* no more flow director interrupts until after init */
+			wr32m(hw, TXGBE_PX_MISC_IEN,
+			      TXGBE_PX_MISC_IEN_FLOW_DIR, 0);
+			adapter->flags2 |=
+				TXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+			txgbe_service_event_schedule(adapter);
+		}
+	}
+
+	txgbe_check_sfp_event(adapter, eicr);
+	txgbe_check_overtemp_event(adapter, eicr);
+
+	if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC))
+		txgbe_ptp_check_pps_event(adapter);
+
+	/* re-enable the original interrupt state, no lsc, no queues */
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_irq_enable(adapter, false, false);
+
+	wr32(hw, TXGBE_GPIO_INTMASK, 0x0);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data)
+{
+	struct txgbe_q_vector *q_vector = data;
+
+	/* EIAM disabled interrupts (on this vector) for us */
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
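+ *
+ * Budget split sketch (illustrative of the code below): with budget = 64
+ * and three Rx rings on the vector, each ring is polled with
+ * per_ring_budget = max(64 / 3, 1) = 21 packets.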
+ **/
+int txgbe_poll(struct napi_struct *napi, int budget)
+{
+	struct txgbe_q_vector *q_vector =
+			container_of(napi, struct txgbe_q_vector, napi);
+	struct txgbe_adapter *adapter = q_vector->adapter;
+	struct txgbe_ring *ring;
+	int per_ring_budget;
+	bool clean_complete = true;
+
+	txgbe_for_each_ring(ring, q_vector->tx) {
+		bool wd = ring->xsk_pool ?
+			  txgbe_clean_xdp_tx_irq(q_vector, ring) :
+			  txgbe_clean_tx_irq(q_vector, ring, budget);
+
+		if (!wd)
+			clean_complete = false;
+	}
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	txgbe_for_each_ring(ring, q_vector->rx) {
+		int cleaned = ring->xsk_pool ?
+			      txgbe_clean_rx_irq_zc(q_vector, ring,
+						    per_ring_budget) :
+			      txgbe_clean_rx_irq(q_vector, ring,
+						 per_ring_budget);
+
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+	/* Exit if we are called by netpoll */
+	if (budget <= 0)
+		return budget;
+
+	/* If all work not completed, return budget and keep polling */
+	if (!clean_complete)
+		return budget;
+
+	/* all work done, exit the polling mode */
+	napi_complete(napi);
+	if (adapter->rx_itr_setting == 1)
+		txgbe_set_itr(q_vector);
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_intr_enable(&adapter->hw,
+				  TXGBE_INTR_Q(q_vector->v_idx));
+
+	return 0;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * txgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int vector, err;
+	int ri = 0, ti = 0;
+
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-TxRx-%d", netdev->name, ri++);
+			ti++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-rx-%d", netdev->name, ri++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-tx-%d", netdev->name, ti++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &txgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
+		if (err) {
+			e_err(probe, "request_irq failed for MSIX interrupt '%s' Error: %d\n",
+			      q_vector->name, err);
+			goto free_queue_irqs;
+		}
+
+		/* If Flow Director is enabled, set interrupt affinity */
+		if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) {
+			/* assign the mask for this irq */
+			irq_set_affinity_hint(entry->vector,
+					      &q_vector->affinity_mask);
+		}
+	}
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  txgbe_msix_other, 0, netdev->name, adapter);
+	if (err) {
+		e_err(probe, "request_irq for msix_other failed: %d\n", err);
+		goto free_queue_irqs;
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+				      NULL);
+		free_irq(adapter->msix_entries[vector].vector,
+			 adapter->q_vector[vector]);
+	}
+	adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	return err;
+}
+
+/**
+ * txgbe_intr - legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+	struct txgbe_adapter *adapter = data;
+	struct txgbe_q_vector *q_vector = adapter->q_vector[0];
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 eicr_misc;
+	u32 value;
+	u16 pci_value;
+
+	if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED)) {
+		pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_value);
+		if (!(pci_value & PCI_STATUS_INTERRUPT))
+			return IRQ_HANDLED;
+		wr32(&adapter->hw, TXGBE_PX_INTA, 1);
+	}
+
+	eicr_misc = txgbe_misc_isb(adapter, TXGBE_ISB_MISC);
+	if (eicr_misc & TXGBE_PX_MISC_IC_ETH_AN) {
+		if (adapter->backplane_an)
+			txgbe_service_event_schedule(adapter);
+	}
+
+	if (eicr_misc & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN))
+		txgbe_check_lsc(adapter);
+
+	if (eicr_misc & TXGBE_PX_MISC_IC_INT_ERR) {
+		e_info(link, "Received unrecoverable ECC Err, initiating reset.\n");
+		adapter->flags2 |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED |
+				   TXGBE_FLAG2_ECC_ERR_RESET;
+		txgbe_service_event_schedule(adapter);
+	}
+
+	if (eicr_misc & TXGBE_PX_MISC_IC_DEV_RST) {
+		value = rd32(hw, TXGBE_TSC_LSEC_PKTNUM1);
+		if (!(value & 0x1)) {
+			adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED;
+			txgbe_service_event_schedule(adapter);
+		}
+	}
+	txgbe_check_sfp_event(adapter, eicr_misc);
+	txgbe_check_overtemp_event(adapter, eicr_misc);
+
+	if (unlikely(eicr_misc & TXGBE_PX_MISC_IC_TIMESYNC))
+		txgbe_ptp_check_pps_event(adapter);
+
+	adapter->isb_mem[TXGBE_ISB_MISC] = 0;
+	/* would disable interrupts here but it is auto disabled */
+	napi_schedule_irqoff(&q_vector->napi);
+
+	/* re-enable link (maybe) and non-queue interrupts, no flush.
+	 * txgbe_poll will re-enable the queue interrupts
+	 */
+	if (!test_bit(__TXGBE_DOWN, &adapter->state))
+		txgbe_irq_enable(adapter, false, false);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
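+ *
+ * Fallback order sketch (illustrative): MSI-X (one vector per q_vector
+ * plus one for misc events) is tried first, then MSI, then legacy INTx
+ * requested with IRQF_SHARED as the last resort.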
+ **/
+static int txgbe_request_irq(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED)
+		err = txgbe_request_msix_irqs(adapter);
+	else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED)
+		err = request_irq(adapter->pdev->irq, &txgbe_intr, 0,
+				  netdev->name, adapter);
+	else
+		err = request_irq(adapter->pdev->irq, &txgbe_intr, IRQF_SHARED,
+				  netdev->name, adapter);
+
+	if (err)
+		e_err(probe, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void txgbe_free_irq(struct txgbe_adapter *adapter)
+{
+	int vector;
+
+	if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) {
+		free_irq(adapter->pdev->irq, adapter);
+		return;
+	}
+
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct txgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+		free_irq(entry->vector, q_vector);
+	}
+
+	free_irq(adapter->msix_entries[vector++].vector, adapter);
+}
+
+/**
+ * txgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+void txgbe_irq_disable(struct txgbe_adapter *adapter)
+{
+	wr32(&adapter->hw, TXGBE_PX_MISC_IEN, 0);
+	txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL);
+
+	TXGBE_WRITE_FLUSH(&adapter->hw);
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+		int vector;
+
+		for (vector = 0; vector < adapter->num_q_vectors; vector++)
+			synchronize_irq(adapter->msix_entries[vector].vector);
+
+		synchronize_irq(adapter->msix_entries[vector++].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+/**
+ * txgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
+ **/
+static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter)
+{
+	struct txgbe_q_vector *q_vector = adapter->q_vector[0];
+	struct txgbe_ring *ring;
+
+	txgbe_write_eitr(q_vector);
+
+	txgbe_for_each_ring(ring, q_vector->rx)
+		txgbe_set_ivar(adapter, 0, ring->reg_idx, 0);
+
+	txgbe_for_each_ring(ring, q_vector->tx)
+		txgbe_set_ivar(adapter, 1, ring->reg_idx, 0);
+
+	txgbe_set_ivar(adapter, -1, 0, 1);
+
+	e_info(hw, "Legacy interrupt IVAR setup done\n");
+}
+
+/* amlite: tx header wb */
+static int txgbe_setup_headwb_resources(struct txgbe_ring *ring)
+{
+	struct txgbe_adapter *adapter;
+	struct txgbe_hw *hw;
+	struct device *dev = ring->dev;
+	u8 headwb_size = 1;
+
+	if (ring->q_vector) {
+		adapter = ring->q_vector->adapter;
+		hw = &adapter->hw;
+		if (hw->mac.type == txgbe_mac_sp)
+			return 0;
+	} else {
+		return 0;
+	}
+
+	ring->headwb_mem = dma_alloc_coherent(dev,
+					      sizeof(u32) * headwb_size,
+					      &ring->headwb_dma,
+					      GFP_KERNEL);
+	if (!ring->headwb_mem) {
+		e_err(drv, "%s no mem\n", __func__);
+		return -ENOMEM;
+	}
+	memset(ring->headwb_mem, 0, sizeof(u32) * headwb_size);
+
+	return 0;
+}
+
+/**
+ * txgbe_configure_tx_ring - Configure Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
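+ *
+ * Programming sketch (illustrative of the body below): the descriptor
+ * base address and the head/tail pointers are written per ring, e.g.
+ *
+ *	wr32(hw, TXGBE_PX_TR_BAL(reg_idx), ring->dma & DMA_BIT_MASK(32));
+ *	wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0);
+ *	wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0);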
+ **/
+void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = TXGBE_PX_TR_CFG_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	ring->xsk_pool = NULL;
+	if (ring_is_xdp(ring))
+		ring->xsk_pool = txgbe_xsk_umem(adapter, ring);
+
+	/* disable queue to avoid issues while updating state */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);
+	TXGBE_WRITE_FLUSH(hw);
+
+	wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+#ifdef CONFIG_64BIT
+	wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32);
+#endif
+	/* reset head and tail pointers */
+	wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0);
+	wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0);
+	ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+	ring->next_to_free = 0;
+
+	txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT;
+
+	/* set WTHRESH to encourage burst writeback, it should not be set
+	 * higher than 1 when:
+	 * - ITR is 0 as it could cause false TX hangs
+	 * - ITR is set to > 100k int/sec and BQL is enabled
+	 *
+	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+	txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT;
+
+	/* reinitialize flowdirector state */
+	if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	/* initialize XPS */
+	if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+		struct txgbe_q_vector *q_vector = ring->q_vector;
+
+		if (q_vector)
+			netif_set_xps_queue(adapter->netdev,
+					    &q_vector->affinity_mask,
+					    ring->queue_index);
+	}
+
+	clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state);
+
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+		wr32(hw, TXGBE_PX_TR_HEAD_ADDRL(reg_idx),
+		     ring->headwb_dma & DMA_BIT_MASK(32));
+#ifdef CONFIG_64BIT
+		wr32(hw, TXGBE_PX_TR_HEAD_ADDRH(reg_idx), ring->headwb_dma >> 32);
+#endif
+		txdctl |= TXGBE_PX_TR_CFG_HEAD_WB;
+	}
+
+	/* enable queue */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		usleep_range(1000, 2000);
+		txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx));
+	} while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE));
+	if (!wait_loop)
+		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * txgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
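+ *
+ * Ordering sketch (illustrative): TDM_CTL.TE is set before any queue is
+ * enabled, each Tx/XDP ring is then configured, and the MAC transmitter
+ * is enabled last.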
+ **/
+static void txgbe_configure_tx(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	/* TDM_CTL.TE must be before Tx queues are enabled */
+	wr32m(hw, TXGBE_TDM_CTL,
+	      TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE);
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_xdp_queues; i++)
+		txgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
+	wr32m(hw, TXGBE_TSC_BUF_AE, 0x3FF, 0x10);
+
+	/* enable the security tx path */
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		hw->mac.ops.enable_sec_tx_path(hw);
+
+	/* enable mac transmitter */
+	wr32m(hw, TXGBE_MAC_TX_CFG,
+	      TXGBE_MAC_TX_CFG_TE, TXGBE_MAC_TX_CFG_TE);
+}
+
+static void txgbe_enable_rx_drop(struct txgbe_adapter *adapter,
+				 struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 reg_idx = ring->reg_idx;
+	u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+
+	srrctl |= TXGBE_PX_RR_CFG_DROP_EN;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void txgbe_disable_rx_drop(struct txgbe_adapter *adapter,
+				  struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 reg_idx = ring->reg_idx;
+	u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx));
+
+	srrctl &= ~TXGBE_PX_RR_CFG_DROP_EN;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter)
+{
+	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+	int i;
+
+	if (adapter->txgbe_ieee_pfc)
+		pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en);
+
+	/* We should set the drop enable bit if:
+	 *  SR-IOV is enabled
+	 *   or
+	 *  Number of Rx queues > 1 and flow control is disabled
+	 *
+	 * This allows us to avoid head of line blocking for security
+	 * and performance reasons.
+	 */
+	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
+				 !(adapter->hw.fc.current_mode & txgbe_fc_tx_pause) && !pfc_en)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			txgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			txgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
+	}
+}
+
+static void txgbe_configure_srrctl(struct txgbe_adapter *adapter,
+				   struct txgbe_ring *rx_ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 srrctl;
+	u16 reg_idx = rx_ring->reg_idx;
+
+	srrctl = rd32m(hw, TXGBE_PX_RR_CFG(reg_idx),
+		       ~(TXGBE_PX_RR_CFG_RR_HDR_SZ |
+			 TXGBE_PX_RR_CFG_RR_BUF_SZ |
+			 TXGBE_PX_RR_CFG_SPLIT_MODE));
+
+	/* configure header buffer length, needed for RSC */
+	srrctl |= TXGBE_RX_HDR_SIZE << TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT;
+
+	/* configure the packet buffer length */
+	if (test_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state))
+		srrctl |= TXGBE_RXBUFFER_3K >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT;
+	else
+		srrctl |= TXGBE_RXBUFFER_2K >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT;
+
+	if (ring_is_hs_enabled(rx_ring))
+		srrctl |= TXGBE_PX_RR_CFG_SPLIT_MODE;
+
+	wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl);
+}
+
+/**
+ * txgbe_rss_indir_tbl_entries - return the number of RSS indirection table entries
+ * @adapter: device handle
+ */
+u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter)
+{
+	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
+		return 64;
+	else
+		return 128;
+}
+
+/**
+ * txgbe_store_reta - write the RETA table to HW
+ * @adapter: device handle
+ *
+ * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
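+ *
+ * Packing example (illustrative): four 8-bit entries share one 32-bit
+ * register, so entries {1, 2, 3, 4} at i = 0..3 produce
+ * reta = 0x04030201, written to TXGBE_RDB_RSSTBL(0).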
+ */ +void txgbe_store_reta(struct txgbe_adapter *adapter) +{ + u32 i, reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +void txgbe_store_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + unsigned int pf_pool = adapter->num_vfs; + u8 *indir_tbl = adapter->rss_indir_tbl; + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u32 i; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_VMRSSTBL(i >> 2, pf_pool), reta); + reta = 0; + } + } +} + +void txgbe_setup_reta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 4 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && rss_i < 2) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_reta(adapter); +} + +static void txgbe_setup_vfreta(struct txgbe_adapter *adapter) +{ + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), *(adapter->rss_key + i)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_vfreta(adapter); +} + +static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, DCB and RSS do not coexist at the same time */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED && + adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_PCSD, TXGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = TXGBE_RDB_RA_CTL_RSS_IPV4 | + TXGBE_RDB_RA_CTL_RSS_IPV4_TCP | + TXGBE_RDB_RA_CTL_RSS_IPV6 | + TXGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pool = adapter->num_vfs; + u32 vfmrqc; + + /* Setup RSS through the VF registers */ + txgbe_setup_vfreta(adapter); + + vfmrqc = rd32(hw, TXGBE_RDB_PL_CFG(pool)); + vfmrqc &= ~TXGBE_RDB_PL_CFG_RSS_MASK; + vfmrqc |= rss_field | 
TXGBE_RDB_PL_CFG_RSS_EN; + wr32(hw, TXGBE_RDB_PL_CFG(pool), vfmrqc); + + /* Enable VF RSS mode */ + rss_field |= TXGBE_RDB_RA_CTL_MULTI_RSS; + } else { + txgbe_setup_reta(adapter); + } + + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) + rss_field |= TXGBE_RDB_RA_CTL_RSS_EN; + + wr32(hw, TXGBE_RDB_RA_CTL, rss_field); +} + +/** + * txgbe_clear_rscctl - disable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RSC, 0); + + clear_ring_rsc_enabled(ring); +} + +/** + * txgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rscctrl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + rscctrl |= TXGBE_PX_RR_CFG_RSC; + /* we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65536 + */ +#if (MAX_SKB_FRAGS >= 16) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_16; +#elif (MAX_SKB_FRAGS >= 8) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_8; +#elif (MAX_SKB_FRAGS >= 4) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_4; +#else + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_1; +#endif + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rscctrl); +} + +static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + do { + usleep_range(1000, 2000); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n", + reg_idx); + } +} + +/* disable the specified rx ring/queue */ +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + usleep_range(10, 20); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rxq %d not cleared within the polling period\n", + reg_idx); + } +} + +void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + netdev_features_t features = netdev->features; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + txgbe_disable_rx_queue(adapter, ring); + + if (ring->q_vector) + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = txgbe_xsk_umem(adapter, 
ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + } else { + if (ring->q_vector) + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + + wr32(hw, TXGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); +#ifdef CONFIG_64BIT + wr32(hw, TXGBE_PX_RR_BAH(reg_idx), rdba >> 32); +#endif + if (ring->count == TXGBE_MAX_RXD) + rxdctl |= 0 << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rxdctl |= TXGBE_PX_RR_CFG_VLAN; + else + rxdctl &= ~TXGBE_PX_RR_CFG_VLAN; + + rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + rxdctl |= TXGBE_PX_RR_CFG_DESC_MERGE; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + ring->rx_offset = txgbe_rx_offset(ring); + + txgbe_configure_srrctl(adapter, ring); + /* In ESX, RSCCTL configuration is done on demand */ + txgbe_configure_rscctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, TXGBE_PX_RR_CFG_RR_EN); + + txgbe_rx_desc_queue_enable(adapter, ring); + + if (ring->xsk_pool) + txgbe_alloc_rx_buffers_zc(ring, txgbe_desc_unused(ring)); + else + txgbe_alloc_rx_buffers(ring, txgbe_desc_unused(ring)); +} + +static void txgbe_setup_psrtype(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = TXGBE_RDB_PL_CFG_L4HDR | + TXGBE_RDB_PL_CFG_L3HDR | + TXGBE_RDB_PL_CFG_L2HDR | + TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + TXGBE_RDB_PL_CFG_TUN_TUNHDR; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + for_each_set_bit(pool, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) + wr32(hw, TXGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); +} + +/** + * txgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter: the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings required to switch bridge modes. 
+ **/ +static void txgbe_configure_bridge_mode(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (adapter->flags & TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, 0); + + } else { + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + } +} + +static void txgbe_configure_virtualization(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 i; + + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP), 0); + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC), 0); + } + return; + } + + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK | + TXGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << TXGBE_PSR_VM_CTL_POOL_SHIFT | + TXGBE_PSR_VM_CTL_REPLEN); + + for_each_set_bit(i, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + wr32m(hw, TXGBE_PSR_VM_L2CTL(i), + TXGBE_PSR_VM_L2CTL_AUPE, TXGBE_PSR_VM_L2CTL_AUPE); + } + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return; + + /* configure default bridge settings */ + txgbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in the loop below. 
+ */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + TXGBE_ETH_P_LLDP)); /* LLDP eth protocol type */ + + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vfinfo[i].spoofchk_enabled) + txgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); + /* enable ethertype anti spoofing if hw supports it */ + hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + } +} + +static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct txgbe_ring *rx_ring; + int i; + u32 mhadd; + +#if IS_ENABLED(CONFIG_FCOE) + /* adjust max frame to be able to do baby jumbo for FCoE */ + if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED && + max_frame < TXGBE_FCOE_JUMBO_FRAME_SIZE) + max_frame = TXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, TXGBE_PSR_MAX_SZ); + if (max_frame != mhadd) + wr32(hw, TXGBE_PSR_MAX_SZ, max_frame); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + if (adapter->flags & TXGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = TXGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else { + clear_ring_hs_enabled(rx_ring); + } + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + + clear_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if IS_ENABLED(CONFIG_FCOE) + if (test_bit(__TXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif + if (adapter->flags2 & TXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (TXGBE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + set_bit(__TXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif + } +} + +/** + * txgbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
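+ * + * Roughly in the order of the body below: disable receives, set up PSRTYPE + * and the CRC-strip/RSC policy, program MRQC/RETA for queue distribution, + * size the Rx buffers (which must happen before per-ring init), configure + * each ring, then re-enable the Rx DMA engine.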
+ **/ +static void txgbe_configure_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl, psrctl; + + /* disable receives while setting up the descriptors */ + hw->mac.ops.disable_rx(hw); + + txgbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_CRC_STRIP, TXGBE_RSC_CTL_CRC_STRIP); + + /* RSC Setup */ + psrctl = rd32m(hw, TXGBE_PSR_CTL, ~TXGBE_PSR_CTL_RSC_DIS); + psrctl |= TXGBE_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) + psrctl |= TXGBE_PSR_CTL_RSC_DIS; + wr32(hw, TXGBE_PSR_CTL, psrctl); + + /* Program registers for the distribution of queues */ + txgbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + txgbe_set_rx_buffer_len(adapter); + + wr32(hw, TXGBE_RDM_DCACHE_CTL, TXGBE_RDM_DCACHE_CTL_EN); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + wr32m(hw, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CNT_DIS, + ~TXGBE_RDM_RSC_CTL_FREE_CNT_DIS); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= TXGBE_RDB_PB_CTL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static int txgbe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { + if (!vid || !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, true); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), true); + } + } + + return 0; +} + +static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) + return 0; + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + if (vid && !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(hw, vid, pool_ndx, false); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + hw->mac.ops.set_vfta(hw, vid, + VMDQ_P(i), false); + } + } + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * txgbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + /* leave vlan tag stripping enabled for DCB */ + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, 0); + } +} + +/** + * txgbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + 
*/ +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, TXGBE_PX_RR_CFG_VLAN); + } +} + +void txgbe_vlan_mode(struct net_device *netdev, u32 features) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool enable; + + enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)); + + if (enable) + /* enable VLAN tag insert/strip */ + txgbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + txgbe_vlan_strip_disable(adapter); +} + +static void txgbe_restore_vlan(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid; + + txgbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + txgbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +} + +static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + struct netdev_hw_addr *mc_ptr; + +#ifdef CONFIG_PCI_IOV + struct txgbe_adapter *adapter = hw->back; +#endif /* CONFIG_PCI_IOV */ + u8 *addr = *mc_addr_ptr; + + /* VMDQ_P implicitly uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else { + *mc_addr_ptr = NULL; + } + + return addr; +} + +/** + * txgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
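+ * + * The hardware update is driven by txgbe_addr_list_itr(): each call hands + * back the current MAC address and advances *mc_addr_ptr to the next + * netdev_hw_addr in the list (NULL once the tail is reached), so + * update_mc_addr_list() never needs to know the list layout. + *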
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int txgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + if (netdev_mc_empty(netdev)) { + hw->mac.ops.update_mc_addr_list(hw, NULL, 0, + txgbe_addr_list_itr, true); + } else { + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + + addr_count = netdev_mc_count(netdev); + + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + txgbe_addr_list_itr, true); + } + +#ifdef CONFIG_PCI_IOV + txgbe_restore_vf_multicasts(adapter); +#endif + return addr_count; +} + +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); + } +} + +static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + TXGBE_MAC_STATE_IN_USE) { + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + hw->mac.ops.clear_rar(hw, i); + } + adapter->mac_table[i].state &= + ~(TXGBE_MAC_STATE_MODIFIED); + } + } +} + +int txgbe_available_rars(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, + u8 *addr) +{ + struct txgbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | + TXGBE_MAC_STATE_IN_USE); + hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); +} + +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools != (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools |= (1ULL << pool); + txgbe_sync_mac_table(adapter); + return i; + } + } + } + } + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) + continue; + + adapter->mac_table[i].state |= (TXGBE_MAC_STATE_MODIFIED | + TXGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + 
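+		/* + * mac_table[i].pools is a pool bitmask: bit n selects pool n, so + * e.g. the PF pool VMDQ_P(0) sets bit 0 when SR-IOV is off and a + * VF in pool 3 would set bit 3; one RAR entry can thus serve + * several pools. + */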
adapter->mac_table[i].pools |= (1ULL << pool); + txgbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) +{ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + txgbe_sync_mac_table(adapter); +} + +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { + if (adapter->mac_table[i].pools & (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + if (adapter->mac_table[i].pools == (1ULL << pool)) + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + + adapter->mac_table[i].pools &= ~(1ULL << pool); + txgbe_sync_mac_table(adapter); + } + return 0; + } + + if (adapter->mac_table[i].pools != (1 << pool)) + continue; + if (!ether_addr_equal(addr, adapter->mac_table[i].addr)) + continue; + + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + txgbe_sync_mac_table(adapter); + return 0; + } + return -ENOMEM; +} + +static int txgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = txgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int txgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + +static int txgbe_add_cloud_switcher(struct txgbe_adapter *adapter, + u32 key, u16 __always_unused pool) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); + wr32(hw, TXGBE_PSR_CL_SWC_KEY, key); + wr32(hw, TXGBE_PSR_CL_SWC_CTL, + TXGBE_PSR_CL_SWC_CTL_VLD | TXGBE_PSR_CL_SWC_CTL_DST_MSK); + wr32(hw, TXGBE_PSR_CL_SWC_VM_L, 0x1); + wr32(hw, TXGBE_PSR_CL_SWC_VM_H, 0x0); + + return 0; +} + +static void txgbe_vlan_promisc_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + u32 vind; + u32 bits; + + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + return; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= TXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + vind = VMDQ_P(0); + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << 
(vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } + + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < hw->mac.vft_size; i++) + wr32(hw, TXGBE_PSR_VLAN_TBL(i), ~0U); +} + +static void txgbe_scrub_vfta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, vid, bits; + u32 vfta; + u32 vind; + u32 vlvf; + + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + + /* pull VLAN ID from VLVF */ + vid = vlvf & ~TXGBE_PSR_VLAN_SWC_VIEN; + + if (vlvf & TXGBE_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + vind = VMDQ_P(0); + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } + + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < hw->mac.vft_size; i++) { + vfta = hw->mac.vft_shadow[i]; + wr32(hw, TXGBE_PSR_VLAN_TBL(i), vfta); + } +} + +static void txgbe_vlan_promisc_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + + txgbe_scrub_vfta(adapter); +} + +/** + * txgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
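+ * + * Three registers cooperate below: TXGBE_PSR_CTL (fctrl) holds the global + * unicast/multicast promiscuous bits, TXGBE_PSR_VM_L2CTL (vmolr) the + * per-pool equivalents for VMDQ_P(0), and TXGBE_PSR_VLAN_CTL (vlnctrl) the + * VLAN filter enable.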
+ **/ +void txgbe_set_rx_mode(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + netdev_features_t features = netdev->features; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, TXGBE_PSR_CTL, + ~(TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(TXGBE_PSR_VM_L2CTL_UPE | + TXGBE_PSR_VM_L2CTL_MPE | + TXGBE_PSR_VM_L2CTL_ROPE | + TXGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, TXGBE_PSR_VLAN_CTL, + ~(TXGBE_PSR_VLAN_CTL_VFE | + TXGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_MFE; + vmolr |= TXGBE_PSR_VM_L2CTL_BAM | + TXGBE_PSR_VM_L2CTL_AUPE | + TXGBE_PSR_VM_L2CTL_VACC; + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE); + /* the PF doesn't want packets routed to the VFs, so clear UPE */ + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + if ((adapter->flags & (TXGBE_FLAG_VMDQ_ENABLED | + TXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= TXGBE_PSR_CTL_MPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. */ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_SAVE_MAC_ERR, + TXGBE_RSC_CTL_SAVE_MAC_ERR); + + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + } else { + vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | TXGBE_PSR_VM_L2CTL_ROMPE; + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, txgbe_uc_sync, txgbe_uc_unsync)) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; + fctrl |= TXGBE_PSR_CTL_UPE; + e_dev_warn("netdev uc count is %d, hw available mac entry count is %d, enable promisc mode\n", + netdev_uc_count(netdev), txgbe_available_rars(adapter)); + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = txgbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, TXGBE_PSR_CTL, fctrl); + wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + txgbe_vlan_promisc_disable(adapter); + else + txgbe_vlan_promisc_enable(adapter); + + /* enable cloud switch */ + if (adapter->flags2 & TXGBE_FLAG2_CLOUD_SWITCH_ENABLED) + txgbe_add_cloud_switcher(adapter, 0x10, 0); +} + +static void txgbe_napi_enable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} + +static void txgbe_napi_disable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +s32 
txgbe_dcb_hw_ets(struct txgbe_hw *hw, struct ieee_ets *ets, int max_frame) +{ + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int i; + + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms; if + * we receive some other value from dcbnl, + * throw an error + */ + return -EINVAL; + } + } + + txgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame); + return txgbe_dcb_hw_config(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); +} + +void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter) +{ + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; +} + +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ +#define TXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static inline unsigned long txgbe_tso_features(void) +{ + unsigned long features = 0; + + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL | TXGBE_GSO_PARTIAL_FEATURES; + + return features; +} + +static void txgbe_configure_dcb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; + + if (!(adapter->flags & TXGBE_FLAG_DCB_ENABLED)) + return; + +#if IS_ENABLED(CONFIG_FCOE) + if (netdev->features & NETIF_F_FCOE_MTU) + max_frame = max_t(int, max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + if (adapter->txgbe_ieee_ets) + txgbe_dcb_hw_ets(&adapter->hw, + adapter->txgbe_ieee_ets, + max_frame); + + if (adapter->txgbe_ieee_pfc && adapter->txgbe_ieee_ets) { + struct ieee_pfc *pfc = adapter->txgbe_ieee_pfc; + u8 *tc = adapter->txgbe_ieee_ets->prio_tc; + + txgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); + } + } else { + txgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + TXGBE_DCB_TX_CONFIG); + txgbe_dcb_calculate_tc_credits_cee(hw, + &adapter->dcb_cfg, + max_frame, + TXGBE_DCB_RX_CONFIG); + txgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); + } + + /* Enable RSS Hash per TC */ + while (rss_i) { + msb++; + rss_i >>= 1; + } + + /* write msb to all 8 TCs in one write */ + wr32(hw, TXGBE_RDB_RSS_TC, msb * 0x11111111); +} + +static void txgbe_configure_lli(struct txgbe_adapter *adapter) +{ + /* lli should only be enabled with MSI-X or MSI */ + if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + (TXGBE_RDB_5T_CTL1_LLI | + TXGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, TXGBE_RDB_ETYPE_CLS(0), + TXGBE_RDB_ETYPE_CLS_LLI); + wr32(&adapter->hw, TXGBE_PSR_ETYPE_SWC(0), + (adapter->lli_etype | + TXGBE_PSR_ETYPE_SWC_FILTER_EN)); + } + + if (adapter->lli_port) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + (TXGBE_RDB_5T_CTL1_LLI | + TXGBE_RDB_5T_CTL1_SIZE_BP)); + 
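+		/* + * Program 5-tuple filter 0 for the low-latency port match; the + * port value itself lands in the upper 16 bits of + * TXGBE_RDB_5T_SDP below. + */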
wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0), + (TXGBE_RDB_5T_CTL0_POOL_MASK_EN | + (TXGBE_RDB_5T_CTL0_PRIORITY_MASK << + TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (TXGBE_RDB_5T_CTL0_DEST_PORT_MASK << + TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + + wr32(&adapter->hw, TXGBE_RDB_5T_SDP(0), + (adapter->lli_port << 16)); + } + + if (adapter->lli_size) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + TXGBE_RDB_5T_CTL1_LLI); + wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE, + TXGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size); + wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0), + (TXGBE_RDB_5T_CTL0_POOL_MASK_EN | + (TXGBE_RDB_5T_CTL0_PRIORITY_MASK << + TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK << + TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + } + + if (adapter->lli_vlan_pri) { + wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE, + TXGBE_RDB_LLI_THRE_PRIORITY_EN | + TXGBE_RDB_LLI_THRE_UP(~0), + TXGBE_RDB_LLI_THRE_PRIORITY_EN | + (adapter->lli_vlan_pri << TXGBE_RDB_LLI_THRE_UP_SHIFT)); + } +} + +/* Additional bittime to account for TXGBE framing */ +#define TXGBE_ETH_FRAMING 20 + +static int txgbe_hpbthresh(struct txgbe_adapter *adapter, int pb) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + TXGBE_ETH_FRAMING; + tc = link; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE traffic class uses FCOE jumbo frames */ + if (dev->features & NETIF_F_FCOE_MTU && + tc < TXGBE_FCOE_JUMBO_FRAME_SIZE && + pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)) + tc = TXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ + + /* Calculate delay value for device */ + dv_id = TXGBE_DV(link, tc); + + /* Loopback switch introduces additional latency */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + dv_id += TXGBE_B2BT(tc); + + /* Delay value is calculated in bit times, convert to KB */ + kb = TXGBE_BT2KB(dv_id); + rx_pba = rd32(hw, TXGBE_RDB_PB_SZ(pb)) + >> TXGBE_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide the required headroom. In this case warn the + * user and do the best we can. 
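+ * + * Illustrative numbers: with a 1500 byte MTU, link = 1500 + ETH_HLEN + + * ETH_FCS_LEN + TXGBE_ETH_FRAMING = 1538 bytes; if TXGBE_BT2KB(dv_id) + * then comes to, say, 40 KB while the packet buffer holds 160 KB, the + * high water mark is 160 - 40 = 120 KB. 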
+ */ + if (marker < 0) { + e_warn(drv, "Packet Buffer(%i) can not provide enough headroom.\n", pb); + marker = tc + 1; + } + + return marker; +} + +/** + * txgbe_lpbthresh - calculate low water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb: packet buffer to calculate + **/ +static int txgbe_lpbthresh(struct txgbe_adapter *adapter, int __maybe_unused pb) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE traffic class uses FCOE jumbo frames */ + if (dev->features & NETIF_F_FCOE_MTU && + tc < TXGBE_FCOE_JUMBO_FRAME_SIZE && + pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)) + tc = TXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif /* CONFIG_FCOE */ + + /* Calculate delay value for device */ + dv_id = TXGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times, convert to KB */ + return TXGBE_BT2KB(dv_id); +} + +static void txgbe_pbthresh_setup(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; + + if (!num_tc) + num_tc = 1; + + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = txgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = txgbe_lpbthresh(adapter, i); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; +} + +static void txgbe_configure_pb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int hdrm; + int tc = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 32 << adapter->fdir_pballoc; + else + hdrm = 0; + + hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + txgbe_pbthresh_setup(adapter); +} + +static void txgbe_ethertype_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_etype_filter_info *filter_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + wr32(hw, TXGBE_PSR_ETYPE_SWC(i), + filter_info->etype_filters[i].etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(i), + filter_info->etype_filters[i].etqs); + TXGBE_WRITE_FLUSH(hw); + } + } +} + +static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + u8 queue = 0; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + txgbe_fdir_set_input_mask(hw, &adapter->fdir_mask, + adapter->cloud_mode); + + hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && ring >= adapter->num_rx_queues) { + e_err(drv, + "FDIR restore failed w/o vf, ring:%u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, + "FDIR restore failed vf:%u, ring:%u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = 
adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } + + txgbe_fdir_write_perfect_filter(hw, + &filter->filter, + filter->sw_idx, + queue, + adapter->cloud_mode); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void txgbe_configure_isb(struct txgbe_adapter *adapter) +{ + /* set ISB Address */ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); +#ifdef CONFIG_64BIT + wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +#else + wr32(hw, TXGBE_PX_ISB_ADDR_H, 0); +#endif +} + +static void txgbe_configure_port(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value, i; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (tcs > 4) + /* 8 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_NUM_VT_16 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + /* 4 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_NUM_VT_32 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (adapter->ring_feature[RING_F_RSS].mask == TXGBE_RSS_4Q_MASK) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } else { + if (tcs > 4) + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else + value = 0; + } + + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); + if (adapter->tx_unidir_mode) + wr32m(hw, TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_FORCE_LKUP, + TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + wr32(hw, TXGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, TXGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +static void txgbe_configure_desc_chk(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!netif_msg_tx_err(adapter)) + return; + + for (i = 0; i < 4; i++) + wr32(hw, TXGBE_TDM_DESC_CHK(i), 0xFFFFFFFF); + + e_info(drv, "enable desc check\n"); +} + +static void txgbe_configure(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_configure_pb(adapter); + txgbe_configure_dcb(adapter); + + /* We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + txgbe_configure_virtualization(adapter); + txgbe_configure_port(adapter); + + txgbe_set_rx_mode(adapter->netdev); + txgbe_restore_vlan(adapter); + + hw->mac.ops.disable_sec_rx_path(hw); + + txgbe_ethertype_filter_restore(adapter); + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + txgbe_init_fdir_signature(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + txgbe_init_fdir_perfect(&adapter->hw, + adapter->fdir_pballoc, + adapter->cloud_mode); + txgbe_fdir_filter_restore(adapter); + } + + hw->mac.ops.enable_sec_rx_path(hw); + + hw->mac.ops.setup_eee(hw, + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && + (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); + +#if IS_ENABLED(CONFIG_FCOE) + /* configure FCoE L2 
filters, redirection table, and Rx control */ + txgbe_configure_fcoe(adapter); +#endif /* CONFIG_FCOE */ + + txgbe_configure_tx(adapter); + txgbe_configure_rx(adapter); + txgbe_configure_desc_chk(adapter); + txgbe_configure_isb(adapter); +} + +static bool txgbe_is_sfp(struct txgbe_hw *hw) +{ + switch (hw->mac.ops.get_media_type(hw)) { + case txgbe_media_type_fiber_qsfp: + case txgbe_media_type_fiber: + return true; + default: + return false; + } +} + +/** + * txgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void txgbe_sfp_link_config(struct txgbe_adapter *adapter) +{ + /* We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. As such the best + * solution is to just start searching as soon as we start + */ + + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; +} + +/** + * txgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_non_sfp_link_config(struct txgbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + u32 ret = TXGBE_ERR_LINK_SETUP; + + ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (ret) + goto link_cfg_out; + + if (link_up) + return 0; + + if ((hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + /* setup external PHY Mac Interface */ + txgbe_mtd_set_mac_intf_ctrl(&hw->phy_dev, hw->phy.addr, MTD_MAC_TYPE_XAUI, + false, MTD_MAC_SNOOP_OFF, + 0, MTD_MAC_SPEED_1000_MBPS, + MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED, + true, true); + + speed = hw->phy.autoneg_advertised; + if (!speed) + ret = hw->mac.ops.get_link_capabilities(hw, &speed, + &autoneg); + if (ret) + goto link_cfg_out; + } else { + speed = TXGBE_LINK_SPEED_10GB_FULL; + autoneg = false; + } + + ret = hw->mac.ops.setup_link(hw, speed, false); + +link_cfg_out: + return ret; +} + +static void txgbe_setup_gpie(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + gpie = TXGBE_PX_GPIE_MODEL; + + wr32(hw, TXGBE_PX_GPIE, gpie); +} + +static void reinit_gpio_int(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg; + + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } + + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); +} + +static void txgbe_up_complete(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + u32 links_reg; + u16 value; + + txgbe_get_hw_control(adapter); + txgbe_setup_gpie(adapter); + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + txgbe_configure_msix(adapter); + else + txgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for SFP+ fiber + * or power up mv phy + */ + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + if 
(hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_resume(hw); + } + + /* Make sure to clear down flag */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_DOWN, &adapter->state); + txgbe_napi_enable_all(adapter); + txgbe_configure_lli(adapter); + + if (txgbe_is_sfp(hw)) { + txgbe_sfp_link_config(adapter); + } else if (txgbe_is_backplane(hw)) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } else { + err = txgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + if (hw->mac.type == txgbe_mac_aml40) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_40G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_3); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_1); + } else if (hw->mac.type == txgbe_mac_aml) { + hw->mac.ops.clear_hw_cntrs(hw); + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_25G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (links_reg & TXGBE_CFG_PORT_ST_AML_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } + } + + wr32(hw, TXGBE_GPIO_INT_POLARITY, 0x0); + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_0 | TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_4 | TXGBE_GPIO_DDR_5); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_4 | TXGBE_GPIO_DR_5); + + usleep_range(10000, 20000); + wr32(hw, TXGBE_GPIO_DR, TXGBE_GPIO_DR_0); + } else { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | + TXGBE_CFG_PORT_ST_LINK_100M)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + } + } + + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); + + /* clear any pending interrupts, may auto mask */ + rd32(hw, TXGBE_PX_IC(0)); + rd32(hw, TXGBE_PX_IC(1)); + rd32(hw, TXGBE_PX_MISC_IC); + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) + wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6); + txgbe_irq_enable(adapter, true, true); + /* enable external PHY interrupt */ + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); + /* only enable T unit int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf043, 0x1); + /* active high */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf041, 0x0); + /* enable AN complete and link status change int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8010, 0xc00); + } + /* enable transmits */ + 
netif_tx_start_all_queues(adapter->netdev); + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem + */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + hw->f2c_mod_status = false; + mod_timer(&adapter->service_timer, jiffies); + + /* PCIE recovery: record lan status */ + if (hw->bus.lan_id == 0) { + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP, TXGBE_MIS_PRB_CTL_LAN0_UP); + } else if (hw->bus.lan_id == 1) { + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN1_UP, TXGBE_MIS_PRB_CTL_LAN1_UP); + } else { + e_err(probe, "%s:invalid bus lan id %d\n", __func__, hw->bus.lan_id); + } + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD); + /* update setting rx tx for all active vfs */ + txgbe_set_all_vfs(adapter); + + /* clear ecc reset flag if set */ + if (adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET) + adapter->flags2 &= ~TXGBE_FLAG2_ECC_ERR_RESET; +} + +void txgbe_reinit_locked(struct txgbe_adapter *adapter) +{ + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) + return; + + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + netif_trans_update(adapter->netdev); + + adapter->flags2 |= TXGBE_FLAG2_SERVICE_RUNNING; + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + /* If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; +} + +static void txgbe_reinit_locked_dma_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) + return; + + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + netif_trans_update(adapter->netdev); + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + + e_info(probe, "dma reset\n"); + + if (rd32(hw, PX_PF_PEND) & 0x3) { + e_dev_err("PX_PF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + + for (i = 0; i < 4; i++) { + if (rd32(hw, PX_VF_PEND(i))) { + e_dev_err("PX_VF_PEND case dma reset exit\n"); + goto skip_dma_rst; + } + } + wr32(hw, TXGBE_MIS_RST, + 1 << 4); + TXGBE_WRITE_FLUSH(hw); + msleep(1000); + + /* amlite: bme */ + wr32(hw, PX_PF_BME, 0x1); +skip_dma_rst: + /* If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; +} + +void txgbe_up(struct txgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + txgbe_configure(adapter); + + txgbe_up_complete(adapter); +} + +void txgbe_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(TXGBE_FLAG2_SEARCH_FOR_SFP | + TXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + case TXGBE_ERR_SFP_NOT_PRESENT: + case TXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case TXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + txgbe_tx_timeout_dorecovery(adapter); + break; + case TXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + txgbe_flush_sw_mac_table(adapter); + txgbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_reset(adapter); +} + +/** + * txgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + if (rx_ring->xsk_pool) { + txgbe_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct txgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + if (TXGBE_CB(skb)->page_released) + dma_unmap_page_attrs(rx_ring->dev, + TXGBE_CB(skb)->dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
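+ * In practice this means dma_sync_single_range_for_cpu() runs before + * dma_unmap_page_attrs() below, so the CPU observes the device's last + * writes before the mapping is torn down. 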
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + rx_buffer->page = NULL; + } + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + +skip_free: + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * txgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + if (tx_ring->xsk_pool) { + txgbe_xsk_clean_tx_ring(tx_ring); + return; + } + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + + if (ring_is_xdp(tx_ring)) { + if (tx_buffer_info->xdpf) + xdp_return_frame(tx_buffer_info->xdpf); + } + txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * txgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_rx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * txgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_tx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + txgbe_clean_tx_ring(adapter->xdp_ring[i]); +} + +static void txgbe_fdir_filter_exit(struct txgbe_adapter *adapter) +{ + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void txgbe_disable_device(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__TXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + if (!(adapter->flags2 & TXGBE_FLAG2_ECC_ERR_RESET)) + txgbe_disable_pcie_master(hw); + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + txgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + 
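/* netif_tx_disable() takes each Tx queue's xmit lock, so no ndo_start_xmit is still running once it returns */ +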
netif_tx_disable(netdev); + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_rcu(); + + txgbe_irq_disable(adapter); + txgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(TXGBE_FLAG2_FDIR_REQUIRES_REINIT | + TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + adapter->flags2 &= ~TXGBE_FLAG2_SERVICE_RUNNING; + + hw->f2c_mod_status = false; + cancel_work_sync(&adapter->sfp_sta_task); + + /* PCIE recovery: record lan status, clear */ + if (hw->bus.lan_id == 0) + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP, 0); + else if (hw->bus.lan_id == 1) + wr32m(hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN1_UP, 0); + else + e_dev_err("Invalid bus lan id %d\n", hw->bus.lan_id); + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* Disable all VFTE/VFRE TX/RX */ + txgbe_set_all_vfs(adapter); + } + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + /* disable mac transmitter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE, 0); + } + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), + TXGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); + + /* workaround for GPIO interrupt lost in LLDP-on condition */ + reinit_gpio_int(adapter); +} + +void txgbe_down(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_disable_device(adapter); + txgbe_reset(adapter); + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + hw->mac.ops.disable_tx_laser(hw); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } + + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); +} + +/** + * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) + * @adapter: board private structure to initialize + * + * txgbe_sw_init initializes the adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size).
+ **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int txgbe_sw_init(struct txgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct txgbe_hw *hw = &adapter->hw; + unsigned int fdir; + u32 ssid = 0; + int err; +#if IS_ENABLED(CONFIG_DCB) + struct txgbe_dcb_tc_config *tc; + int j, bwg_pct; +#endif /* CONFIG_DCB */ + u32 fw_version; + u32 flash_header; + u32 flash_header_index; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == TXGBE_FAILED_READ_CFG_BYTE && + txgbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } + + err = txgbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + txgbe_flash_read_dword(hw, 0x0, &flash_header); + if (((flash_header >> 16) & 0xffff) == TXGBE_FLASH_HEADER_FLAG) + flash_header_index = 0x0; + else + flash_header_index = 0x1; + + hw->oem_svid = pdev->subsystem_vendor; + hw->oem_ssid = pdev->subsystem_device; + if (pdev->subsystem_vendor == 0x8088) { + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + } else { + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_flash_read_dword(hw, (flash_header_index * 0x10000) + 0x302c, &ssid); + else + txgbe_flash_read_dword(hw, 0xfffdc, &ssid); + + if (ssid == 0x1) { + e_err(probe, "read of internal subsystem device id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_device_id = (u16)ssid; + hw->subsystem_device_id = hw->subsystem_device_id >> 8 | + hw->subsystem_device_id << 8; + } + + txgbe_flash_read_dword(hw, (flash_header_index * 0x10000) + 0x13a, &fw_version); + snprintf(adapter->fw_version, sizeof(adapter->fw_version), + "0x%08x", fw_version); + + adapter->mac_table = kcalloc(hw->mac.num_rar_entries, + sizeof(struct txgbe_mac_addr), + GFP_KERNEL); + if (!adapter->mac_table) { + err = TXGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); + if (!adapter->af_xdp_zc_qps) + return -ENOMEM; + + /* Set common capability flags and settings */ +#if IS_ENABLED(CONFIG_TPH) + adapter->flags |= TXGBE_FLAG_TPH_CAPABLE; +#endif +#if IS_ENABLED(CONFIG_FCOE) + adapter->flags |= TXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~TXGBE_FLAG_FCOE_ENABLED; +#if IS_ENABLED(CONFIG_DCB) + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = TXGBE_FCOE_DEFUP; + adapter->fcoe.up_set = TXGBE_FCOE_DEFUP; +#endif /* CONFIG_DCB */ +#endif /* CONFIG_FCOE */ + adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags2 |= TXGBE_FLAG2_RSC_CAPABLE; + fdir = min_t(int, TXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= TXGBE_FLAGS_SP_INIT; + adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + hw->phy.smart_speed = txgbe_smart_speed_off; + adapter->flags2 |= TXGBE_FLAG2_EEE_CAPABLE; + +#if IS_ENABLED(CONFIG_FCOE) + /* FCoE support exists, always init the FCoE lock */ +
spin_lock_init(&adapter->fcoe.lock); +#endif /* CONFIG_FCOE */ + + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + + mutex_init(&adapter->e56_lock); + +#if IS_ENABLED(CONFIG_DCB) + adapter->dcb_cfg.num_tcs.pg_tcs = 8; + adapter->dcb_cfg.num_tcs.pfc_tcs = 8; + + /* Configure DCB traffic classes */ + bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; + for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = 0; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = 0; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + tc->pfc = txgbe_dcb_pfc_disabled; + } + + /* reset back to TC 0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + + /* total of all TCs bandwidth needs to be 100 */ + bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; + tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; + tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[TXGBE_DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[TXGBE_DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.rx_pba_cfg = txgbe_dcb_pba_equal; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.round_robin_enable = false; + adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +#endif /* CONFIG_DCB */ + + hw->mbx.ops.init_params(hw); + + /* default flow control settings */ + hw->fc.requested_mode = txgbe_fc_full; + hw->fc.current_mode = txgbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = TXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + hw->dac_sfp = false; + + /* set default ring sizes */ + adapter->tx_ring_count = TXGBE_DEFAULT_TXD; + adapter->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + adapter->cmplt_to_dis = false; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__TXGBE_DOWN, &adapter->state); + memset(adapter->i2c_eeprom, 0, sizeof(u8) * 512); + + adapter->fec_link_mode = TXGBE_PHY_FEC_AUTO; + adapter->cur_fec_link = TXGBE_PHY_FEC_AUTO; + + adapter->link_valid = true; + + if (hw->mac.type == txgbe_mac_sp) + adapter->desc_reserved = DESC_RESERVED; + else + adapter->desc_reserved = DESC_RESERVED_AML; + + bitmap_zero(adapter->limited_vlans, 4096); + +out: + return err; +} + +/** + * txgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if 
(!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union txgbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + txgbe_setup_headwb_resources(tx_ring); + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * caller's duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i = 0, j = 0, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = txgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = txgbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx(XDP) Queue %u failed\n", j); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_tx_resources(adapter->tx_ring[i]); + while (j--) + txgbe_free_tx_resources(adapter->xdp_ring[j]); + return err; +} + +/** + * txgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + int err; + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union txgbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + if (!rx_ring->q_vector) + return 0; + + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, + rx_ring->q_vector->napi.napi_id) < 0) + goto err; + + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + goto err; + } + + rx_ring->xdp_prog = rx_ring->q_vector->adapter->xdp_prog; + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the
Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * caller's duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = txgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + +#if IS_ENABLED(CONFIG_FCOE) + err = txgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * txgbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * TXGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) + return -ENOMEM; + memset(adapter->isb_mem, 0, sizeof(u32) * TXGBE_ISB_MAX); + return 0; +} + +/** + * txgbe_free_isb_resources - free interrupt status resources + * @adapter: board private structure + **/ +void txgbe_free_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * TXGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +void txgbe_free_headwb_resources(struct txgbe_ring *ring) +{ + u8 headwb_size = 1; + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + + if (ring->q_vector) { + adapter = ring->q_vector->adapter; + hw = &adapter->hw; + if (hw->mac.type == txgbe_mac_sp) + return; + } else { + return; + } + + if (ring->headwb_mem) { + dma_free_coherent(ring->dev, sizeof(u32) * headwb_size, + ring->headwb_mem, ring->headwb_dma); + ring->headwb_mem = NULL; + } +} + +/** + * txgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void txgbe_free_tx_resources(struct txgbe_ring *tx_ring) +{ + txgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + + txgbe_free_headwb_resources(tx_ring); +} + +/** + * txgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void txgbe_free_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + txgbe_free_tx_resources(adapter->xdp_ring[i]); +} + +/** + * txgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void txgbe_free_rx_resources(struct txgbe_ring
*rx_ring) +{ + txgbe_clean_rx_ring(rx_ring); + + rx_ring->xdp_prog = NULL; + + if (rx_ring->q_vector) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * txgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i; + +#if IS_ENABLED(CONFIG_FCOE) + txgbe_free_fcoe_ddp_resources(adapter); +#endif + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * txgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (new_mtu < 68 || new_mtu > 9414) + return -EINVAL; + + if (adapter->xdp_prog) { + int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + + if (new_frame_size > txgbe_rx_bufsz(ring)) { + e_warn(probe, "Requested MTU size is not supported with XDP\n"); + return -EINVAL; + } + } + } + + /* we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED && + new_mtu > ETH_DATA_LEN) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + + return 0; +} + +/** + * txgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int txgbe_open(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int err; + + /*special for backplane flow*/ + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_DOWN; + + /* disallow open during test */ + if (test_bit(__TXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = txgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = txgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = txgbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + txgbe_configure(adapter); + + err = txgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? 
adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + + txgbe_ptp_init(adapter); + txgbe_up_complete(adapter); + + udp_tunnel_nic_reset_ntf(netdev); + + if (hw->mac.type == txgbe_mac_aml) { + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + hw->mac.ops.clear_hw_cntrs(hw); + } + + return 0; + +err_set_queues: + txgbe_free_irq(adapter); +err_req_irq: + txgbe_free_isb_resources(adapter); +err_req_isb: + txgbe_free_all_rx_resources(adapter); + +err_setup_rx: + txgbe_free_all_tx_resources(adapter); +err_setup_tx: + txgbe_reset(adapter); + + return err; +} + +/** + * txgbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void txgbe_close_suspend(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_ptp_suspend(adapter); + + txgbe_disable_device(adapter); + + /* power down the optics for SFP+ fiber or mv phy */ + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { + if (hw->phy.media_type == txgbe_media_type_fiber || + hw->phy.media_type == txgbe_media_type_fiber_qsfp) + hw->mac.ops.disable_tx_laser(hw); + else if (hw->phy.media_type == txgbe_media_type_copper && + (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI) + txgbe_external_phy_suspend(hw); + } + + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); + + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); +} + +/** + * txgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int txgbe_close(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) { + txgbe_bp_close_protect(adapter); + } + + txgbe_ptp_stop(adapter); + + txgbe_down(adapter); + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); + + txgbe_fdir_filter_exit(adapter); + memset(&adapter->ft_filter_info, 0, + sizeof(struct txgbe_5tuple_filter_info)); + + txgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int txgbe_resume(struct device *dev) +{ + struct txgbe_adapter *adapter; + struct net_device *netdev; + u32 err; + struct pci_dev *pdev = to_pci_dev(dev); + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* make sure to clear disable flag */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + txgbe_reset(adapter); + + rtnl_lock(); + + err = txgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = txgbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} + +/** + * txgbe_freeze - quiesce the device (no IRQs or DMA) + * @dev: pointer to the port's device structure + */ +static int txgbe_freeze(struct device *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + txgbe_down(adapter); + txgbe_free_irq(adapter); + } + + txgbe_reset_interrupt_capability(adapter); + + return 0; +} + +/** + * txgbe_thaw - un-quiesce the device + * @dev: pointer to the port's device structure + */ +static int txgbe_thaw(struct device *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = adapter->netdev; + + txgbe_set_interrupt_capability(adapter); + + if (netif_running(netdev)) { + int err = txgbe_request_irq(adapter); + + if (err) + return err; + + txgbe_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +/** + * __txgbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12), where it would cause a + * compile warning/error because it is defined but not used. + **/ +#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) +static int __txgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + rtnl_lock(); + if (netif_running(netdev)) + txgbe_close_suspend(adapter); + rtnl_unlock(); + + txgbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + if (wufc) { + txgbe_set_rx_mode(netdev); + txgbe_configure_rx(adapter); + /* enable the optics for SFP+ fiber as we can WoL */ + hw->mac.ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & TXGBE_PSR_WKUP_CTL_MC) { + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_MPE, TXGBE_PSR_CTL_MPE); + } + + pci_clear_master(adapter->pdev); + wr32(hw, TXGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(hw, TXGBE_PSR_WKUP_CTL, 0); + } + + pci_wake_from_d3(pdev, !!wufc); + + *enable_wake = !!wufc; + txgbe_release_hw_control(adapter); + + if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} +#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + +#ifdef CONFIG_PM +static int txgbe_suspend(struct device *dev) +{ + int retval; + bool wake; + struct pci_dev *pdev = to_pci_dev(dev); + + retval = __txgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void txgbe_shutdown(struct pci_dev *pdev) +{ + bool
wake = 0; + + __txgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * txgbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces txgbe_get_stats for kernels which support it. + */ +static void txgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) + +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by txgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +} + +/** + * txgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void txgbe_update_stats(struct txgbe_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->net_stats; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + u8 pf_queue_offset = 0; + + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *rx_ring = adapter->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + + /* 8 register reads */ + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = rd32(hw, TXGBE_RDB_MPCNT(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + hwstats->pxontxc[i] += rd32(hw, TXGBE_RDB_PXONTXC(i)); + hwstats->pxofftxc[i] += + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + hwstats->pxonrxc[i] += rd32(hw, TXGBE_MAC_PXONRXC(i)); + } + + hwstats->gprc += rd32(hw, TXGBE_PX_GPRC); + + txgbe_update_xoff_received(adapter); + + hwstats->o2bgptc += rd32(hw, TXGBE_TDM_OS2BMC_CNT); + if (txgbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, TXGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, TXGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, TXGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, TXGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, TXGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, TXGBE_PX_GOTC_LSB); + 
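/* fold in the high 32 bits of the 64-bit good-octets-transmitted counter */ +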
hwstats->gotc += (u64)rd32(hw, TXGBE_PX_GOTC_MSB) << 32; + + adapter->hw_rx_no_dma_resources += + rd32(hw, TXGBE_RDM_DRP_PKT); + hwstats->lxonrxc += rd32(hw, TXGBE_MAC_LXONRXC); + hwstats->fdirmatch += rd32(hw, TXGBE_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(hw, TXGBE_RDB_FDIR_MISS); + +#if IS_ENABLED(CONFIG_FCOE) + hwstats->fccrc += rd32(hw, TXGBE_FCCRC); + hwstats->fclast += rd32(hw, TXGBE_FCLAST); + hwstats->fcoerpdc += rd32(hw, TXGBE_FCOERPDC); + hwstats->fcoeprc += rd32(hw, TXGBE_FCOEPRC); + hwstats->fcoeptc += rd32(hw, TXGBE_FCOEPTC); + hwstats->fcoedwrc += rd32(hw, TXGBE_FCOEDWRC); + hwstats->fcoedwtc += rd32(hw, TXGBE_FCOEDWTC); + /* Add up per cpu counters for total ddp alloc fail */ + if (adapter->fcoe.ddp_pool) { + struct txgbe_fcoe *fcoe = &adapter->fcoe; + struct txgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; + + for_each_possible_cpu(cpu) { + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; + } + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; + } +#endif /* CONFIG_FCOE */ + + bprc = rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + hwstats->rdpc += rd32(hw, TXGBE_RDB_PKT_CNT); + hwstats->rddc += rd32(hw, TXGBE_RDB_DRP_CNT); + hwstats->psrpc += rd32(hw, TXGBE_PSR_PKT_CNT); + hwstats->psrdc += rd32(hw, TXGBE_PSR_DBG_DRP_CNT); + hwstats->untag += rd32(hw, TXGBE_RSEC_LSEC_UNTAG_PKT); + hwstats->tdmpc += rd32(hw, TXGBE_TDM_PKT_CNT); + hwstats->tdmdc += rd32(hw, TXGBE_TDM_DRP_CNT); + hwstats->tdbpc += rd32(hw, TXGBE_TDB_OUT_PKT_CNT); + + pf_queue_offset = adapter->ring_feature[RING_F_VMDQ].offset * + (adapter->ring_feature[RING_F_RSS].mask + 1); + + for (i = pf_queue_offset; i < 128; i++) + hwstats->mprc += rd32(hw, TXGBE_PX_MPRC(i)); + + hwstats->roc += rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, TXGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, TXGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, TXGBE_PX_GPTC); + hwstats->mptc += rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + net_stats->rx_missed_errors = total_mpc; +} + +/** + * txgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (txgbe_reinit_fdir_tables(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__TXGBE_TX_FDIR_INIT_DONE, + &adapter->tx_ring[i]->state); + /* re-enable 
flow director interrupts */ + wr32m(hw, TXGBE_PX_MISC_IEN, + TXGBE_PX_MISC_IEN_FLOW_DIR, TXGBE_PX_MISC_IEN_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization.\n"); + } +} + +void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + wr32(&adapter->hw, TXGBE_PX_IMC(0), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(0), mask); + + mask = (qmask >> 32); + wr32(&adapter->hw, TXGBE_PX_IMC(1), mask); + wr32(&adapter->hw, TXGBE_PX_ICS(1), mask); +} + +/** + * txgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) +{ + int i; + u64 eics = 0; + + /* If we're down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); + } + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct txgbe_q_vector *qv = adapter->q_vector[i]; + + if (qv->rx.ring || qv->tx.ring) + eics |= BIT_ULL(i); + } + } + /* Cause software interrupt to ensure rings are cleaned */ + txgbe_irq_rearm_queues(adapter, eics); +} + +/** + * txgbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + u32 reg; + u32 __maybe_unused i = 1; + + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + msleep(20); + + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + TXGBE_TRY_LINK_TIMEOUT))) + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < 3; i++) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + usleep_range(10000, 20000); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; + + if (adapter->txgbe_ieee_pfc) + pfc_en |= !!(adapter->txgbe_ieee_pfc->pfc_en); + + if (link_up && !((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + hw->mac.ops.fc_enable(hw); + txgbe_set_rx_drop_en(adapter); + } + + if (link_up) { + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + + if (hw->mac.type == txgbe_mac_aml40) { + if (!(hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); 
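+ /* MAC was re-initialized above for optical modules; DAC and backplane links skip it */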
+ + if (link_speed & TXGBE_LINK_SPEED_40GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_40G); + } + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else if (hw->mac.type == txgbe_mac_aml) { + if (!(hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_reconfig_mac(hw); + + if (link_speed & TXGBE_LINK_SPEED_25GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_25G); + } else if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_10G); + } else { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_AML_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_AML_SPEED_1G); + } + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + } else { + if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, TXGBE_MAC_RX_CFG); + wr32(hw, TXGBE_MAC_RX_CFG, reg); + wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); + } + } +} + +static void txgbe_update_default_up(struct txgbe_adapter *adapter) +{ + u8 up = 0; + + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = 0, + }; + up = dcb_getapp(netdev, &app); + +#if IS_ENABLED(CONFIG_FCOE) + adapter->default_up = (up > 1) ? 
(ffs(up) - 1) : 0; +#else + adapter->default_up = up; +#endif +} + +/** + * txgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, TXGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; + flow_tx = !!(TXGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, TXGBE_RDB_RFCC)); + + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + adapter->speed = SPEED_40000; + break; + case TXGBE_LINK_SPEED_25GB_FULL: + adapter->speed = SPEED_25000; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + adapter->speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + default: + adapter->speed = SPEED_1000; + break; + } + + if (hw->mac.type == txgbe_mac_aml) + adapter->cur_fec_link = txgbe_get_cur_fec_mode(hw); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s%s\n", + (link_speed == TXGBE_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == TXGBE_LINK_SPEED_25GB_FULL ? + "25 Gbps" : + (link_speed == TXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == TXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == TXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == TXGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None"))), + ((hw->mac.type == txgbe_mac_aml && link_speed == TXGBE_LINK_SPEED_25GB_FULL) ? + ((adapter->cur_fec_link == TXGBE_PHY_FEC_BASER) ? ", FEC: BASE-R" : + (adapter->cur_fec_link == TXGBE_PHY_FEC_RS) ? ", FEC: RS" : + (adapter->cur_fec_link == TXGBE_PHY_FEC_OFF) ? 
", FEC: OFF" : "") : "")); + + if (!adapter->backplane_an && + (hw->dac_sfp || + (hw->subsystem_device_id & TXGBE_DEV_MASK) == TXGBE_ID_KR_KX_KX4) && + hw->mac.type == txgbe_mac_sp) + txgbe_enable_rx_adapter(hw); + + if (adapter->tx_unidir_mode) { + wr32m(hw, 0x11004, BIT(10), BIT(10)); + wr32m(hw, 0x11004, BIT(0), BIT(0)); + e_dev_info("Enable loopback and disable rx : %x\n.", + rd32(hw, 0x11004)); + } + txgbe_check_vlan_rate_limit(adapter); + netif_carrier_on(netdev); + txgbe_check_vf_rate_limit(adapter); + + netif_tx_wake_all_queues(netdev); + + /* update the default user priority for VFs */ + txgbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + //txgbe_ping_all_vfs(adapter); + txgbe_ping_all_vfs_with_link_status(adapter, true); +} + +static void txgbe_link_down_flush_tx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, + ~TXGBE_MAC_RX_CFG_RE); + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, TXGBE_MAC_RX_CFG_LM); + + mdelay(20); + + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_LM, 0); +} + +/** + * txgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_down_event(adapter); + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + + if (hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw)) + adapter->an_done = false; + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + txgbe_link_down_flush_tx(adapter); + /* ping all the active vfs to let them know link has changed */ + txgbe_ping_all_vfs_with_link_status(adapter, false); +} + +static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct txgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + if (xdp_ring->next_to_use != xdp_ring->next_to_clean) + return true; + } + return false; +} + +static bool txgbe_vf_tx_pending(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + TXGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + TXGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; 
+} + +/** + * txgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (txgbe_ring_tx_pending(adapter) || + txgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void txgbe_issue_vf_flr(struct txgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + +static void txgbe_spoof_check(struct txgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, TXGBE_TDM_SEC_DRP); + + /* ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. + */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +#endif /* CONFIG_PCI_IOV */ + +/** + * txgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) +{ + u32 __maybe_unused value = 0; + struct txgbe_hw *hw = &adapter->hw; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (hw->mac.type == txgbe_mac_sp) + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4 || + hw->dac_sfp) + txgbe_bp_watchdog_event(adapter); + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + txgbe_e56_bp_watchdog_event(adapter); + + if (!(adapter->flags2 & TXGBE_FLAG2_LINK_DOWN)) + txgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + txgbe_watchdog_link_is_up(adapter); + else + txgbe_watchdog_link_is_down(adapter); + +#ifdef CONFIG_PCI_IOV + txgbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + + txgbe_update_stats(adapter); + + txgbe_watchdog_flush_tx(adapter); +} + +static void txgbe_phy_event_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rdata; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (!(adapter->flags3 & TXGBE_FLAG3_PHY_EVENT)) + return; + + adapter->flags3 &= ~TXGBE_FLAG3_PHY_EVENT; + + mutex_lock(&adapter->e56_lock); + rdata = rd32_ephy(hw, E56PHY_INTR_0_ADDR); + if (rdata & E56PHY_INTR_0_IDLE_ENTRY1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, 0x0); + 
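/* clear the latched idle-entry status while the interrupt is masked, then re-enable it */ +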
txgbe_wr32_ephy(hw, E56PHY_INTR_0_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + txgbe_wr32_ephy(hw, E56PHY_INTR_0_ENABLE_ADDR, E56PHY_INTR_0_IDLE_ENTRY1); + } + + rdata = rd32_ephy(hw, E56PHY_INTR_1_ADDR); + if (rdata & E56PHY_INTR_1_IDLE_EXIT1) { + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, 0x0); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + } + mutex_unlock(&adapter->e56_lock); +} + +/** + * txgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value = 0; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & TXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->sfp_poll_time = jiffies + TXGBE_SFP_POLL_JIFFIES - 1; + + if (hw->mac.type == txgbe_mac_aml40) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_PRST_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + err = TXGBE_ERR_SFP_NOT_PRESENT; + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + goto sfp_out; + } + } + + /* wait for sfp module ready*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + msleep(200); + + adapter->eeprom_type = 0; + adapter->eeprom_len = 0; + memset(adapter->i2c_eeprom, 0, sizeof(u8) * 512); + + err = hw->phy.ops.identify_sfp(hw); + if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == TXGBE_ERR_SFP_NOT_PRESENT) + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + + err = hw->mac.ops.setup_sfp(hw); + + hw->phy.autoneg_advertised = 0; + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + + if (err == TXGBE_ERR_SFP_NOT_SUPPORTED && adapter->netdev_registered) + e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n"); +} + +/** + * txgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 speed; + bool autoneg = false; + u16 value; + u32 gssr = hw->phy.phy_semaphore_mask; + u8 device_type = hw->subsystem_device_id & 0xF0; + + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + if (device_type == TXGBE_ID_XAUI) { + /* clear ext phy int status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 
+ if (value & 0x400)
+ adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+ if (!(value & 0x800))
+ return;
+ }
+
+ if (device_type == TXGBE_ID_MAC_XAUI ||
+ (hw->mac.ops.get_media_type(hw) == txgbe_media_type_copper &&
+ device_type == TXGBE_ID_SFI_XAUI)) {
+ speed = TXGBE_LINK_SPEED_10GB_FULL;
+ } else if (device_type == TXGBE_ID_MAC_SGMII) {
+ speed = TXGBE_LINK_SPEED_1GB_FULL;
+ } else {
+ speed = hw->phy.autoneg_advertised;
+ if (!speed && hw->mac.ops.get_link_capabilities) {
+ hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ /* setup the highest link when no autoneg */
+ if (!autoneg) {
+ if (speed & TXGBE_LINK_SPEED_25GB_FULL)
+ speed = TXGBE_LINK_SPEED_25GB_FULL;
+ else if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+ speed = TXGBE_LINK_SPEED_10GB_FULL;
+ }
+ }
+ }
+
+ /* firmware is configuring phy now, delay host driver config action */
+ if (hw->mac.type == txgbe_mac_aml ||
+ hw->mac.type == txgbe_mac_aml40) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) {
+ adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+ e_warn(probe, "delay config ephy\n");
+ return;
+ }
+ }
+
+ hw->mac.ops.setup_link(hw, speed, false);
+
+ if (hw->mac.type == txgbe_mac_aml ||
+ hw->mac.type == txgbe_mac_aml40)
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+
+ adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+}
+
+static void txgbe_sfp_reset_eth_phy_subtask(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ u32 speed;
+ bool linkup = true;
+ u32 i = 0;
+
+ if (!(adapter->flags2 & TXGBE_FLAG_NEED_ETH_PHY_RESET))
+ return;
+
+ adapter->flags2 &= ~TXGBE_FLAG_NEED_ETH_PHY_RESET;
+
+ if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+ return;
+
+ hw->mac.ops.check_link(hw, &speed, &linkup, false);
+ if (!linkup) {
+ txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1,
+ 0xA000);
+ /* wait for PHY initialization to complete */
+ for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) {
+ if ((txgbe_rd32_epcs(hw,
+ TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+ TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+ break;
+ msleep(100);
+ }
+ }
+}
+
+/**
+ * txgbe_service_timer - Timer callback
+ * @t: pointer to the timer_list structure
+ **/
+static void txgbe_service_timer(struct timer_list *t)
+{
+ struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+ unsigned long next_event_offset;
+ struct txgbe_hw *hw = &adapter->hw;
+ u32 val = 0;
+
+ /* poll faster when waiting for link */
+ if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE)
+ next_event_offset = HZ / 10;
+ else
+ next_event_offset = HZ * 2;
+
+ /* record which function should trigger PCIe recovery */
+ if (rd32(&adapter->hw, TXGBE_MIS_PF_SM) == 1) {
+ val = rd32m(&adapter->hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN0_UP |
+ TXGBE_MIS_PRB_CTL_LAN1_UP);
+ if (val & TXGBE_MIS_PRB_CTL_LAN0_UP) {
+ if (hw->bus.lan_id == 0) {
+ adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER;
+ e_info(probe, "%s: set recover on Lan0\n", __func__);
+ }
+ } else if (val & TXGBE_MIS_PRB_CTL_LAN1_UP) {
+ if (hw->bus.lan_id == 1) {
+ adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER;
+ e_info(probe, "%s: set recover on Lan1\n", __func__);
+ }
+ }
+ }
+
+ /* Reset the timer */
+ mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+ txgbe_service_event_schedule(adapter);
+ if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0 ||
+ hw->phy.sfp_type == 
txgbe_sfp_type_10g_cu_core1) + queue_work(txgbe_wq, &adapter->sfp_sta_task); +} + +static void txgbe_sfp_phy_status_work(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + sfp_sta_task); + struct txgbe_hw *hw = &adapter->hw; + u16 data = 0; + bool status = false; + s32 i2c_status; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) + return; + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + i2c_status = hw->phy.ops.read_i2c_sfp_phy(hw, + 0x0a, + &data); + + if (i2c_status != 0) + goto RELEASE_SEM; + + /* Avoid read module info and read f2c module internal phy + * may cause i2c controller read reg data err + */ + if ((data & 0x83ff) != 0 || data == 0) + goto RELEASE_SEM; + + if ((data & TXGBE_I2C_PHY_LOCAL_RX_STATUS) && + (data & TXGBE_I2C_PHY_REMOTE_RX_STATUS)) + status = true; + else + status = false; + } else if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + i2c_status = hw->phy.ops.read_i2c_sfp_phy(hw, + 0x8008, + &data); + + if (i2c_status != 0) + goto RELEASE_SEM; + + if (data & TXGBE_I2C_10G_SFP_LINK_STATUS) + status = true; + else + status = false; + } + +RELEASE_SEM: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + /* sync sfp status to firmware */ + wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, data | 0x80000000); + + if (hw->f2c_mod_status != status) { + hw->f2c_mod_status = status; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } +} + +static void txgbe_amlit_temp_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = 0, val = 0; + s32 status = 0; + int temp; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (!netif_carrier_ok(adapter->netdev)) + return; + + status = txgbe_e56_get_temp(hw, &temp); + if (status) + return; + + if (!(temp - adapter->amlite_temp > 4 || + adapter->amlite_temp - temp > 4)) + return; + + adapter->amlite_temp = temp; + val = rd32(hw, TXGBE_CFG_PORT_ST); + if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL) + link_speed = TXGBE_LINK_SPEED_40GB_FULL; + else if (val & TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL) + link_speed = TXGBE_LINK_SPEED_25GB_FULL; + else + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + mutex_lock(&adapter->e56_lock); + if (hw->mac.type == txgbe_mac_aml) + txgbe_temp_track_seq(hw, link_speed); + else if (hw->mac.type == txgbe_mac_aml40) + txgbe_temp_track_seq_40g(hw, link_speed); + mutex_unlock(&adapter->e56_lock); +} + +static void txgbe_reset_subtask(struct txgbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + union txgbe_tx_desc *tx_desc; + int i, j; + u32 desc_error[4] = {0, 0, 0, 0}; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + struct txgbe_tx_buffer *tx_buffer; + u32 size; + + if (!(adapter->flags2 & (TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED | + TXGBE_FLAG2_RESET_INTR_RECEIVED | + TXGBE_FLAG2_DMA_RESET_REQUESTED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state)) + return; + + if (netif_msg_tx_err(adapter)) { + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + + /* check tdm fatal error */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + e_err(tx_err, "TDM 
fatal error queue[%d]", i); + tx_ring = adapter->tx_ring[i]; + e_warn(tx_err, "queue[%d] RP = 0x%x\n", i, + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, + tx_desc->read.cmd_type_len, + tx_desc->read.olinfo_status); + } + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) + e_err(pktdata, "tx buffer[%d][%d]:\n", i, j); + if (size != 0 && tx_buffer->va && + netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", + DUMP_PREFIX_OFFSET, + 16, 1, tx_buffer->va, + size, true); + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) + e_err(pktdata, "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (tx_buffer->skb && netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", + DUMP_PREFIX_OFFSET, + 16, 1, tx_buffer->skb, + sizeof(struct sk_buff), + true); + } + netif_stop_subqueue(tx_ring->netdev, i); + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + txgbe_do_lan_reset(adapter); + } + } + } + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DEV_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_PF_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DMA_RESET_REQUESTED; + } + + if (adapter->flags2 & TXGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~TXGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, TXGBE_MIS_RST_ST, + TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = TXGBE_SW_RESET; + /* errata 7 */ + if (txgbe_mng_present(&adapter->hw) && + adapter->hw.revision_id == TXGBE_SP_MPW) + adapter->flags2 |= + TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED; + } else if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) { + adapter->hw.reset_type = TXGBE_GLOBAL_RESET; + } + adapter->hw.force_full_reset = true; + txgbe_reinit_locked(adapter); + adapter->hw.force_full_reset = false; + goto unlock; + } + + if (reset_flag & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. 
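+ * The reset itself is requested through the firmware when a
+ * management agent is present, otherwise by setting
+ * TXGBE_MIS_RST_SW_RST directly in the MIS reset register.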
+ */ + /*debug to up*/ + /*txgbe_dump(adapter);*/ + if (txgbe_mng_present(&adapter->hw)) + txgbe_reset_hostif(&adapter->hw); + else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_SW_RST, TXGBE_MIS_RST_SW_RST); + + } else if (reset_flag & TXGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to up*/ + /*txgbe_dump(adapter);*/ + txgbe_reinit_locked(adapter); + } else if (reset_flag & TXGBE_FLAG2_DMA_RESET_REQUESTED) { + txgbe_reinit_locked_dma_reset(adapter); + } else if (reset_flag & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to up*/ + /*txgbe_dump(adapter);*/ + pci_save_state(adapter->pdev); + if (txgbe_mng_present(&adapter->hw)) + txgbe_reset_hostif(&adapter->hw); + else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_GLOBAL_RST, + TXGBE_MIS_RST_GLOBAL_RST); + } + +unlock: + rtnl_unlock(); +} + +static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +{ + bool status; + + if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + txgbe_print_tx_hang_status(adapter); + txgbe_dump_all_ring_desc(adapter); + + wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { + status = txgbe_check_recovery_capability(adapter->pdev); + if (status) { + e_info(probe, "do recovery\n"); + txgbe_pcie_do_recovery(adapter->pdev); + } else { + e_err(drv, "This platform can't support pcie recovery, skip it\n"); + } + } + + adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; +} + +static void txgbe_tx_queue_clear_error_task(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *tx_ring; + u32 desc_error[4] = {0, 0, 0, 0}; + union txgbe_tx_desc *tx_desc; + u32 i, j; + struct txgbe_tx_buffer *tx_buffer; + u32 size; + + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + tx_ring = adapter->tx_ring[i]; + netif_stop_subqueue(tx_ring->netdev, i); + msec_delay(10); + + e_err(tx_err, "queue[%d] RP = 0x%x\n", i, + rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); + for (j = 0; j < tx_ring->count; j++) { + tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (tx_desc->read.olinfo_status != 0x1) + e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", + i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, + tx_desc->read.olinfo_status); + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + size = dma_unmap_len(tx_buffer, len); + if (size != 0 && tx_buffer->va) { + e_warn(pktdata, "tx buffer[%d][%d]:\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, + 16, 1, tx_buffer->va, + size, true); + } + } + + for (j = 0; j < tx_ring->count; j++) { + tx_buffer = &tx_ring->tx_buffer_info[j]; + if (tx_buffer->skb) { + e_err(pktdata, + "****skb in tx buffer[%d][%d]: *******\n", i, j); + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_ERR, "", + DUMP_PREFIX_OFFSET, 16, 1, + tx_buffer->skb, + sizeof(struct sk_buff), + true); + } + } + + wr32(hw, TXGBE_TDM_DESC_NONFATAL(i / 32), BIT(i % 32)); + + txgbe_clean_tx_ring(tx_ring); + + txgbe_configure_tx_ring(adapter, tx_ring); + 
netif_start_subqueue(tx_ring->netdev, i); + } + } +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + service_task); + struct txgbe_hw *hw = &adapter->hw; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + txgbe_down(adapter); + rtnl_unlock(); + } + txgbe_service_event_complete(adapter); + return; + } + + txgbe_check_pcie_subtask(adapter); + txgbe_reset_subtask(adapter); + txgbe_phy_event_subtask(adapter); + txgbe_sfp_detection_subtask(adapter); + if (!(hw->mac.type == txgbe_mac_sp || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || + hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || + txgbe_is_backplane(hw))) + txgbe_watchdog_subtask(adapter); + txgbe_sfp_link_config_subtask(adapter); + txgbe_sfp_reset_eth_phy_subtask(adapter); + txgbe_check_overtemp_subtask(adapter); + txgbe_watchdog_subtask(adapter); + txgbe_fdir_reinit_subtask(adapter); + txgbe_check_hang_subtask(adapter); + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) { + txgbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + txgbe_ptp_rx_hang(adapter); + } + + txgbe_tx_queue_clear_error_task(adapter); + txgbe_amlit_temp_subtask(adapter); + + txgbe_service_event_complete(adapter); +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static struct txgbe_dec_ptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + u8 l4_prot = 0; + u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP || + tun_prot == IPPROTO_IPV6) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= TXGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + /* fixme: VXLAN-GPE neither ETHER nor IP */ + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } + + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); + + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } 
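+ /* hdr now points at the inner IP header for the version check below */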
+ } + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype |= TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: + switch (first->protocol) { + case htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = TXGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case htons(ETH_P_1588): + ptype = TXGBE_PTYPE_L2_TS; + goto exit; + case htons(ETH_P_FIP): + ptype = TXGBE_PTYPE_L2_FIP; + goto exit; + case htons(TXGBE_ETH_P_LLDP): + ptype = TXGBE_PTYPE_L2_LLDP; + goto exit; + case htons(TXGBE_ETH_P_CNM): + ptype = TXGBE_PTYPE_L2_CNM; + goto exit; + case htons(ETH_P_PAE): + ptype = TXGBE_PTYPE_L2_EAPOL; + goto exit; + case htons(ETH_P_ARP): + ptype = TXGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = TXGBE_PTYPE_L2_MAC; + goto exit; + } + } + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= TXGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= TXGBE_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= TXGBE_PTYPE_TYP_SCTP; + break; + default: + ptype |= TXGBE_PTYPE_TYP_IP; + break; + } + +exit: + return txgbe_decode_ptype(ptype); +} + +static int txgbe_tso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len, struct txgbe_dec_ptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + bool enc = skb->encapsulation; + struct ipv6hdr *ipv6h; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + + if (err) + return err; + } + + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_IPV4 | + TXGBE_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_CC; + } + /* compute header lengths */ + + l4len = enc ? 
inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << TXGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + + if (enc) { + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else { + vlan_macip_lens = skb_network_header_len(skb) >> 1; + } + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void txgbe_tx_csum(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, struct txgbe_dec_ptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & TXGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = 
ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); + l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + TXGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= TXGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= TXGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +u32 txgbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = TXGBE_TXD_DTYP_DATA | + TXGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_HW_VLAN, + TXGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSO, + TXGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSTAMP, + TXGBE_TXD_MAC_TSTAMP); + + cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_LINKSEC, + TXGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void txgbe_tx_olinfo_status(union txgbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << TXGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= 
TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_CSUM, + TXGBE_TXD_L4CS); + + /* enable IPv4 checksum for TSO */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_IPV4, + TXGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_OUTER_IPV4, + TXGBE_TXD_EIPCS); + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_CC, + TXGBE_TXD_CC); + + olinfo_status |= TXGBE_SET_FLAG(tx_flags, + TXGBE_TX_FLAGS_IPSEC, + TXGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(txgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size) +{ + if (likely(txgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __txgbe_maybe_stop_tx(tx_ring, size); +} + +static int txgbe_tx_map(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + const u8 hdr_len) +{ + struct txgbe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sk_buff *skb = first->skb; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = txgbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = TXGBE_TX_DESC(tx_ring, i); + + txgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + +#if IS_ENABLED(CONFIG_FCOE) + if (tx_flags & TXGBE_TX_FLAGS_FCOE) { + if (data_len < sizeof(struct fcoe_crc_eof)) { + size -= sizeof(struct fcoe_crc_eof) - data_len; + data_len = 0; + } else { + data_len -= sizeof(struct fcoe_crc_eof); + } + } +#endif /* CONFIG_FCOE */ + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + tx_buffer->va = skb->data; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) { + tx_buffer->va = NULL; + goto dma_error; + } + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > TXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ TXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += TXGBE_MAX_DATA_PER_TXD; + size -= TXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + +#if IS_ENABLED(CONFIG_FCOE) + size = 
min_t(unsigned int, data_len, skb_frag_size(frag)); +#else + size = skb_frag_size(frag); +#endif + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_buffer->va = skb_frag_address_safe(frag); + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + /* set next_eop for amlite tx head wb*/ + first->next_eop = i; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + txgbe_maybe_stop_tx(tx_ring, adapter->desc_reserved + DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more() || + (txgbe_desc_unused(tx_ring) <= (tx_ring->count >> 1))) + writel(i, tx_ring->tail); + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + tx_buffer->va = NULL; + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static void txgbe_atr(struct txgbe_ring *ring, + struct txgbe_tx_buffer *first, + struct txgbe_dec_ptype dptype) +{ + struct txgbe_q_vector *q_vector = ring->q_vector; + union txgbe_atr_hash_dword input = { .dword = 0 }; + union txgbe_atr_hash_dword common = { .dword = 0 }; + union network_header hdr; + struct tcphdr *th; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; + + if (dptype.etype) { + if (TXGBE_PTYPE_TYPL4(dptype.ptype) != TXGBE_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else { + if (TXGBE_PTYPE_PKT(dptype.ptype) != TXGBE_PTYPE_PKT_IP || + TXGBE_PTYPE_TYPL4(dptype.ptype) != TXGBE_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && ring->atr_count < ring->atr_sample_rate) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
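+ * Note that vlan_id carries the decoded packet type here rather
+ * than the VLAN tag itself.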
+ */
+ input.formatted.vlan_id = htons((u16)dptype.ptype);
+
+ /* since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (first->tx_flags & TXGBE_TX_FLAGS_SW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->protocol;
+ else if (first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->vlan_proto;
+ else
+ common.port.src ^= th->dest ^ first->protocol;
+ common.port.dst ^= th->source;
+
+ if (TXGBE_PTYPE_PKT_IPV6 & TXGBE_PTYPE_PKT(dptype.ptype)) {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ } else {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ }
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ txgbe_fdir_add_signature_filter(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
+}
+
+static int txgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct txgbe_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool need_reset;
+
+ if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
+ return -EINVAL;
+
+ if (adapter->flags & TXGBE_FLAG_DCB_ENABLED)
+ return -EINVAL;
+
+ if (adapter->xdp_prog && prog) {
+ e_dev_err("an XDP program is already loaded\n");
+ return -EBUSY;
+ }
+
+ /* verify txgbe ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct txgbe_ring *ring = adapter->rx_ring[i];
+
+ if (frame_size > txgbe_rx_bufsz(ring))
+ return -EINVAL;
+ }
+ old_prog = adapter->xdp_prog;
+ need_reset = (!!prog != !!old_prog);
+
+ if (need_reset) {
+ if (netif_running(dev))
+ txgbe_close(dev);
+ else
+ txgbe_reset(adapter);
+
+ if (nr_cpu_ids > MAX_XDP_QUEUES)
+ static_branch_inc(&txgbe_xdp_locking_key);
+ }
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+
+ /* If transitioning XDP modes reconfigure rings */
+ if (need_reset) {
+ if (!adapter->xdp_prog && adapter->old_rss_limit) {
+ adapter->ring_feature[RING_F_FDIR].limit = adapter->old_rss_limit;
+ adapter->ring_feature[RING_F_RSS].limit = adapter->old_rss_limit;
+ }
+
+ if (!prog) {
+ if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) &&
+ adapter->lro_before_xdp) {
+ adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+ dev->features |= NETIF_F_LRO;
+ }
+ } else {
+ adapter->lro_before_xdp = !!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED);
+ if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) {
+ e_dev_err("LRO is not supported with XDP\n");
+ dev->features &= ~NETIF_F_LRO;
+ adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED;
+ }
+ }
+
+ if (adapter->xdp_prog) {
+ if (adapter->num_rx_queues > TXGBE_MAX_XDP_RSS_INDICES) {
+ adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit;
+ adapter->ring_feature[RING_F_FDIR].limit = TXGBE_MAX_XDP_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = TXGBE_MAX_XDP_RSS_INDICES;
+ e_dev_info("limiting Tx/Rx ring count to 32\n");
+ } else {
+ adapter->old_rss_limit = 0;
+ }
+ }
+
+ txgbe_clear_interrupt_scheme(adapter);
+
+ txgbe_init_interrupt_scheme(adapter);
+ if (netif_running(dev))
+ txgbe_open(dev);
+
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
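+ /* no reset needed: just swap the program pointer on each Rx ring */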
+ xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ }
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is so that receiving will start.
+ */
+ if (need_reset && prog) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->xdp_ring[i]->xsk_pool) {
+ (void)txgbe_xsk_wakeup(adapter->netdev, i,
+ XDP_WAKEUP_RX);
+ }
+ }
+ }
+
+ if (adapter->xdp_prog)
+ e_dev_info("XDP program is set up\n");
+ else
+ e_dev_info("no XDP program loaded\n");
+
+ return 0;
+}
+
+static int txgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct txgbe_adapter *adapter = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return txgbe_xdp_setup(dev, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return txgbe_xsk_umem_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int txgbe_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct txgbe_adapter *adapter = netdev_priv(dev);
+ struct txgbe_ring *ring;
+ int drops = 0;
+ int nxmit = 0;
+ int i;
+
+ if (unlikely(test_bit(__TXGBE_DOWN, &adapter->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* During program transitions it's possible adapter->xdp_prog is assigned
+ * but the ring has not been configured yet. In this case simply abort xmit.
+ */
+ ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id() %
+ adapter->num_xdp_queues] : NULL;
+ if (unlikely(!ring))
+ return -ENXIO;
+
+ if (unlikely(test_bit(__TXGBE_TX_DISABLED, &ring->state)))
+ return -ENXIO;
+
+ if (static_branch_unlikely(&txgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = txgbe_xmit_xdp_ring(ring, xdpf);
+ if (err != TXGBE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ nxmit++;
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH)) {
+ /* update hw tx pointer */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+ if (static_branch_unlikely(&txgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+
+ return n - drops;
+}
+
+static u16 txgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct txgbe_adapter *adapter = netdev_priv(dev);
+ int queue;
+#if IS_ENABLED(CONFIG_FCOE)
+ struct txgbe_ring_feature *f;
+ int txq;
+#endif
+
+ if (adapter->vlan_rate_link_speed) {
+ if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED ||
+ adapter->flags & TXGBE_FLAG_FCOE_ENABLED)
+#if IS_ENABLED(CONFIG_FCOE)
+ goto fcoe;
+#else
+ goto skip_select;
+#endif
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_id = skb_vlan_tag_get_id(skb);
+
+ if (test_bit(vlan_id, adapter->limited_vlans)) {
+ int r_idx = adapter->num_tx_queues - 1 -
+ txgbe_find_nth_limited_vlan(adapter, vlan_id);
+ return r_idx;
+ }
+ }
+ }
+#if IS_ENABLED(CONFIG_FCOE)
+fcoe:
+ /* only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_FCOE):
+ case htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
+
+ if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED)
+ break;
+ fallthrough;
+ default:
+ goto skip_select;
+ }
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
+
+ while (txq >= f->indices)
+ txq -= f->indices;
+
+ return txq + f->offset;
+#endif /* FCOE */
+skip_select:
+ queue = netdev_pick_tx(dev, skb, sb_dev);
+
+ if (adapter->vlan_rate_link_speed)
+ queue = queue % (adapter->num_tx_queues -
+ adapter->active_vlan_limited);
+
+ return queue;
+}
+
+/**
+ * txgbe_skb_pad_nonzero - pad the tail of an skb with a non-zero pattern
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is filled
+ * with a non-zero byte (0x1). Used by network drivers which may DMA
+ * or transfer data beyond the buffer end onto the wire.
+ *
+ * May return error in out of memory cases. The skb is freed on error.
+ */
+
+static int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad)
+{
+ int err;
+ int ntail;
+
+ /* If the skbuff is non-linear, tailroom is always zero. */
+ if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+ memset(skb->data + skb->len, 0x1, pad);
+ return 0;
+ }
+
+ ntail = skb->data_len + pad - (skb->end - skb->tail);
+ if (likely(skb_cloned(skb) || ntail > 0)) {
+ err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
+ if (unlikely(err))
+ goto free_skb;
+ }
+
+ /* FIXME: The use of this function with non-linear skb's really needs
+ * to be audited.
+ */
+ err = skb_linearize(skb);
+ if (unlikely(err))
+ goto free_skb;
+
+ memset(skb->data + skb->len, 0x1, pad);
+ return 0;
+
+free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct txgbe_adapter __maybe_unused *adapter,
+ struct txgbe_ring *tx_ring)
+{
+ struct txgbe_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
+ unsigned short f;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = skb->protocol;
+ u8 hdr_len = 0;
+ struct txgbe_dec_ptype dptype;
+ u8 vlan_addlen = 0;
+
+ /* work around hw errata 3 */
+ u16 _llclen, *llclen;
+
+ llclen = skb_header_pointer(skb, ETH_HLEN - 2, sizeof(u16), &_llclen);
+ if (llclen && (*llclen == 0x3 || *llclen == 0x4 || *llclen == 0x5)) {
+ /* skb is freed on pad failure, so it must not be retried */
+ if (txgbe_skb_pad_nonzero(skb, ETH_ZLEN - skb->len))
+ return NETDEV_TX_OK;
+ __skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/TXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/TXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
+ frags[f]));
+
+ if (txgbe_maybe_stop_tx(tx_ring, count + adapter->desc_reserved + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ /* if we have a HW VLAN tag being added default to the HW one */
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << TXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= TXGBE_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ goto out_drop;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
+ TXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= TXGBE_TX_FLAGS_SW_VLAN;
+ vlan_addlen += VLAN_HLEN;
+ }
+
+ if (protocol == htons(ETH_P_8021Q) ||
+ protocol == htons(ETH_P_8021AD)) {
+ 
tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; + vlan_addlen += VLAN_HLEN; + } + + protocol = vlan_get_protocol(skb); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__TXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + tx_flags |= TXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#ifdef CONFIG_PCI_IOV + /* Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. + */ + if (adapter->flags & TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= TXGBE_TX_FLAGS_CC; + +#endif + + if ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (TXGBE_TX_FLAGS_HW_VLAN | TXGBE_TX_FLAGS_SW_VLAN)) || + skb->priority != TC_PRIO_CONTROL)) { + tx_flags &= ~TXGBE_TX_FLAGS_VLAN_PRIO_MASK; +#if IS_ENABLED(CONFIG_FCOE) + /* for FCoE with DCB, we force the priority to what + * was specified by the switch + */ + if ((adapter->flags & TXGBE_FLAG_FCOE_ENABLED) && + (protocol == htons(ETH_P_FCOE) || + protocol == htons(ETH_P_FIP))) + tx_flags |= adapter->fcoe.up << + TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + else +#endif /* CONFIG_FCOE */ + tx_flags |= skb->priority << + TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & TXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + TXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= TXGBE_TX_FLAGS_HW_VLAN; + } + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + +#if IS_ENABLED(CONFIG_FCOE) + /* setup tx offload for FCoE */ + if (protocol == htons(ETH_P_FCOE) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { + tso = txgbe_fso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + + goto xmit_fcoe; + } else if (protocol == htons(ETH_P_FIP)) { + /* FCoE stack has a bug where it does not set the network + * header offset for FIP frames sent resulting into MACLEN + * being set to ZERO in the Tx context descriptor. + * This will cause MDD events when trying to Tx such frames. 
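+ * Work around it by setting the network header offset by hand
+ * before the context descriptor is built.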
+ */ + if (!skb_network_offset(skb)) { + if (tx_flags & (TXGBE_TX_FLAGS_HW_VLAN | + TXGBE_TX_FLAGS_SW_VLAN)) + skb_set_network_header(skb, + sizeof(struct ethhdr) + + sizeof(struct vlan_hdr) + + vlan_addlen); + else + skb_set_network_header(skb, + sizeof(struct ethhdr)); + } + } +#endif /* CONFIG_FCOE */ + + tso = txgbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + txgbe_tx_csum(tx_ring, first, dptype); + + /* add the ATR filter if ATR is on */ + if (test_bit(__TXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + txgbe_atr(tx_ring, first, dptype); + +#if IS_ENABLED(CONFIG_FCOE) +xmit_fcoe: +#endif /* CONFIG_FCOE */ + if (txgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + +cleanup_tx_tstamp: + if (unlikely(tx_flags & TXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *tx_ring; + unsigned int r_idx = skb->queue_mapping; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (!adapter->num_tx_queues) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; + + if (unlikely(test_bit(__TXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; + + if (!tx_ring->tx_buffer_info) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + return txgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * txgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_set_mac(struct net_device *netdev, void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr((u8 *)addr->sa_data)) + return -EADDRNOTAVAIL; + + txgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + txgbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + +static int txgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data; + int prtad, devad, ret; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 value = 0; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + ret = txgbe_read_mdio(&hw->phy_dev, prtad, devad, mii->reg_num, + &value); + if (ret < 0) + return ret; + mii->val_out = value; + return 
0;
+ }
+ goto out;
+ } else {
+ if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) {
+ return txgbe_write_mdio(&hw->phy_dev, prtad, devad,
+ mii->reg_num, mii->val_in);
+ }
+ goto out;
+ }
+
+out:
+ return -EOPNOTSUPP;
+}
+
+int txgbe_find_nth_limited_vlan(struct txgbe_adapter *adapter, int vlan)
+{
+ return bitmap_weight(adapter->limited_vlans, vlan + 1) - 1;
+}
+
+void txgbe_del_vlan_limit(struct txgbe_adapter *adapter, int vlan)
+{
+ int new_queue_rate_limit[64];
+ int idx = 0;
+ int i = 0, j = 0;
+
+ if (!test_bit(vlan, adapter->limited_vlans))
+ return;
+
+ idx = txgbe_find_nth_limited_vlan(adapter, vlan);
+ for (; i < bitmap_weight(adapter->limited_vlans, 4096); i++) {
+ if (i != idx)
+ new_queue_rate_limit[j++] = adapter->queue_rate_limit[i];
+ }
+
+ memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64);
+ clear_bit(vlan, adapter->limited_vlans);
+}
+
+void txgbe_set_vlan_limit(struct txgbe_adapter *adapter, int vlan, int rate_limit)
+{
+ int new_queue_rate_limit[64];
+ int idx = 0;
+ int i = 0, j = 0;
+
+ if (test_and_set_bit(vlan, adapter->limited_vlans)) {
+ idx = txgbe_find_nth_limited_vlan(adapter, vlan);
+ adapter->queue_rate_limit[idx] = rate_limit;
+ return;
+ }
+
+ idx = txgbe_find_nth_limited_vlan(adapter, vlan);
+ for (; j < bitmap_weight(adapter->limited_vlans, 4096); j++) {
+ if (j == idx)
+ new_queue_rate_limit[j] = rate_limit;
+ else
+ new_queue_rate_limit[j] = adapter->queue_rate_limit[i++];
+ }
+
+ memcpy(adapter->queue_rate_limit, new_queue_rate_limit, sizeof(int) * 64);
+}
+
+void txgbe_check_vlan_rate_limit(struct txgbe_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vlan_rate_link_speed)
+ return;
+
+ if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED ||
+ adapter->flags & TXGBE_FLAG_FCOE_ENABLED) {
+ e_dev_info("cannot limit VLAN rate while SR-IOV or FCoE is enabled\n");
+ goto resume_rate;
+ }
+
+ if (txgbe_link_mbps(adapter) != adapter->vlan_rate_link_speed) {
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "Link speed has changed; VLAN transmit rate limiting is disabled\n");
+ goto resume_rate;
+ }
+
+ if (adapter->active_vlan_limited > adapter->num_tx_queues) {
+ e_dev_err("more rate-limited VLANs than Tx rings, disabling VLAN rate limit\n");
+ goto resume_rate;
+ }
+
+ for (i = 0; i < adapter->active_vlan_limited; i++) {
+ txgbe_set_queue_rate_limit(&adapter->hw,
+ (adapter->num_tx_queues - i - 1), adapter->queue_rate_limit[i]);
+ }
+ return;
+resume_rate:
+ e_dev_info("clearing all VLAN rate limits\n");
+ bitmap_zero(adapter->limited_vlans, 4096);
+ adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096);
+ adapter->vlan_rate_link_speed = 0;
+ memset(adapter->queue_rate_limit, 0, sizeof(int) * 64);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ txgbe_set_queue_rate_limit(&adapter->hw, i, 0);
+}
+
+struct vlan_rate_param {
+ int count;
+ unsigned short vlans[64];
+ unsigned int rates[64];
+};
+
+#define SIOCSVLANRATE (SIOCDEVPRIVATE + 0xe)
+#define SIOCGVLANRATE (SIOCDEVPRIVATE + 0xf)
+
+static int txgbe_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct vlan_rate_param param;
+ int i;
+ int link_speed;
+ int set_num = 0;
+
+ if (cmd != SIOCSVLANRATE)
+ return -EOPNOTSUPP;
+
+ if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED ||
+ adapter->flags & TXGBE_FLAG_FCOE_ENABLED) {
+ e_dev_err("VLAN rate limit is not supported while SR-IOV or FCoE is enabled\n");
+ return -EINVAL;
+ }
+
+ if (!netif_carrier_ok(netdev) ||
+ adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) {
+ e_dev_err("VLAN rate limit requires link up at 1G or above\n");
+ return -EINVAL;
+ }
+
+ link_speed = txgbe_link_mbps(adapter);
+
+ if (copy_from_user(&param, ifr->ifr_data, sizeof(param)))
+ return -EFAULT;
+
+ if (param.count == 0) {
+ e_dev_info("clearing all VLAN rate limits\n");
+ bitmap_zero(adapter->limited_vlans, 4096);
+ adapter->vlan_rate_link_speed = 0;
+ memset(adapter->queue_rate_limit, 0, sizeof(int) * 64);
+ goto after_set;
+ }
+
+ /* validate count before indexing the user-supplied arrays */
+ if (param.count < 0 || param.count > 64)
+ return -EINVAL;
+
+ for (i = 0; i < param.count; i++) {
+ if (param.vlans[i] > 4095 ||
+ (param.rates[i] != 0 && param.rates[i] <= 10) ||
+ param.rates[i] > link_speed) {
+ e_dev_err("Invalid param: VLAN_ID(0~4095): %d, rate(0,11~linkspeed):%d\n",
+ param.vlans[i], param.rates[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < param.count; i++)
+ if (param.rates[i])
+ set_num++;
+ else
+ if (test_bit(param.vlans[i], adapter->limited_vlans) &&
+ param.rates[i] == 0)
+ set_num--;
+
+ if (set_num + adapter->active_vlan_limited > adapter->num_tx_queues - 1)
+ return -EINVAL;
+
+ adapter->vlan_rate_link_speed = link_speed;
+ for (i = 0; i < param.count; i++)
+ if (param.rates[i])
+ txgbe_set_vlan_limit(adapter, param.vlans[i], param.rates[i]);
+ else
+ txgbe_del_vlan_limit(adapter, param.vlans[i]);
+after_set:
+ /* clear all rate limits */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ txgbe_set_queue_rate_limit(&adapter->hw, i, 0);
+
+ adapter->active_vlan_limited = bitmap_weight(adapter->limited_vlans, 4096);
+
+ for (i = 0; i < adapter->active_vlan_limited; i++) {
+ txgbe_set_queue_rate_limit(&adapter->hw,
+ adapter->num_tx_queues - i - 1, adapter->queue_rate_limit[i]);
+ }
+ return 0;
+}
+
+static int txgbe_get_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct vlan_rate_param param = {};
+ int i = 0, n = 0;
+
+ if (cmd != SIOCGVLANRATE)
+ return -EOPNOTSUPP;
+
+ for_each_set_bit(i, adapter->limited_vlans, 4096) {
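+ /* report each rate-limited VLAN together with its configured rate */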
+
+static int txgbe_get_vlan_rate_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct vlan_rate_param param;
+	int i = 0, n = 0;
+
+	if (cmd != SIOCGVLANRATE)
+		return -EOPNOTSUPP;
+
+	/* zero the whole struct so unused entries do not leak stack data */
+	memset(&param, 0, sizeof(param));
+
+	for_each_set_bit(i, adapter->limited_vlans, 4096) {
+		if (n >= 64)
+			break;
+		param.vlans[n] = i;
+		param.rates[n] = adapter->queue_rate_limit[n];
+		n++;
+	}
+	param.count = n;
+
+	if (copy_to_user(ifr->ifr_data, &param, sizeof(param)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int txgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	switch (cmd) {
+	case SIOCGHWTSTAMP:
+		return txgbe_ptp_get_ts_config(adapter, ifr);
+	case SIOCSHWTSTAMP:
+		return txgbe_ptp_set_ts_config(adapter, ifr);
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return txgbe_mii_ioctl(netdev, ifr, cmd);
+	case SIOCSVLANRATE:
+		return txgbe_vlan_rate_ioctl(netdev, ifr, cmd);
+	case SIOCGVLANRATE:
+		return txgbe_get_vlan_rate_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int txgbe_siocdevprivate(struct net_device *netdev, struct ifreq *ifr,
+				void __user *data, int cmd)
+{
+	return txgbe_ioctl(netdev, ifr, cmd);
+}
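And the matching SIOCGVLANRATE read-back, continuing the userspace sketch above (same headers and struct; the interface name is again a placeholder):

#define SIOCGVLANRATE (SIOCDEVPRIVATE + 0xf)

/* Read back the currently limited VLANs and their rates (sketch). */
int get_vlan_rates(int sock, struct vlan_rate_param *param)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)param;

	if (ioctl(sock, SIOCGVLANRATE, &ifr))
		return -1;
	/* on success, param->count pairs of (vlans[i], rates[i]) are valid */
	return 0;
}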
+
+/* txgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to txgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * 802.1Q priority maps to a packet buffer that exists.
+ */
+static void txgbe_validate_rtr(struct txgbe_adapter *adapter, u8 tc)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 reg, rsave;
+	int i;
+
+	reg = rd32(hw, TXGBE_RDB_UP2TC);
+	rsave = reg;
+
+	for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = 0xF & (reg >> (i * TXGBE_RDB_UP2TC_UP_SHIFT));
+
+		/* If up2tc is out of bounds default to zero */
+		if (up2tc > tc)
+			reg &= ~(0xF << (i * TXGBE_RDB_UP2TC_UP_SHIFT));
+	}
+
+	if (reg != rsave)
+		wr32(hw, TXGBE_RDB_UP2TC, reg);
+}
+
+/**
+ * txgbe_set_prio_tc_map - Configure netdev prio tc map
+ * @adapter: Pointer to adapter struct
+ *
+ * Populate the netdev user priority to tc map
+ */
+static void txgbe_set_prio_tc_map(struct txgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	struct txgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+	struct ieee_ets *ets = adapter->txgbe_ieee_ets;
+	u8 prio;
+
+	for (prio = 0; prio < TXGBE_DCB_MAX_USER_PRIORITY; prio++) {
+		u8 tc = 0;
+
+		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
+			tc = txgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
+		else if (ets)
+			tc = ets->prio_tc[prio];
+
+		netdev_set_prio_tc_map(dev, prio, tc);
+	}
+}
+
+/**
+ * txgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int txgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+
+	/* Hardware supports up to 8 traffic classes */
+	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+		return -EINVAL;
+
+	if (tc && adapter->num_vmdqs > TXGBE_MAX_DCBMACVLANS)
+		return -EBUSY;
+
+	if (adapter->xdp_prog) {
+		if (adapter->num_rx_queues > TXGBE_MAX_XDP_RSS_INDICES) {
+			adapter->old_rss_limit = adapter->ring_feature[RING_F_RSS].limit;
+			adapter->ring_feature[RING_F_FDIR].limit = TXGBE_MAX_XDP_RSS_INDICES;
+			adapter->ring_feature[RING_F_RSS].limit = TXGBE_MAX_XDP_RSS_INDICES;
+			e_dev_info("limiting Tx/Rx rings to %d with XDP enabled\n",
+				   TXGBE_MAX_XDP_RSS_INDICES);
+		} else {
+			adapter->old_rss_limit = 0;
+		}
+	}
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		txgbe_close(dev);
+	else
+		txgbe_reset(adapter);
+
+	txgbe_clear_interrupt_scheme(adapter);
+
+	if (tc) {
+		netdev_set_num_tc(dev, tc);
+		txgbe_set_prio_tc_map(adapter);
+
+		adapter->flags |= TXGBE_FLAG_DCB_ENABLED;
+	} else {
+		netdev_reset_tc(dev);
+
+		adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED;
+
+		adapter->temp_dcb_cfg.pfc_mode_enable = false;
+		adapter->dcb_cfg.pfc_mode_enable = false;
+	}
+
+	txgbe_validate_rtr(adapter, tc);
+
+	txgbe_init_interrupt_scheme(adapter);
+	if (netif_running(dev))
+		txgbe_open(dev);
+	return 0;
+}
+
+static int txgbe_setup_tc_mqprio(struct net_device *dev,
+				 struct tc_mqprio_qopt *mqprio)
+{
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	return txgbe_setup_tc(dev, mqprio->num_tc);
+}
+
+static int
+__txgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		 void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return txgbe_setup_tc_mqprio(dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#ifdef CONFIG_PCI_IOV
+void txgbe_sriov_reinit(struct txgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	txgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+	rtnl_unlock();
+}
+#endif
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		txgbe_reinit_locked(adapter);
+	else
+		txgbe_reset(adapter);
+}
+
+static netdev_features_t txgbe_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+#if IS_ENABLED(CONFIG_DCB)
+	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED)
+		features |= NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_STAG_RX;
+#endif /* CONFIG_DCB */
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	if (adapter->xdp_prog) {
+		adapter->lro_before_xdp = !!(features & NETIF_F_LRO);
+		if (features & NETIF_F_LRO) {
+			e_dev_err("LRO is not supported with XDP\n");
+			features &= ~NETIF_F_LRO;
+		}
+	}
+
+	/* STAG offloads follow the corresponding CTAG offloads */
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_STAG_RX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_TX)
+		features |= NETIF_F_HW_VLAN_STAG_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_TX;
+
+	return features;
+}
+
+static int txgbe_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	netdev_features_t changed = netdev->features ^ features;
+	bool need_reset = false;
+
+	/* Make sure RSC matches LRO, reset if change */
+	if (!(features & NETIF_F_LRO)) {
+		if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)
+			need_reset = true;
+		adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED;
+	} else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) &&
+		   !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) {
+		if (adapter->rx_itr_setting == 1 ||
+		    adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) {
+			adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+			need_reset = true;
+		} else if (changed & NETIF_F_LRO) {
+			e_info(probe, "rx-usecs set too low, falling back to software LRO\n");
+		}
+	}
+
+	/* Check if Flow Director n-tuple support was enabled or disabled. If
+	 * the state changed, we need to reset.
+	 */
+	switch (features & NETIF_F_NTUPLE) {
+	case NETIF_F_NTUPLE:
+		/* turn off ATR, enable perfect filters and reset */
+		if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+			need_reset = true;
+
+		adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+		break;
+	default:
+		/* turn off perfect filters, enable ATR and reset */
+		if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+			need_reset = true;
+
+		adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+		/* We cannot enable ATR if VMDq is enabled */
+		if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)
+			break;
+
+		/* We cannot enable ATR if we have 2 or more traffic classes */
+		if (netdev_get_num_tc(netdev) > 1)
+			break;
+
+		/* We cannot enable ATR if RSS is disabled */
+		if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+			break;
+
+		/* A sample rate of 0 indicates ATR disabled */
+		if (!adapter->atr_sample_rate)
+			break;
+
+		adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE;
+		break;
+	}
+
+	if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE &&
+	      features & NETIF_F_RXCSUM))
+		txgbe_clear_vxlan_port(adapter);
+
+	if (features & NETIF_F_RXHASH) {
+		if (!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) {
+			wr32m(&adapter->hw, TXGBE_RDB_RA_CTL,
+			      TXGBE_RDB_RA_CTL_RSS_EN, TXGBE_RDB_RA_CTL_RSS_EN);
+			adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED;
+		}
+	} else {
+		if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) {
+			wr32m(&adapter->hw, TXGBE_RDB_RA_CTL,
+			      TXGBE_RDB_RA_CTL_RSS_EN, ~TXGBE_RDB_RA_CTL_RSS_EN);
+			adapter->flags2 &= ~TXGBE_FLAG2_RSS_ENABLED;
+		}
+	}
+
+	netdev->features = features;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		need_reset = true;
+
+	if (need_reset)
+		txgbe_do_reset(netdev);
+	else if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+		txgbe_set_rx_mode(netdev);
+
+	return 0;
+}
+
+/**
+ * txgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
+ * @dev: The port's netdev
+ * @ti: Tunnel endpoint information
+ **/
+static void txgbe_add_udp_tunnel_port(struct net_device *dev,
+				      struct udp_tunnel_info *ti)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u16 port = ntohs(ti->port);
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+			return;
+
+		if (adapter->vxlan_port == port)
+			return;
+
+		if (adapter->vxlan_port) {
+			netdev_info(dev,
+				    "VXLAN port %d set, not adding port %d\n",
+				    ntohs(adapter->vxlan_port),
+				    ntohs(port));
+			return;
+		}
+
+		adapter->vxlan_port = port;
+		wr32(hw, TXGBE_CFG_VXLAN, port);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (adapter->geneve_port == port)
+			return;
+
+		if (adapter->geneve_port) {
+			netdev_info(dev,
+				    "GENEVE port %d set, not adding port %d\n",
+				    ntohs(adapter->geneve_port),
+				    ntohs(port));
+			return;
+		}
+
+		adapter->geneve_port = port;
+		wr32(hw, TXGBE_CFG_GENEVE, port);
+		break;
+	default:
+		return;
+	}
+}
+
+/**
+ * txgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
+ * @dev: The port's netdev
+ * @ti: Tunnel endpoint information
+ **/
+static void txgbe_del_udp_tunnel_port(struct net_device *dev,
+				      struct udp_tunnel_info *ti)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
+	    ti->type != UDP_TUNNEL_TYPE_GENEVE)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+			return;
+
+		if (adapter->vxlan_port != ntohs(ti->port)) {
+			netdev_info(dev, "VXLAN port %d not found\n",
+				    ntohs(ti->port));
+			return;
+		}
+
+		txgbe_clear_vxlan_port(adapter);
+		adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED;
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (adapter->geneve_port != ntohs(ti->port)) {
+			netdev_info(dev, "GENEVE port %d not found\n",
+				    ntohs(ti->port));
+			return;
+		}
+
+		adapter->geneve_port = 0;
+		break;
+	default:
+		return;
+	}
+}
+
+static int txgbe_udp_tunnel_set(struct net_device *dev,
+				unsigned int table, unsigned int entry,
+				struct udp_tunnel_info *ti)
+{
+	txgbe_add_udp_tunnel_port(dev, ti);
+	return 0;
+}
+
+static int txgbe_udp_tunnel_unset(struct net_device *dev,
+				  unsigned int table, unsigned int entry,
+				  struct udp_tunnel_info *ti)
+{
+	txgbe_del_udp_tunnel_port(dev, ti);
+
+	return 0;
+}
+
+static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
+	.set_port = txgbe_udp_tunnel_set,
+	.unset_port = txgbe_udp_tunnel_unset,
+	.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+	.tables = {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+	},
+};
+
+static int txgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			     struct net_device *dev,
+			     const unsigned char *addr, u16 vid,
+			     u16 flags,
+			     struct netlink_ext_ack *extack)
+{
+	/* guarantee we can provide a unique filter for the unicast address */
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+		if (netdev_uc_count(dev) >= TXGBE_MAX_PF_MACVLANS)
+			return -ENOMEM;
+	}
+
+	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
+}
+
+static int txgbe_ndo_bridge_setlink(struct net_device *dev,
+				    struct nlmsghdr *nlh,
+				    __always_unused u16 flags,
+				    struct netlink_ext_ack __always_unused *ext)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct nlattr *attr, *br_spec;
+	int rem;
+
+	if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		__u16 mode;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode == BRIDGE_MODE_VEPA)
+			adapter->flags |= TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE;
+		else if (mode == BRIDGE_MODE_VEB)
+			adapter->flags &= ~TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE;
+		else
+			return -EINVAL;
+
+		adapter->bridge_mode = mode;
+
+		/* re-configure settings related to bridge mode */
+		txgbe_configure_bridge_mode(adapter);
+
+		e_info(drv, "enabling bridge mode: %s\n",
+		       mode == BRIDGE_MODE_VEPA ?
"VEPA" : "VEB"); + } + + return 0; +} + +static int txgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} + +#define TXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +txgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) + vlan_num++; + + if (vlan_depth) + vlan_depth -= VLAN_HLEN; + else + vlan_depth = ETH_HLEN; + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + TXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + + if (skb->encapsulation) { + if (skb->inner_protocol_type == ENCAP_TYPE_ETHER && + skb->inner_protocol != htons(ETH_P_IP) && + skb->inner_protocol != htons(ETH_P_IPV6) && + skb->inner_protocol != htons(ETH_P_TEB)) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + + return features; +} + +static const struct net_device_ops txgbe_netdev_ops = { + .ndo_open = txgbe_open, + .ndo_stop = txgbe_close, + .ndo_start_xmit = txgbe_xmit_frame, + .ndo_select_queue = txgbe_select_queue, + .ndo_set_rx_mode = txgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = txgbe_set_mac, + .ndo_change_mtu = txgbe_change_mtu, + .ndo_tx_timeout = txgbe_tx_timeout, + .ndo_vlan_rx_add_vid = txgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = txgbe_vlan_rx_kill_vid, + .ndo_eth_ioctl = txgbe_ioctl, + .ndo_siocdevprivate = txgbe_siocdevprivate, + .ndo_set_vf_mac = txgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = txgbe_ndo_set_vf_vlan, + .ndo_set_vf_rate = txgbe_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = txgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = txgbe_ndo_set_vf_link_state, + .ndo_set_vf_trust = txgbe_ndo_set_vf_trust, + .ndo_get_vf_config = txgbe_ndo_get_vf_config, + .ndo_get_stats64 = txgbe_get_stats64, + .ndo_setup_tc = __txgbe_setup_tc, +#if IS_ENABLED(CONFIG_FCOE) + .ndo_fcoe_ddp_setup = txgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = txgbe_fcoe_ddp_target, + .ndo_fcoe_ddp_done = txgbe_fcoe_ddp_put, + .ndo_fcoe_enable = txgbe_fcoe_enable, + .ndo_fcoe_disable = txgbe_fcoe_disable, + .ndo_fcoe_get_wwn = txgbe_fcoe_get_wwn, +#endif + .ndo_set_features = txgbe_set_features, + .ndo_fix_features = txgbe_fix_features, + .ndo_fdb_add = txgbe_ndo_fdb_add, + .ndo_bridge_setlink = txgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = txgbe_ndo_bridge_getlink, + .ndo_features_check = txgbe_features_check, + .ndo_bpf = txgbe_xdp, + .ndo_xdp_xmit = txgbe_xdp_xmit, + .ndo_xsk_wakeup = txgbe_xsk_wakeup, +}; + +void txgbe_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &txgbe_netdev_ops; + txgbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * txgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in 
txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev,
+		       const struct pci_device_id __always_unused *ent)
+{
+	struct net_device *netdev;
+	struct txgbe_adapter *adapter = NULL;
+	struct txgbe_hw *hw = NULL;
+	static int cards_found;
+	int err, pci_using_dac, expected_gts;
+	u16 offset = 0;
+	u16 eeprom_verh = 0, eeprom_verl = 0;
+	u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
+	u32 etrack_id = 0;
+	u16 build = 0, major = 0, patch = 0;
+	char *info_string, *i_s_var;
+	u8 part_str[TXGBE_PBANUM_LENGTH];
+	unsigned int indices = MAX_TX_QUEUES;
+	bool disable_dev = false;
+	u16 pvalue = 0;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+	} else {
+		err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(pci_dev_to_dev(pdev), "No usable DMA configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+					   IORESOURCE_MEM), txgbe_driver_name);
+	if (err) {
+		dev_err(pci_dev_to_dev(pdev),
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+	/* errata 16 */
+	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+					   PCI_EXP_DEVCTL_READRQ,
+					   0x1000);
+
+	netdev = alloc_etherdev_mq(sizeof(struct txgbe_adapter), indices);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev));
+
+	adapter = netdev_priv(netdev);
+	adapter->indices = indices;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	adapter->io_addr = hw->hw_addr;
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	txgbe_assign_netdev_ops(netdev);
+
+	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+	err = txgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;
+
+	/* check_options must be called before setup_link to set up
+	 * hw->fc completely
+	 */
+	txgbe_check_options(adapter);
+	txgbe_bp_mode_setting(adapter);
+	hw->mac.ops.set_lan_id(hw);
+
+	/* check if flash load is done after hw power up */
+	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST);
+	if (err)
+		goto err_sw_init;
+	err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST);
+	if (err)
+		goto err_sw_init;
+
+	/* reset_hw fills in the perm_addr as well */
+	hw->phy.reset_if_overtemp = true;
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		txgbe_get_hw_control(adapter);
+	err = hw->mac.ops.reset_hw(hw);
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40)
+		txgbe_release_hw_control(adapter);
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+	hw->phy.reset_if_overtemp = false;
+	if (err == TXGBE_ERR_SFP_NOT_PRESENT) {
+		err = 0;
+	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
+		e_dev_err("An unsupported SFP+ module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported module.\n");
+		goto err_sw_init;
+	} else if (err) {
+		e_dev_err("HW Init failed: %d\n", err);
+		goto err_sw_init;
+	}
+
+	if (txgbe_is_lldp(hw))
+		e_dev_err("Can not get lldp flags from flash\n");
+
+#ifdef CONFIG_PCI_IOV
+	if (adapter->max_vfs > 0) {
+		e_dev_warn("Enabling SR-IOV VFs using the max_vfs parameter is deprecated.\n");
+		e_dev_warn("Please use the pci sysfs interface instead. Ex:\n");
+		e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n",
+			   adapter->max_vfs,
+			   pci_domain_nr(pdev->bus),
+			   pdev->bus->number,
+			   PCI_SLOT(pdev->devfn),
+			   PCI_FUNC(pdev->devfn));
+	}
+
+	if (adapter->flags & TXGBE_FLAG_SRIOV_CAPABLE) {
+		pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
+		txgbe_enable_sriov(adapter);
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_TSO |
+			   NETIF_F_TSO6 |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM |
+			   NETIF_F_HW_CSUM;
+
+	netdev->gso_partial_features = TXGBE_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL |
+			    TXGBE_GSO_PARTIAL_FEATURES;
+
+	netdev->features |= NETIF_F_SCTP_CRC;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_FILTER |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_RXALL;
+
+	netdev->hw_features |= NETIF_F_NTUPLE |
+			       NETIF_F_HW_TC;
+
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_STAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	/* give us the option of enabling RSC/LRO later */
+	if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) {
+		netdev->hw_features |= NETIF_F_LRO;
+		netdev->features |= NETIF_F_LRO;
+	}
+
+	if (netdev->features & NETIF_F_LRO) {
+		if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) &&
+		    (adapter->rx_itr_setting == 1 ||
+		     adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR)) {
+			adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED;
+		} else if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) {
+			e_dev_info("InterruptThrottleRate set too high, falling back to software LRO\n");
+		}
+	}
+
+	netdev->min_mtu = ETH_MIN_MTU;
+	netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
+#if IS_ENABLED(CONFIG_DCB)
+	netdev->dcbnl_ops = &dcbnl_ops;
+#endif /* CONFIG_DCB */
+
+#if IS_ENABLED(CONFIG_FCOE)
+	if (adapter->flags & TXGBE_FLAG_FCOE_CAPABLE) {
+		unsigned int fcoe_l;
+
+		netdev->features |= NETIF_F_FSO |
+				    NETIF_F_FCOE_CRC;
+
+		fcoe_l = min_t(int, TXGBE_RDB_FCRE_TBL_SIZE, num_online_cpus());
+		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
+
+		netdev->vlan_features |= NETIF_F_FSO |
+					 NETIF_F_FCOE_CRC |
+					 NETIF_F_FCOE_MTU;
+	}
+#endif /* CONFIG_FCOE */
+	if (pci_using_dac) {
+		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
+
+	hw->eeprom.ops.init_params(hw);
+
+	/* make sure the EEPROM is good */
+	if (hw->eeprom.ops.validate_checksum(hw, NULL)) {
+		e_dev_err("The EEPROM Checksum Is Not Valid\n");
+		wr32(hw, TXGBE_MIS_RST, TXGBE_MIS_RST_SW_RST);
+		err = -EIO;
+		goto err_sw_init;
+	}
+
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		e_dev_err("invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+
+	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+	memset(&adapter->etype_filter_info, 0,
+	       sizeof(struct txgbe_etype_filter_info));
+	memset(&adapter->ft_filter_info, 0,
+	       sizeof(struct txgbe_5tuple_filter_info));
+
+	timer_setup(&adapter->service_timer, txgbe_service_timer, 0);
+
+	if (TXGBE_REMOVED(hw->hw_addr)) {
+		err = -EIO;
+		goto err_sw_init;
+	}
+	INIT_WORK(&adapter->service_task, txgbe_service_task);
+	INIT_WORK(&adapter->sfp_sta_task, txgbe_sfp_phy_status_work);
+	set_bit(__TXGBE_SERVICE_INITED, &adapter->state);
+	clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state);
+
+	err = txgbe_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* WOL not supported for all devices */
+	adapter->wol = 0;
+
+	if ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP) {
+		adapter->wol = TXGBE_PSR_WKUP_CTL_MAG;
+		wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol);
+	}
+	hw->wol_enabled = !!(adapter->wol);
+
+	device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol);
+
+	/* Save off EEPROM version number and Option Rom version which
+	 * together make a unique identifier for the eeprom
+	 */
+	hw->eeprom.ops.read(hw,
+			    hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
+			    &eeprom_verh);
+	hw->eeprom.ops.read(hw,
+			    hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
+			    &eeprom_verl);
+	etrack_id = (eeprom_verh << 16) | eeprom_verl;
+
+	hw->eeprom.ops.read(hw,
+			    hw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, &offset);
+
+	/* Make sure offset to iSCSI block is valid */
+	if (offset != 0x0 && offset != 0xffff) {
+		hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh);
+		hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl);
+
+		/* Only display Option Rom if it exists */
+		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+			major = eeprom_cfg_blkl >> 8;
+			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+			patch = eeprom_cfg_blkh & 0x00ff;
+
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x, %d.%d.%d", etrack_id, major, build,
+				 patch);
+		} else {
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x", etrack_id);
+		}
+	} else {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "0x%08x", etrack_id);
+	}
+
+	adapter->etrack_id = etrack_id;
+
+	if (strcmp(adapter->eeprom_id, adapter->fl_version) == 0) {
+		memcpy(adapter->fw_version, adapter->eeprom_id, sizeof(adapter->eeprom_id));
+
+		if (hw->bus.lan_id == 0)
+			e_dev_info("Running Firmware Version: %s\n", adapter->eeprom_id);
+	} else {
+		snprintf(adapter->fw_version, sizeof(adapter->fw_version), "%s,ACT.%s",
+			 adapter->fl_version, adapter->eeprom_id);
+
+		if (hw->bus.lan_id == 0)
+			e_dev_info("Running Firmware Version: %s, Flash Firmware Version: %s\n",
+				   adapter->eeprom_id, adapter->fl_version);
+	}
+
+	/* reset the hardware with the new settings */
+	err = hw->mac.ops.start_hw(hw);
+	if (err == TXGBE_ERR_EEPROM_VERSION) {
+		/* We are running on a pre-production device, log a warning */
+		e_dev_warn("This device is a pre-production adapter\n");
+	} else if (err) {
+		e_dev_err("HW init failed\n");
+		goto err_register;
+	}
+
+	/* pick up the PCI bus settings for reporting later */
+	hw->mac.ops.get_bus_info(hw);
+
+	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	pci_set_drvdata(pdev, adapter);
+	adapter->netdev_registered = true;
+	pci_save_state(pdev);
+
+	/* power down the optics for SFP+ fiber or mv phy */
+	if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) ||
+	      adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) {
+		if (hw->phy.media_type == txgbe_media_type_fiber ||
+		    hw->phy.media_type == txgbe_media_type_fiber_qsfp)
+			hw->mac.ops.disable_tx_laser(hw);
+		else if (hw->phy.media_type == txgbe_media_type_copper &&
+			 (hw->subsystem_device_id & 0xF0) != TXGBE_ID_SFI_XAUI)
+			txgbe_external_phy_suspend(hw);
+	}
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+	/* keep stopping all the transmit queues for older kernels */
+	netif_tx_stop_all_queues(netdev);
+
+	/* print all messages at the end so that we use our eth%d name */
+
+	/* calculate the expected PCIe bandwidth required for optimal
+	 * performance. Note that some older parts will never have enough
+	 * bandwidth due to being older generation PCIe parts. We clamp these
+	 * parts to ensure that no warning is displayed, as this could confuse
+	 * users otherwise.
+	 */
+	expected_gts = txgbe_enumerate_functions(adapter) * 10;
+
+	/* don't check link if we failed to enumerate functions */
+	if (expected_gts > 0)
+		txgbe_check_minimum_link(adapter, expected_gts);
+
+	if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)
+		e_info(probe, "NCSI : supported");
+	else
+		e_info(probe, "NCSI : unsupported");
+
+	/* First try to read PBA as a string */
+	err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
+	if (err)
+		strscpy((char *)part_str, "Unknown", TXGBE_PBANUM_LENGTH);
+	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
+		e_info(probe, "PHY: %d, SFP+: %d, PBA No: %s\n",
+		       hw->phy.type, hw->phy.sfp_type, part_str);
+	else
+		e_info(probe, "PHY: %d, PBA No: %s\n",
+		       hw->phy.type, part_str);
+
+	e_dev_info("%pM\n", netdev->dev_addr);
+
+#define INFO_STRING_LEN 255
+	info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+	if (!info_string) {
+		e_err(probe, "allocation for info string failed\n");
+		goto no_info_string;
+	}
+	i_s_var = info_string;
+	i_s_var += sprintf(info_string, "Enabled Features: ");
+	i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ",
+			   adapter->num_rx_queues, adapter->num_tx_queues);
+#if IS_ENABLED(CONFIG_FCOE)
+	if (adapter->flags & TXGBE_FLAG_FCOE_ENABLED)
+		i_s_var += sprintf(i_s_var, "FCoE ");
+#endif
+	if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE)
+		i_s_var += sprintf(i_s_var, "FdirHash ");
+	if (adapter->flags & TXGBE_FLAG_DCB_ENABLED)
+		i_s_var += sprintf(i_s_var, "DCB ");
+	if (adapter->flags & TXGBE_FLAG_TPH_ENABLED)
+		i_s_var += sprintf(i_s_var, "TPH ");
+	if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)
+		i_s_var += sprintf(i_s_var, "RSC ");
+	else if (netdev->features & NETIF_F_LRO)
+		i_s_var += sprintf(i_s_var, "LRO ");
+
+	if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)
+		i_s_var += sprintf(i_s_var, "vxlan_rx ");
+
+	WARN_ON_ONCE(i_s_var > (info_string + INFO_STRING_LEN));
+	/* end features printing */
+	e_info(probe, "%s\n", info_string);
+	kfree(info_string);
+no_info_string:
+#ifdef CONFIG_PCI_IOV
+	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) {
+		int i;
+
+		for (i = 0; i < adapter->num_vfs; i++)
+			txgbe_vf_configuration(pdev, (i | 0x10000000));
+	}
+#endif
+
+	e_info(probe, "WangXun(R) RP1000/RP2000/FF50XX Network Connection\n");
+	cards_found++;
+
+	if (txgbe_sysfs_init(adapter))
+		e_err(probe, "failed to allocate sysfs resources\n");
+
+	txgbe_dbg_adapter_init(adapter);
+
+	if (txgbe_mng_present(hw) && txgbe_is_sfp(hw) &&
+	    ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		hw->mac.ops.setup_link(hw,
+				       TXGBE_LINK_SPEED_25GB_FULL | TXGBE_LINK_SPEED_10GB_FULL |
+				       TXGBE_LINK_SPEED_1GB_FULL, true);
+
+	hw->mac.ops.setup_eee(hw,
+			      (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) &&
+			      (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED));
+
+	if (hw->mac.type == txgbe_mac_sp) {
+		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &pvalue);
+		pvalue |= 0x10;
+		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, pvalue);
+		adapter->cmplt_to_dis = true;
+		e_info(probe, "disable completion timeout\n");
+	}
+
+	return 0;
+
+err_register:
+	txgbe_clear_interrupt_scheme(adapter);
+	txgbe_release_hw_control(adapter);
+err_sw_init:
+#ifdef CONFIG_PCI_IOV
+	txgbe_disable_sriov(adapter);
+#endif /* CONFIG_PCI_IOV */
+	adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP;
+	kfree(adapter->mac_table);
+	iounmap(adapter->io_addr);
+	bitmap_free(adapter->af_xdp_zc_qps);
+err_ioremap:
+	disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state);
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	if (!adapter || disable_dev)
+		pci_disable_device(pdev);
+	return err;
+}
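One readability note on the probe path above: the 64-then-32-bit DMA mask dance near the top of txgbe_probe() can be collapsed with dma_set_mask_and_coherent(), which sets both the streaming and coherent masks in one call. A minimal sketch of the equivalent logic (not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: equivalent of the probe's DMA-mask fallback in one helper. */
static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;	/* 64-bit DMA available */
		return 0;
	}
	*using_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}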
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct txgbe_hw *hw;
+	struct net_device *netdev;
+	bool disable_dev;
+
+	/* if !adapter then we already cleaned up in probe */
+	if (!adapter)
+		return;
+
+	hw = &adapter->hw;
+	mutex_destroy(&adapter->e56_lock);
+	txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
+	netdev = adapter->netdev;
+	txgbe_dbg_adapter_exit(adapter);
+
+	set_bit(__TXGBE_REMOVING, &adapter->state);
+	cancel_work_sync(&adapter->service_task);
+
+	txgbe_sysfs_exit(adapter);
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+
+#ifdef CONFIG_PCI_IOV
+	txgbe_disable_sriov(adapter);
+#endif
+
+#if IS_ENABLED(CONFIG_FCOE)
+	txgbe_fcoe_ddp_disable(adapter);
+#endif /* CONFIG_FCOE */
+
+	txgbe_clear_interrupt_scheme(adapter);
+	txgbe_release_hw_control(adapter);
+
+	kfree(adapter->txgbe_ieee_pfc);
+	kfree(adapter->txgbe_ieee_ets);
+
+	iounmap(adapter->io_addr);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	kfree(adapter->mac_table);
+	bitmap_free(adapter->af_xdp_zc_qps);
+
+	disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state);
+	free_netdev(netdev);
+
+	if (disable_dev)
+		pci_disable_device(pdev);
+}
+
+static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev)
+{
+	u16 value;
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+	if (value == TXGBE_FAILED_READ_CFG_WORD) {
+		txgbe_remove_adapter(hw);
+		return true;
+	}
+	return false;
+}
+
+u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg)
+{
+	struct txgbe_adapter *adapter = hw->back;
+	u16 value;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return TXGBE_FAILED_READ_CFG_WORD;
+	pci_read_config_word(adapter->pdev, reg, &value);
+	if (value == TXGBE_FAILED_READ_CFG_WORD &&
+	    txgbe_check_cfg_remove(hw, adapter->pdev))
+		return TXGBE_FAILED_READ_CFG_WORD;
+	return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 txgbe_read_pci_cfg_dword(struct txgbe_hw *hw, u32 reg)
+{
+	struct txgbe_adapter *adapter = hw->back;
+	u32 value;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return TXGBE_FAILED_READ_CFG_DWORD;
+	pci_read_config_dword(adapter->pdev, reg, &value);
+	if (value == TXGBE_FAILED_READ_CFG_DWORD &&
+	    txgbe_check_cfg_remove(hw, adapter->pdev))
+		return TXGBE_FAILED_READ_CFG_DWORD;
+	return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value)
+{
+	struct txgbe_adapter *adapter = hw->back;
+
+	if (TXGBE_REMOVED(hw->hw_addr))
+		return;
+	pci_write_config_word(adapter->pdev, reg, value);
+}
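Callers of these config-space accessors rely on the PCI convention that reads from a missing device return all ones. An illustrative caller (hypothetical helper and register parameter, not in the patch):

/* Sketch: detecting surprise removal through the accessor above. */
static bool example_link_status_ok(struct txgbe_hw *hw, u32 lnksta_reg)
{
	u16 status = txgbe_read_pci_cfg_word(hw, lnksta_reg);

	if (status == TXGBE_FAILED_READ_CFG_WORD)
		return false;	/* device gone: every config read is 0xFFFF */
	return status & PCI_EXP_LNKSTA_DLLLA;
}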
+
+/**
+ * txgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t txgbe_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	struct txgbe_hw *hw = &adapter->hw;
+	struct pci_dev *bdev, *vfdev;
+	u32 dw0, dw1, dw2, dw3;
+	int vf, pos;
+	u16 req_id, pf_func;
+
+	if (adapter->num_vfs == 0)
+		goto skip_bad_vf_detection;
+
+	bdev = pdev->bus->self;
+	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
+		bdev = bdev->bus->self;
+
+	if (!bdev)
+		goto skip_bad_vf_detection;
+
+	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		goto skip_bad_vf_detection;
+
+	dw0 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+	dw1 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+	dw2 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+	dw3 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+	if (TXGBE_REMOVED(hw->hw_addr))
+		goto skip_bad_vf_detection;
+
+	req_id = dw1 >> 16;
+	/* if bit 7 of the requestor ID is set then it's a VF */
+	if (!(req_id & 0x0080))
+		goto skip_bad_vf_detection;
+
+	pf_func = req_id & 0x01;
+	if (pf_func == (pdev->devfn & 1)) {
+		vf = (req_id & 0x7F) >> 1;
+		e_dev_err("VF %d has caused a PCIe error\n", vf);
+		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
+			  dw0, dw1, dw2, dw3);
+
+		/* Find the pci device of the offending VF */
+		vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC,
+				       TXGBE_VF_DEVICE_ID, NULL);
+		while (vfdev) {
+			if (vfdev->devfn == (req_id & 0xFF))
+				break;
+			vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC,
+					       TXGBE_VF_DEVICE_ID, vfdev);
+		}
+		/* There's a slim chance the VF could have been hot
+		 * plugged, so if it is no longer present we don't need
+		 * to issue the VFLR. Just clean up the AER in that case.
+		 */
+		if (vfdev) {
+			txgbe_issue_vf_flr(adapter, vfdev);
+			/* Free device reference count */
+			pci_dev_put(vfdev);
+		}
+	}
+
+	/* Even though the error may have occurred on the other port
+	 * we still need to increment the vf error reference count for
+	 * both ports because the I/O resume function will be called
+	 * for both of them.
+	 */
+	adapter->vferr_refcount++;
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
+
+	if (!test_bit(__TXGBE_SERVICE_INITED, &adapter->state))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(netdev))
+		txgbe_close(netdev);
+
+	if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state))
+		pci_disable_device(pdev);
+	rtnl_unlock();
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
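For orientation, the AER core drives the three handlers registered further below in a fixed order. A compressed, simplified sketch of that calling sequence (core-side view, not part of the patch; the real flow in pcie/err.c has more states):

/* Simplified view of how the PCI AER core exercises these callbacks. */
static void example_aer_recovery_flow(struct pci_dev *pdev,
				      const struct pci_error_handlers *eh)
{
	pci_ers_result_t st;

	st = eh->error_detected(pdev, pci_channel_io_frozen);
	if (st == PCI_ERS_RESULT_NEED_RESET)
		st = eh->slot_reset(pdev);	/* invoked after the bus/slot reset */
	if (st == PCI_ERS_RESULT_RECOVERED && eh->resume)
		eh->resume(pdev);		/* traffic may restart */
}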
+
+/**
+ * txgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t txgbe_io_slot_reset(struct pci_dev *pdev)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+	u16 value;
+
+	if (adapter->hw.mac.type == txgbe_mac_sp && adapter->cmplt_to_dis) {
+		pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &value);
+		value |= 0x10;
+		pcie_capability_write_word(adapter->pdev, PCI_EXP_DEVCTL2, value);
+		adapter->cmplt_to_dis = false;
+	}
+
+	if (pci_enable_device_mem(pdev)) {
+		e_err(probe, "Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		/* make sure to clear flag */
+		smp_mb__before_atomic();
+		clear_bit(__TXGBE_DISABLED, &adapter->state);
+		adapter->hw.hw_addr = adapter->io_addr;
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		/* After second error pci->state_saved is false, this
+		 * resets it so EEH doesn't break.
+		 */
+		pci_save_state(pdev);
+
+		pci_wake_from_d3(pdev, false);
+
+		txgbe_reset(adapter);
+
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	return result;
+}
+
+/**
+ * txgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void txgbe_io_resume(struct pci_dev *pdev)
+{
+	struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	if (adapter->vferr_refcount) {
+		e_info(drv, "Resuming after VF err\n");
+		adapter->vferr_refcount--;
+		return;
+	}
+#endif
+	rtnl_lock();
+	if (netif_running(netdev))
+		txgbe_open(netdev);
+
+	netif_device_attach(netdev);
+	rtnl_unlock();
+}
+
+struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw)
+{
+	return ((struct txgbe_adapter *)hw->back)->netdev;
+}
+
+struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw)
+{
+	struct txgbe_adapter *adapter =
+		container_of(hw, struct txgbe_adapter, hw);
+
+	return (struct txgbe_msg *)&adapter->msg_enable;
+}
+
+static const struct pci_error_handlers txgbe_err_handler = {
+	.error_detected = txgbe_io_error_detected,
+	.slot_reset = txgbe_io_slot_reset,
+	.resume = txgbe_io_resume,
+};
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops txgbe_pm_ops = {
+	.suspend = txgbe_suspend,
+	.resume = txgbe_resume,
+	.freeze = txgbe_freeze,
+	.thaw = txgbe_thaw,
+	.poweroff = txgbe_suspend,
+	.restore = txgbe_resume,
+};
+#endif
+
+static struct pci_driver txgbe_driver = {
+	.name = txgbe_driver_name,
+	.id_table = txgbe_pci_tbl,
+	.probe = txgbe_probe,
+	.remove = txgbe_remove,
+	.driver = {
+#ifdef CONFIG_PM
+		.pm = &txgbe_pm_ops,
+#endif
+	},
+	.shutdown = txgbe_shutdown,
+	.sriov_configure = txgbe_pci_sriov_configure,
+	.err_handler = &txgbe_err_handler
+};
+
+bool txgbe_is_txgbe(struct pci_dev *pcidev)
+{
+	return pci_dev_driver(pcidev) == &txgbe_driver;
+}
+
+/**
+ * txgbe_init_module - Driver Registration Routine
+ *
+ * txgbe_init_module is the first routine called when the driver is
+ * loaded. It sets up the service workqueue and debugfs entries, then
+ * registers the driver with the PCI subsystem.
+ **/
+static int __init txgbe_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n", txgbe_driver_string, txgbe_driver_version);
+	pr_info("%s\n", txgbe_copyright);
+
+	txgbe_wq = create_singlethread_workqueue(txgbe_driver_name);
+	if (!txgbe_wq) {
+		pr_err("%s: Failed to create workqueue\n", txgbe_driver_name);
+		return -ENOMEM;
+	}
+	txgbe_dbg_init();
+
+	ret = pci_register_driver(&txgbe_driver);
+	if (ret) {
+		txgbe_dbg_exit();
+		destroy_workqueue(txgbe_wq);
+	}
+	return ret;
+}
+
+module_init(txgbe_init_module);
+
+/**
+ * txgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * txgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit txgbe_exit_module(void)
+{
+	pci_unregister_driver(&txgbe_driver);
+	destroy_workqueue(txgbe_wq);
+	txgbe_dbg_exit();
+}
+
+module_exit(txgbe_exit_module);
+
+/* txgbe_main.c */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c
new file mode 100644
index 000000000000..3b9b4fd3df46
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe_type.h"
+#include "txgbe.h"
+#include "txgbe_mbx.h"
+
+/**
+ * txgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	return hw->mbx.ops.read(hw, msg, size, mbx_id);
+}
+
+/**
+ * txgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int err = 0;
+
+	if (size > mbx->size) {
+		err = TXGBE_ERR_MBX;
+		ERROR_REPORT2(TXGBE_ERROR_ARGUMENT,
+			      "Invalid mailbox message size %d", size);
+	} else {
+		err = hw->mbx.ops.write(hw, msg, size, mbx_id);
+	}
+
+	return err;
+}
+
+/**
+ * txgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+	return hw->mbx.ops.check_for_msg(hw, mbx_id);
+}
+
+/**
+ * txgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+	return hw->mbx.ops.check_for_ack(hw, mbx_id);
+}
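A hedged sketch of how a PF service loop might use these wrappers to service one VF; the dispatch action is illustrative only (the real handler lives in the driver's SR-IOV code, which is not part of this hunk):

/* Illustrative PF-side servicing of one VF mailbox (not part of the patch). */
static void example_service_vf_mbx(struct txgbe_adapter *adapter, u16 vf)
{
	struct txgbe_hw *hw = &adapter->hw;
	u32 msgbuf[TXGBE_VXMAILBOX_SIZE];

	/* a VFLR notification means per-VF state must be rebuilt */
	if (!txgbe_check_for_rst(hw, vf))
		e_dev_info("VF %d was reset\n", vf);	/* placeholder action */

	/* a pending request is read out and dispatched on msgbuf[0] */
	if (!txgbe_check_for_msg(hw, vf) &&
	    !txgbe_read_mbx(hw, msgbuf, TXGBE_VXMAILBOX_SIZE, vf))
		e_dev_info("VF %d sent msg 0x%08x\n", vf, msgbuf[0]);
}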
+
+/**
+ * txgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int err = TXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_rst)
+		err = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return err;
+}
+
+/**
+ * txgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && hw->mbx.ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->udelay);
+	}
+
+	if (countdown == 0)
+		ERROR_REPORT2(TXGBE_ERROR_POLLING,
+			      "Polling for VF%d mailbox message timed out", mbx_id);
+
+out:
+	return countdown ? 0 : TXGBE_ERR_MBX;
+}
+
+/**
+ * txgbe_poll_for_ack - Wait for message acknowledgment
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgment
+ **/
+static int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && hw->mbx.ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->udelay);
+	}
+
+	if (countdown == 0)
+		ERROR_REPORT2(TXGBE_ERROR_POLLING,
+			      "Polling for VF%d mailbox ack timed out", mbx_id);
+
+out:
+	return countdown ? 0 : TXGBE_ERR_MBX;
+}
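Built on these poll helpers, the posted variants below give a blocking request/ack exchange. As a sketch of the intended VF-side usage (the message ID, layout, and reply convention are assumptions for illustration only):

/* Hypothetical VF request/reply round trip over the posted mailbox. */
static int example_vf_request(struct txgbe_hw *hw, u32 opcode)
{
	u32 msg[2] = { opcode, 0 };
	int err;

	/* write the request, then poll until the PF acks or we time out */
	err = txgbe_write_posted_mbx(hw, msg, 2, 0);
	if (err)
		return err;

	/* poll for the PF's reply and read it back */
	err = txgbe_read_posted_mbx(hw, msg, 2, 0);
	if (err)
		return err;

	/* assumed convention: PF echoes the opcode with an ack/nack flag */
	return msg[0];
}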
+
+/**
+ * txgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int err = TXGBE_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	err = txgbe_poll_for_msg(hw, mbx_id);
+
+	/* if a message arrived read it, otherwise we timed out */
+	if (!err)
+		err = hw->mbx.ops.read(hw, msg, size, mbx_id);
+out:
+	return err;
+}
+
+/**
+ * txgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+int txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size,
+			   u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int err;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		return TXGBE_ERR_MBX;
+
+	/* send msg */
+	err = hw->mbx.ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!err)
+		err = txgbe_poll_for_ack(hw, mbx_id);
+
+	return err;
+}
+
+/**
+ * txgbe_init_mbx_ops - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the mailbox read and write message function pointers
+ **/
+void txgbe_init_mbx_ops(struct txgbe_hw *hw)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->ops.read_posted = txgbe_read_posted_mbx;
+	mbx->ops.write_posted = txgbe_write_posted_mbx;
+}
+
+/**
+ * txgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw)
+{
+	u32 v2p_mailbox = rd32(hw, TXGBE_VXMAILBOX);
+
+	v2p_mailbox |= hw->mbx.v2p_mailbox;
+	/* read and clear mirrored mailbox flags */
+	v2p_mailbox |= rd32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE);
+	wr32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE, 0);
+	hw->mbx.v2p_mailbox |= v2p_mailbox & TXGBE_VXMAILBOX_R2C_BITS;
+
+	return v2p_mailbox;
+}
+
+/**
+ * txgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask)
+{
+	u32 mailbox = txgbe_read_v2p_mailbox(hw);
+
+	hw->mbx.v2p_mailbox &= ~mask;
+
+	return (mailbox & mask ?
0 : TXGBE_ERR_MBX); +} + +/** + * txgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + /* read clear the pf sts bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + /* read clear the pf ack bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +static int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + if (!txgbe_check_for_bit_vf(hw, (TXGBE_VXMAILBOX_RSTD | + TXGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) +{ + int err = TXGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = txgbe_read_v2p_mailbox(hw); + if (mailbox & TXGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * txgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 __always_unused mbx_id) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_vf(hw, 0); + txgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * txgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +static int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 __always_unused mbx_id) +{ + int err = 0; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = 
txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void txgbe_init_mbx_params_vf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_vf; + mbx->ops.write = txgbe_write_mbx_vf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_vf; + mbx->ops.check_for_ack = txgbe_check_for_ack_vf; + mbx->ops.check_for_rst = txgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) +{ + u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index)); + int err = TXGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, TXGBE_MBVFICR(index), mask); + } + + return err; +} + +/** + * txgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) +{ + u32 reg_offset = (vf < 32) ? 
0 : 1;
+	u32 vf_shift = vf % 32;
+	u32 vflre = 0;
+	int err = TXGBE_ERR_MBX;
+
+	vflre = rd32(hw, TXGBE_VFLRE(reg_offset));
+
+	if (vflre & (1 << vf_shift)) {
+		err = 0;
+		wr32(hw, TXGBE_VFLREC(reg_offset), (1 << vf_shift));
+		hw->mbx.stats.rsts++;
+	}
+
+	return err;
+}
+
+/**
+ * txgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+	int err = TXGBE_ERR_MBX;
+	u32 mailbox;
+
+	while (countdown--) {
+		/* Take ownership of the buffer */
+		wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU);
+
+		/* reserve mailbox for vf use */
+		mailbox = rd32(hw, TXGBE_PXMAILBOX(vf));
+		if (mailbox & TXGBE_PXMAILBOX_PFU) {
+			err = 0;
+			break;
+		}
+
+		/* Wait a bit before trying again */
+		udelay(mbx->udelay);
+	}
+
+	if (err)
+		ERROR_REPORT2(TXGBE_ERROR_POLLING,
+			      "Failed to obtain mailbox lock for VF%d", vf);
+
+	return err;
+}
+
+/**
+ * txgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size,
+			      u16 vf)
+{
+	int err;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	err = txgbe_obtain_mbx_lock_pf(hw, vf);
+	if (err)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	txgbe_check_for_msg_pf(hw, vf);
+	txgbe_check_for_ack_pf(hw, vf);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		wr32a(hw, TXGBE_PXMBMEM(vf), i, msg[i]);
+
+	/* Interrupt VF to tell it a message has been sent and release buffer */
+	/* set mirrored mailbox flags */
+	wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_STS);
+	wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_STS);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+	return err;
+}
+
+/**
+ * txgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/ +static int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_ACK); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void txgbe_init_mbx_params_pf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_pf; + mbx->ops.write = txgbe_write_mbx_pf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_pf; + mbx->ops.check_for_ack = txgbe_check_for_ack_pf; + mbx->ops.check_for_rst = txgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h new file mode 100644 index 000000000000..b8ed0616ec0a --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_MBX_H_ +#define _TXGBE_MBX_H_ + +#define TXGBE_VXMAILBOX_SIZE (16 - 1) + +/** + * VF Registers + **/ +#define TXGBE_VXMAILBOX 0x00600 +#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD | \ + TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK) + +#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define TXGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,63] */ +#define TXGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define TXGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define TXGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define TXGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define TXGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLRE(i) (0x004A0 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLREC(i) (0x004A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define TXGBE_MBVFICR(i) (0x00480 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is TXGBE_PF_*. 
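+ *
+ * For example (illustrative): a VF requesting a MAC filter update places
+ * TXGBE_VF_SET_MAC_ADDR in msg[0]; on success the PF replies with the same
+ * opcode or'd with TXGBE_VT_MSGTYPE_ACK, on failure with
+ * TXGBE_VT_MSGTYPE_NACK.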
+ * Message ACKs are the value or'd with TXGBE_VT_MSGTYPE_ACK (0x80000000)
+ */
+#define TXGBE_VT_MSGTYPE_ACK 0x80000000
+#define TXGBE_VT_MSGTYPE_NACK 0x40000000
+#define TXGBE_VT_MSGTYPE_CTS 0x20000000
+#define TXGBE_VT_MSGINFO_SHIFT 16
+#define TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT 17
+/* bits 23:16 are used for extra info for certain messages */
+#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT)
+
+/* definitions to support mailbox API version negotiation */
+
+/* each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum txgbe_pfvf_api_rev {
+ txgbe_mbox_api_null,
+ txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ txgbe_mbox_api_21, /* API version 2.1 */
+ txgbe_mbox_api_22, /* API version 2.2 */
+ txgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
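+
+/* Illustrative only (not part of the original patch): a VF negotiates the
+ * mailbox API by sending TXGBE_VF_API_NEGOTIATE (defined below) with the
+ * requested revision in the second word, e.g.:
+ *
+ *   msg[0] = TXGBE_VF_API_NEGOTIATE;
+ *   msg[1] = txgbe_mbox_api_13;
+ *   err = hw->mbx.ops.write_posted(hw, msg, 2, 0);
+ *
+ * and treats the version as accepted when the reply carries
+ * TXGBE_VT_MSGTYPE_ACK.
+ */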
+
+/* mailbox API, legacy requests */
+#define TXGBE_VF_RESET 0x01 /* VF requests reset */
+#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* mailbox API, version 1.2 VF requests */
+#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c
+#define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define TXGBE_VF_GET_FW_VERSION 0x11 /* get fw version */
+
+/* mailbox API, version 2.1 VF requests */
+#define TXGBE_VF_SET_5TUPLE 0x20 /* VF request PF for 5-tuple filter */
+
+/* mailbox API, version 2.2 VF requests */
+#define TXGBE_VF_QUEUE_RATE_LIMIT 0x21 /* VF request PF to set vf-queue rate limit */
+
+#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */
+
+/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */
+enum txgbevf_xcast_modes {
+ TXGBEVF_XCAST_MODE_NONE = 0,
+ TXGBEVF_XCAST_MODE_MULTI,
+ TXGBEVF_XCAST_MODE_ALLMULTI,
+ TXGBEVF_XCAST_MODE_PROMISC,
+};
+
+enum txgbevf_5tuple_msg {
+ TXGBEVF_5T_REQ = 0,
+ TXGBEVF_5T_CMD,
+ TXGBEVF_5T_CTRL0,
+ TXGBEVF_5T_CTRL1,
+ TXGBEVF_5T_PORT,
+ TXGBEVF_5T_DA,
+ TXGBEVF_5T_SA,
+ TXGBEVF_5T_MAX, /* must be last */
+};
+
+#define TXGBEVF_5T_ADD_SHIFT 31
+
+enum txgbevf_queue_rate_limit_msg {
+ TXGBEVF_Q_RATE_REQ = 0,
+ TXGBEVF_Q_RATE_INDEX,
+ TXGBEVF_Q_RATE_LIMIT,
+};
+
+/* GET_QUEUES return data indices within the mailbox */
+#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define TXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define TXGBE_VF_MC_TYPE_WORD 3
+
+#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define TXGBE_PF_NOFITY_VF_LINK_STATUS 0x1
+#define TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+/* mailbox API, version 2.0 VF requests
+ * (TXGBE_VF_API_NEGOTIATE and TXGBE_VF_GET_QUEUES are defined above)
+ */
+#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
+#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
+#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
+#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
+#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
+#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
+
+#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define TXGBE_VF_MBX_INIT_DELAY 50 /* microseconds between retries */
+
+int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+int txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id);
+int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id);
+int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id);
+int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id);
+void txgbe_init_mbx_ops(struct txgbe_hw *hw);
+void txgbe_init_mbx_params_vf(struct txgbe_hw *hw);
+void txgbe_init_mbx_params_pf(struct txgbe_hw *hw);
+
+#endif /* _TXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c
new file mode 100644
index 000000000000..9adba9083360
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.c
@@ -0,0 +1,1301 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe.h"
+#include "txgbe_mtd.h"
+
+u32 txgbe_mtd_xmdio_wr(struct mtd_dev *dev_ptr,
+ u16 port,
+ u16 dev,
+ u16 reg,
+ u16 value)
+{
+ u32 result = MTD_OK;
+
+ if (dev_ptr->mtd_write_mdio) {
+ if (dev_ptr->mtd_write_mdio(dev_ptr, port, dev, reg, value) == MTD_FAIL) {
+ result = MTD_FAIL;
+ MTD_DBG_INFO("mtd_write_mdio 0x%04X failed to port=%d, dev=%d, reg=0x%04X\n",
+ value, port, dev, reg);
+ }
+ } else {
+ result = MTD_FAIL;
+ }
+
+ return result;
+}
+
+u32 txgbe_mtd_hw_xmdio_rd(struct mtd_dev *dev_ptr,
+ u16 port,
+ u16 dev,
+ u16 reg,
+ u16 *data)
+{
+ u32 result = MTD_OK;
+
+ if (dev_ptr->fmtd_read_mdio) {
+ if (dev_ptr->fmtd_read_mdio(dev_ptr, port, dev, reg, data) == MTD_FAIL) {
+ result = MTD_FAIL;
+ MTD_DBG_INFO("fmtd_read_mdio failed from port=%d, dev=%d, reg=0x%04X\n",
+ port, dev, reg);
+ }
+ } else {
+ result = MTD_FAIL;
+ }
+ return result;
+}
+
+#define MTD_CALC_MASK(field_offset, field_len, mask) do {\
+ if (((field_len) + (field_offset)) >= 16) \
+ mask = (0 - (1 << (field_offset))); \
+ else \
+ mask = (((1 << ((field_len) + (field_offset)))) - (1 << (field_offset)));\
+ } while (0)
+
+u32 txgbe_mtd_get_phy_reg_filed(struct mtd_dev *dev_ptr,
+ u16 port,
+ u16 dev,
+ u16 reg_addr,
+ u8 field_offset,
+ u8 field_length,
+ u16 *data)
+{
+ u16 tmp_data;
+ u32 ret_val;
+
+ ret_val = txgbe_mtd_hw_xmdio_rd(dev_ptr, port, dev, reg_addr, &tmp_data);
+
+ if (ret_val != MTD_OK) {
+ MTD_DBG_ERROR("Failed to read register\n");
+ return MTD_FAIL;
+ }
+
+ txgbe_mtd_get_reg_filed(tmp_data, field_offset, field_length, data);
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_set_phy_feild(struct mtd_dev *dev_ptr,
+ u16 port,
+ u16 dev,
+ u16 reg_addr,
+ u8 field_offset,
+ u8 field_length,
+ u16 data)
+{
+ u16 tmp_data, new_data;
+ u32 ret_val;
+
+ ret_val = txgbe_mtd_hw_xmdio_rd(dev_ptr, port, dev, reg_addr, &tmp_data);
+ if (ret_val != MTD_OK)
+ return MTD_FAIL;
+
+ txgbe_mtd_set_reg_field_word(tmp_data,
+ data, field_offset, field_length, &new_data);
+
+ ret_val = txgbe_mtd_xmdio_wr(dev_ptr, port, dev, reg_addr, new_data);
+
+ if (ret_val != MTD_OK)
+ return MTD_FAIL;
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_get_reg_filed(u16 reg_data,
+ u8 field_offset,
+ u8 field_length,
+ u16 *data)
+{
+ /* Bits mask to be read */
+ u16 mask;
+
+ MTD_CALC_MASK(field_offset, field_length, mask);
+
+ *data = (reg_data & mask) >> field_offset;
+
+ return MTD_OK;
+}
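+
+/* Worked example (illustrative, not in the original patch): with
+ * field_offset = 4 and field_length = 3, MTD_CALC_MASK() yields
+ * mask = (1 << 7) - (1 << 4) = 0x0070, so txgbe_mtd_set_reg_field_word()
+ * below clears bits 6:4 of reg_data and or's in (bit_field_data << 4).
+ */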
+u32 txgbe_mtd_set_reg_field_word(u16 reg_data,
+ u16 bit_field_data,
+ u8 field_offset,
+ u8 field_length,
+ u16 *data)
+{
+ /* Bits mask to be read */
+ u16 mask;
+
+ MTD_CALC_MASK(field_offset, field_length, mask);
+
+ /* Set the desired bits to 0. */
+ reg_data &= ~mask;
+ /* Set the given data into the above reset bits. */
+ reg_data |= ((bit_field_data << field_offset) & mask);
+
+ *data = reg_data;
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_wait(u32 x)
+{
+ msleep(x);
+ return MTD_OK;
+}
+
+static u32 txgbe_mtd_check_dev_cap(struct mtd_dev *dev_ptr,
+ u16 port,
+ bool *phy_has_macsec,
+ bool *phy_has_copper_intf,
+ bool *is_e20x0_dev)
+{
+ u8 major, minor, inc, test;
+ u16 abilities;
+
+ *phy_has_macsec = true;
+ *phy_has_copper_intf = true;
+ *is_e20x0_dev = false;
+
+ if (txgbe_mtd_get_firmver(dev_ptr, port,
+ &major, &minor, &inc, &test) == MTD_FAIL) {
+ /* firmware not running will produce this case */
+ major = 0;
+ minor = 0;
+ inc = 0;
+ test = 0;
+ }
+
+ if (major == 0 && minor == 0 && inc == 0 && test == 0) {
+ u16 reg2, reg3;
+ u16 index, index2;
+ u16 temp;
+ u16 bit16thru23[8];
+
+ /* save these registers */
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port,
+ 31, MTD_REG_SCR, &reg2));
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port,
+ 31, MTD_REG_ECSR, &reg3));
+
+ /* clear these bit indications */
+ for (index = 0; index < 8; index++)
+ bit16thru23[index] = 0;
+
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_CCCR9, 0x0300));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_SCR, 0x0102));
+ txgbe_mtd_wait(1);
+
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x06D3));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0593));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0513));
+ txgbe_mtd_wait(1);
+
+ index = 0;
+ index2 = 0;
+ while (index < 24) {
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0413));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0513));
+ txgbe_mtd_wait(1);
+
+ if (index >= 16)
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, 31,
+ MTD_REG_ECSR, &bit16thru23[index2++]));
+ else
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, 31,
+ MTD_REG_ECSR, &temp));
+
+ txgbe_mtd_wait(1);
+ index++;
+ }
+
+ if (((bit16thru23[0] >> 11) & 1) | ((bit16thru23[1] >> 11) & 1))
+ *phy_has_macsec = false;
+
+ if (((bit16thru23[4] >> 11) & 1) | ((bit16thru23[5] >> 11) & 1))
+ *phy_has_copper_intf = false;
+
+ if (((bit16thru23[6] >> 11) & 1) | ((bit16thru23[7] >> 11) & 1))
+ *is_e20x0_dev = true;
+
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0413));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0493));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0413));
+ txgbe_mtd_wait(1);
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, 0x0513));
+ txgbe_mtd_wait(1);
+
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_CCCR9, 0x5440));
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_SCR, reg2));
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 31, MTD_REG_ECSR, reg3));
+
+ } else {
+ /* should just read it from the firmware status register */
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port,
+ MTD_T_UNIT_PMA_PMD,
+ MTD_TUNIT_XG_EXT_STATUS, &abilities));
+ if (abilities & (1 << 12))
+ *phy_has_macsec = false;
+
+ if (abilities & (1 << 13))
+ *phy_has_copper_intf = false;
+
+ if (abilities & (1 << 14))
+ *is_e20x0_dev = true;
+ }
+
+ return MTD_OK;
+}
+
+static u32 txgbe_mtd_ready_after_rst(struct mtd_dev *dev_ptr, u16 
port, bool *phy_ready) +{ + u16 val; + + *phy_ready = false; + + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 15, 1, &val)); + + if (val) + *phy_ready = false; + else + *phy_ready = true; + + return MTD_OK; +} + +u32 txgbe_mtd_sw_rst(struct mtd_dev *dev_ptr, + u16 port, + u16 timeout) +{ + u16 counter; + bool phy_ready; + /* bit self clears when done */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 15, 1, 1)); + + if (timeout) { + counter = 0; + txgbe_attempt(txgbe_mtd_ready_after_rst(dev_ptr, + port, &phy_ready)); + while (!phy_ready && counter <= timeout) { + txgbe_attempt(txgbe_mtd_wait(1)); + txgbe_attempt(txgbe_mtd_ready_after_rst(dev_ptr, + port, &phy_ready)); + counter++; + } + + if (counter < timeout) + return MTD_OK; + else + return MTD_FAIL; + } else { + return MTD_OK; + } +} + +static u32 txgbe_mtd_ready_after_hw_rst(struct mtd_dev *dev_ptr, u16 port, bool *phy_ready) +{ + u16 val; + + *phy_ready = false; + + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, + 14, 1, &val)); + + if (val) + *phy_ready = false; + else + *phy_ready = true; + + return MTD_OK; +} + +u32 txgbe_mtd_hw_rst(struct mtd_dev *dev_ptr, + u16 port, + u16 timeout) +{ + u16 counter; + bool phy_ready; + + /* bit self clears when done */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, + 14, 1, 1)); + + if (timeout) { + counter = 0; + txgbe_attempt(txgbe_mtd_ready_after_hw_rst(dev_ptr, port, &phy_ready)); + while (!phy_ready && counter <= timeout) { + txgbe_attempt(txgbe_mtd_wait(1)); + txgbe_attempt(txgbe_mtd_ready_after_hw_rst(dev_ptr, port, &phy_ready)); + counter++; + } + if (counter < timeout) + return MTD_OK; + else + return MTD_FAIL; /* timed out without becoming ready */ + } else { + return MTD_OK; + } +} + +u32 txgbe_mtd_enable_speeds(struct mtd_dev *dev_ptr, + u16 port, + u16 speed_bits, + bool an_restart) +{ + bool speed_forced; + u16 dummy; + u16 temp_reg_value; + + if (speed_bits & MTD_FORCED_SPEEDS_BIT_MASK) { + /* tried to force the speed, this function is for autonegotiation control */ + return MTD_FAIL; + } + + if (MTD_IS_X32X0_BASE(dev_ptr->device_id) && ((speed_bits & MTD_SPEED_2P5GIG_FD) || + (speed_bits & MTD_SPEED_5GIG_FD))) { + return MTD_FAIL; /* tried to advertise 2.5G/5G on a 88X32X0 chipset */ + } + + if (MTD_IS_X33X0_BASE(dev_ptr->device_id)) { + const u16 chip_rev = (dev_ptr->device_id & 0xf); /* get the chip revision */ + + if (chip_rev == 9 || chip_rev == 5 || chip_rev == 1 || + chip_rev == 8 || chip_rev == 4 || chip_rev == 0) + return MTD_FAIL; + } + + /* Enable AN and set speed back to power-on default in case previously forced + *Only do it if forced, to avoid an extra/unnecessary soft reset + */ + txgbe_attempt(txgbe_mtd_get_forced_speed(dev_ptr, port, + &speed_forced, &dummy)); + if (speed_forced) + txgbe_attempt(txgbe_mtd_undo_forced_speed(dev_ptr, port, false)); + + if (speed_bits == MTD_ADV_NONE) { + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x0010, + MTD_7_0010_SPEED_BIT_POS, + MTD_7_0010_SPEED_BIT_LENGTH, + 0)); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x8000, + MTD_7_8000_SPEED_BIT_POS, + MTD_7_8000_SPEED_BIT_LENGTH, + 0)); + + /* Now take care of bit in 7.0020 (10GBASE-T) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x0020, + 
MTD_7_0020_SPEED_BIT_POS, + MTD_7_0020_SPEED_BIT_LENGTH, 0)); + + if (MTD_IS_X33X0_BASE(dev_ptr->device_id)) { + /* Now take care of bits in 7.0020 (2.5G, 5G speed bits) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + 7, 0x0020, + MTD_7_0020_SPEED_BIT_POS2, + MTD_7_0020_SPEED_BIT_LENGTH2, 0)); + } + } else { + /* Take care of bits in 7.0010 (advertisement register, 10BT and 100BT bits) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x0010, + MTD_7_0010_SPEED_BIT_POS, + MTD_7_0010_SPEED_BIT_LENGTH, + (speed_bits & MTD_LOWER_BITS_MASK))); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x8000, + MTD_7_8000_SPEED_BIT_POS, + MTD_7_8000_SPEED_BIT_LENGTH, + MTD_GET_1000BT_BITS(speed_bits))); + + /* Now take care of bits in 7.0020 (10GBASE-T first) */ + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + 7, 0x0020, &temp_reg_value)); + txgbe_attempt(txgbe_mtd_set_reg_field_word(temp_reg_value, + MTD_GET_10GBT_BIT(speed_bits), + MTD_7_0020_SPEED_BIT_POS, + MTD_7_0020_SPEED_BIT_LENGTH, + &temp_reg_value)); + + if (MTD_IS_X33X0_BASE(dev_ptr->device_id)) { + /* Now take care of 2.5G bit in 7.0020 */ + txgbe_attempt(txgbe_mtd_set_reg_field_word(temp_reg_value, + MTD_GET_2P5GBT_BIT(speed_bits), + 7, 1, + &temp_reg_value)); + + /* Now take care of 5G bit in 7.0020 */ + txgbe_attempt(txgbe_mtd_set_reg_field_word(temp_reg_value, + MTD_GET_5GBT_BIT(speed_bits), + 8, 1, + &temp_reg_value)); + } + + /* Now write result back to 7.0020 */ + txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port, 7, 0x0020, temp_reg_value)); + + if (MTD_GET_10GBT_BIT(speed_bits) || + MTD_GET_2P5GBT_BIT(speed_bits) || + MTD_GET_5GBT_BIT(speed_bits)) + /* Set XNP on if any bit that required it was set */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + 7, 0, 13, 1, 1)); + } + + if (an_restart) { + return ((u32)(txgbe_mtd_autoneg_enable(dev_ptr, port) || + txgbe_mtd_autoneg_restart(dev_ptr, port))); + } + + return MTD_OK; +} + +u32 txgbe_mtd_undo_forced_speed(struct mtd_dev *dev_ptr, + u16 port, + bool an_restart) +{ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 13, 1, 1)); + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 6, 1, 1)); + + /* when speed bits are changed, T unit sw reset is required, wait until phy is ready */ + txgbe_attempt(txgbe_mtd_sw_rst(dev_ptr, port, 1000)); + + if (an_restart) { + return ((u32)(txgbe_mtd_autoneg_enable(dev_ptr, port) || + txgbe_mtd_autoneg_restart(dev_ptr, port))); + } + + return MTD_OK; +} + +u32 txgbe_mtd_get_forced_speed(struct mtd_dev *dev_ptr, + u16 port, + bool *speed_is_forced, + u16 *force_speed) +{ + u16 val, bit0, bit1, force_speed_bits, duplex_bit; + bool an_disabled; + + *speed_is_forced = false; + *force_speed = MTD_ADV_NONE; + + /* check if 7.0.12 is 0 or 1 (disabled or enabled) */ + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 7, 0, 12, 1, &val)); + + (val) ? 
(an_disabled = false) : (an_disabled = true); + + if (an_disabled) { + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 6, 1, &bit0)); + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, + 13, 1, &bit1)); + + /* now read the duplex bit setting */ + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 7, 0x8000, + 4, 1, &duplex_bit)); + + force_speed_bits = 0; + force_speed_bits = bit0 | (bit1 << 1); + + if (force_speed_bits == 0) { + /* it's set to 10BT */ + if (duplex_bit) { + *speed_is_forced = true; + *force_speed = MTD_SPEED_10M_FD_AN_DIS; + } else { + *speed_is_forced = true; + *force_speed = MTD_SPEED_10M_HD_AN_DIS; + } + } else if (force_speed_bits == 2) { + /* it's set to 100BT */ + if (duplex_bit) { + *speed_is_forced = true; + *force_speed = MTD_SPEED_100M_FD_AN_DIS; + } else { + *speed_is_forced = true; + *force_speed = MTD_SPEED_100M_HD_AN_DIS; + } + } + /* else it's set to 1000BT or 10GBT which require AN to work */ + } + + return MTD_OK; +} + +u32 txgbe_mtd_autoneg_restart(struct mtd_dev *dev_ptr, u16 port) +{ + /* set 7.0.9, restart AN */ + return (txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0, + 9, 1, 1)); +} + +u32 txgbe_mtd_autoneg_enable(struct mtd_dev *dev_ptr, u16 port) +{ + /* set 7.0.12=1, enable AN */ + return (txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0, + 12, 1, 1)); +} + +u32 txgbe_mtd_autoneg_done(struct mtd_dev *dev_ptr, u16 port, bool *an_speed_res_done) +{ + u16 val; + + /* read speed/duplex resolution done bit in 3.8008 bit 11 */ + if (txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, + 11, 1, &val) == MTD_FAIL) { + *an_speed_res_done = false; + return MTD_FAIL; + } + + (val) ? (*an_speed_res_done = true) : (*an_speed_res_done = false); + + return MTD_OK; +} + +u32 txgbe_mtd_get_autoneg_res(struct mtd_dev *dev_ptr, u16 port, u16 *speed_resolution) +{ + u16 val, speed, speed2, duplex; + bool res_done; + + *speed_resolution = MTD_ADV_NONE; + + /* check if AN is enabled */ + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 7, 0, 12, 1, &val)); + + if (val) { + /* an is enabled, check if speed is resolved */ + txgbe_attempt(txgbe_mtd_autoneg_done(dev_ptr, port, &res_done)); + + if (res_done) { + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, 14, 2, &speed)); + + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, 13, 1, &duplex)); + + switch (speed) { + case MTD_CU_SPEED_10_MBPS: + if (duplex) + *speed_resolution = MTD_SPEED_10M_FD; + else + *speed_resolution = MTD_SPEED_10M_HD; + break; + case MTD_CU_SPEED_100_MBPS: + if (duplex) + *speed_resolution = MTD_SPEED_100M_FD; + else + *speed_resolution = MTD_SPEED_100M_HD; + break; + case MTD_CU_SPEED_1000_MBPS: + if (duplex) + *speed_resolution = MTD_SPEED_1GIG_FD; + else + *speed_resolution = MTD_SPEED_1GIG_HD; + break; + case MTD_CU_SPEED_10_GBPS: /* also MTD_CU_SPEED_NBT */ + if (MTD_IS_X32X0_BASE(dev_ptr->device_id)) { + *speed_resolution = MTD_SPEED_10GIG_FD; + } else { + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, + 2, 2, &speed2)); + + switch (speed2) { + case MTD_CU_SPEED_NBT_10G: + *speed_resolution = MTD_SPEED_10GIG_FD; + break; + + case MTD_CU_SPEED_NBT_5G: + *speed_resolution = MTD_SPEED_5GIG_FD; + break; + + case MTD_CU_SPEED_NBT_2P5G: + *speed_resolution = MTD_SPEED_2P5GIG_FD; + break; + + default: + /* this is an error */ + return MTD_FAIL; + } + } + break; + default: + /* this is an error */ + return 
MTD_FAIL; + } + } + } + + return MTD_OK; +} + +/****************************************************************************/ +u32 txgbe_mtd_is_baset_up(struct mtd_dev *dev_ptr, u16 port, + u16 *speed, + bool *link_up) +{ + bool speed_is_forced; + u16 force_speed, cu_speed, cu_link_status; + + *link_up = false; + *speed = MTD_ADV_NONE; + + /* first check if speed is forced to one of the speeds not requiring AN to train */ + txgbe_attempt(txgbe_mtd_get_forced_speed(dev_ptr, port, &speed_is_forced, &force_speed)); + + if (speed_is_forced) { + /* check if the link is up at the speed it's forced to */ + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, 14, + 2, &cu_speed)); + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, 10, + 1, &cu_link_status)); + + switch (force_speed) { + case MTD_SPEED_10M_HD_AN_DIS: + case MTD_SPEED_10M_FD_AN_DIS: + /* might want to add checking the duplex to make sure there + * is no duplex mismatch + */ + if (cu_speed == MTD_CU_SPEED_10_MBPS) + *speed = force_speed; + else + *speed = MTD_SPEED_MISMATCH; + if (cu_link_status) + *link_up = true; + + break; + + case MTD_SPEED_100M_HD_AN_DIS: + case MTD_SPEED_100M_FD_AN_DIS: + /* might want to add checking the duplex to make sure there + * is no duplex mismatch + */ + if (cu_speed == MTD_CU_SPEED_100_MBPS) + *speed = force_speed; + else + *speed = MTD_SPEED_MISMATCH; + + if (cu_link_status) + *link_up = true; + break; + + default: + return MTD_FAIL; + } + } else { + /* must be going through AN */ + txgbe_attempt(txgbe_mtd_get_autoneg_res(dev_ptr, port, speed)); + + if (*speed != MTD_ADV_NONE) { + /* check if the link is up at the speed it's AN to */ + txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 3, 0x8008, 10, + 1, &cu_link_status)); + + switch (*speed) { + case MTD_SPEED_10M_HD: + case MTD_SPEED_10M_FD: + case MTD_SPEED_100M_HD: + case MTD_SPEED_100M_FD: + case MTD_SPEED_1GIG_HD: + case MTD_SPEED_1GIG_FD: + case MTD_SPEED_10GIG_FD: + case MTD_SPEED_2P5GIG_FD: + case MTD_SPEED_5GIG_FD: + if (cu_link_status) + *link_up = true; + break; + default: + return MTD_FAIL; + } + } + /* else link is down, and AN is in progress, */ + } + + if (*speed == MTD_SPEED_MISMATCH) + return MTD_FAIL; + else + return MTD_OK; +} + +u32 txgbe_mtd_set_pause_adver(struct mtd_dev *dev_ptr, u16 port, + u32 pause_type, + bool an_restart) +{ + /* sets/clears bits 11, 10 (A6,A5 in the tech bit field of 7.16) */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, 7, 0x0010, + 10, 2, (u16)pause_type)); + + if (an_restart) { + return ((u32)(txgbe_mtd_autoneg_enable(dev_ptr, port) || + txgbe_mtd_autoneg_restart(dev_ptr, port))); + } + + return MTD_OK; +} + +static u32 txgbe_mtd_autoneg_is_cmplt(struct mtd_dev *dev_ptr, u16 port, bool *an_status_ready) +{ + u16 val; + + /* read an completed, 7.1.5 bit */ + if (txgbe_mtd_get_phy_reg_filed(dev_ptr, port, + 7, 1, 5, + 1, &val) == MTD_FAIL) { + *an_status_ready = false; + return MTD_FAIL; + } + + (val) ? 
(*an_status_ready = true) : (*an_status_ready = false);
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_get_lp_adver_pause(struct mtd_dev *dev_ptr,
+ u16 port,
+ u8 *pause_bits)
+{
+ u16 val;
+ bool an_status_ready;
+
+ /* Make sure AN is complete */
+ txgbe_attempt(txgbe_mtd_autoneg_is_cmplt(dev_ptr, port,
+ &an_status_ready));
+
+ if (!an_status_ready) {
+ *pause_bits = MTD_CLEAR_PAUSE;
+ return MTD_FAIL;
+ }
+
+ /* get bits 11, 10 (A6,A5 in the tech bit field of 7.19) */
+ if (txgbe_mtd_get_phy_reg_filed(dev_ptr, port, 7, 19,
+ 10, 2, &val) == MTD_FAIL) {
+ *pause_bits = MTD_CLEAR_PAUSE;
+ return MTD_FAIL;
+ }
+
+ *pause_bits = (u8)val;
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_get_firmver(struct mtd_dev *dev_ptr,
+ u16 port,
+ u8 *major,
+ u8 *minor,
+ u8 *inc,
+ u8 *test)
+{
+ u16 reg_49169, reg_49170;
+
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, 1, 49169, &reg_49169));
+
+ *major = (reg_49169 & 0xFF00) >> 8;
+ *minor = (reg_49169 & 0x00FF);
+
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, 1, 49170, &reg_49170));
+
+ *inc = (reg_49170 & 0xFF00) >> 8;
+ *test = (reg_49170 & 0x00FF);
+
+ /* firmware is not running if all 0's */
+ if (!(*major || *minor || *inc || *test))
+ return MTD_FAIL;
+
+ return MTD_OK;
+}
+
+u32 txgbe_mtd_get_phy_revision(struct mtd_dev *dev_ptr,
+ u16 port,
+ enum txgbe_mtd_dev_id *phy_rev,
+ u8 *num_ports,
+ u8 *this_port)
+{
+ u16 temp = 0, try_counter, temp2, base_type, reported_hw_rev;
+ u16 revision = 0, numports, thisport, ready_bit, fw_numports, fw_thisport;
+ bool register_exists, reg_ready, has_macsec, has_copper, is_e20x0_dev;
+ u8 major, minor, inc, test;
+
+ *phy_rev = MTD_REV_UNKNOWN;
+ *num_ports = 0;
+ *this_port = 0;
+
+ /* first check base type of device, get reported rev and port info */
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, 3, 0xD00D, &temp));
+ base_type = ((temp & 0xFC00) >> 6);
+ reported_hw_rev = (temp & 0x000F);
+ numports = ((temp & 0x0380) >> 7) + 1;
+ thisport = ((temp & 0x0070) >> 4);
+
+ /* find out if device has macsec/ptp, copper unit or is an E20X0-type device */
+ txgbe_attempt(txgbe_mtd_check_dev_cap(dev_ptr, port,
+ &has_macsec, &has_copper, &is_e20x0_dev));
+
+ /* check if internal processor firmware is up and running, and if so, easier to get info */
+ if (txgbe_mtd_get_firmver(dev_ptr, port,
+ &major, &minor, &inc, &test) == MTD_FAIL) {
+ major = 0;
+ minor = 0;
+ inc = 0;
+ test = 0;
+ }
+
+ if (major == 0 && minor == 0 && inc == 0 && test == 0) {
+ /* no firmware running, have to verify device revision */
+ if (MTD_IS_X32X0_BASE(base_type)) {
+ /* A0 and Z2 report the same revision, need to check which is which */
+ if (reported_hw_rev == 1) {
+ /* need to figure out if it's A0 or Z2 */
+ /* remove internal reset */
+ txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port,
+ 3, 0xD801, 5, 1, 1));
+
+ /* wait until it's ready */
+ reg_ready = false;
+ try_counter = 0;
+ while (!reg_ready && try_counter++ < 10) {
+ txgbe_attempt(txgbe_mtd_wait(1));
+ txgbe_attempt(txgbe_mtd_get_phy_reg_filed(dev_ptr, port,
+ 3, 0xD007, 6,
+ 1, &ready_bit));
+ if (ready_bit == 1)
+ reg_ready = true;
+ }
+
+ if (!reg_ready) {
+ /* timed out, can't tell for sure what rev this is */
+ *num_ports = 0;
+ *this_port = 0;
+ *phy_rev = MTD_REV_UNKNOWN;
+ return MTD_FAIL;
+ }
+
+ /* perform test */
+ register_exists = false;
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr,
+ port, 3, 0x8EC6, &temp));
+ txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port,
+ 3, 0x8EC6, 0xA5A5));
+ txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port,
+ 3, 0x8EC6, 
&temp2)); + + /* put back internal reset */ + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + 3, 0xD801, + 5, 1, 0)); + + if (temp == 0 && temp2 == 0xA5A5) + register_exists = true; + + if (register_exists) + revision = 2; /* this is actually QA0 */ + else + revision = reported_hw_rev; /* this is a QZ2 */ + } else { + /* it's not A0 or Z2, use what's reported by the hardware */ + revision = reported_hw_rev; + } + } else if (MTD_IS_X33X0_BASE(base_type)) { + /* all 33X0 devices report correct revision */ + revision = reported_hw_rev; + } + + /* have to use what's reported by the hardware */ + *num_ports = (u8)numports; + *this_port = (u8)thisport; + } else { + /* there is firmware loaded/running in internal processor */ + /* can get device revision reported by firmware */ + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_T_UNIT_PMA_PMD, + MTD_TUNIT_PHY_REV_INFO_REG, + &temp)); + txgbe_attempt(txgbe_mtd_get_reg_filed(temp, + 0, 4, &revision)); + txgbe_attempt(txgbe_mtd_get_reg_filed(temp, + 4, 3, &fw_numports)); + txgbe_attempt(txgbe_mtd_get_reg_filed(temp, + 7, 3, &fw_thisport)); + if (fw_numports == numports && fw_thisport == thisport) { + *num_ports = (u8)numports; + *this_port = (u8)thisport; + } else { + *phy_rev = MTD_REV_UNKNOWN; + *num_ports = 0; + *this_port = 0; + return MTD_FAIL; /* firmware and hardware are reporting different values */ + } + } + + /* now have correct information to build up the enum txgbe_mtd_dev_id */ + if (MTD_IS_X32X0_BASE(base_type)) { + temp = MTD_X32X0_BASE; + } else if (MTD_IS_X33X0_BASE(base_type)) { + temp = MTD_X33X0_BASE; + } else { + *phy_rev = MTD_REV_UNKNOWN; + *num_ports = 0; + *this_port = 0; + return MTD_FAIL; + } + + if (has_macsec) + temp |= MTD_MACSEC_CAPABLE; + + if (has_copper) + temp |= MTD_COPPER_CAPABLE; + + if (MTD_IS_X33X0_BASE(base_type) && is_e20x0_dev) + temp |= MTD_E20X0_DEVICE; + + temp |= (revision & 0xF); + + *phy_rev = (enum txgbe_mtd_dev_id)temp; + + /* make sure we got a good one */ + if (txgbe_mtd_phy_rev_vaild(*phy_rev) == MTD_OK) + return MTD_OK; + else + return MTD_FAIL; +} + +u32 txgbe_mtd_phy_rev_vaild(enum txgbe_mtd_dev_id phy_rev) +{ + switch (phy_rev) { + /* list must match enum txgbe_mtd_dev_id */ + case MTD_REV_3240P_Z2: + case MTD_REV_3240P_A0: + case MTD_REV_3240P_A1: + case MTD_REV_3220P_Z2: + case MTD_REV_3220P_A0: + + case MTD_REV_3240_Z2: + case MTD_REV_3240_A0: + case MTD_REV_3240_A1: + case MTD_REV_3220_Z2: + case MTD_REV_3220_A0: + + case MTD_REV_3310P_A0: + case MTD_REV_3320P_A0: + case MTD_REV_3340P_A0: + case MTD_REV_3310_A0: + case MTD_REV_3320_A0: + case MTD_REV_3340_A0: + + case MTD_REV_E2010P_A0: + case MTD_REV_E2020P_A0: + case MTD_REV_E2040P_A0: + case MTD_REV_E2010_A0: + case MTD_REV_E2020_A0: + case MTD_REV_E2040_A0: + + case MTD_REV_2340P_A1: + case MTD_REV_2320P_A0: + case MTD_REV_2340_A1: + case MTD_REV_2320_A0: + return MTD_OK; + /* unsupported PHYs */ + case MTD_REV_3310P_Z1: + case MTD_REV_3320P_Z1: + case MTD_REV_3340P_Z1: + case MTD_REV_3310_Z1: + case MTD_REV_3320_Z1: + case MTD_REV_3340_Z1: + case MTD_REV_3310P_Z2: + case MTD_REV_3320P_Z2: + case MTD_REV_3340P_Z2: + case MTD_REV_3310_Z2: + case MTD_REV_3320_Z2: + case MTD_REV_3340_Z2: + case MTD_REV_E2010P_Z2: + case MTD_REV_E2020P_Z2: + case MTD_REV_E2040P_Z2: + case MTD_REV_E2010_Z2: + case MTD_REV_E2020_Z2: + case MTD_REV_E2040_Z2: + default: + return MTD_FAIL; /* is either MTD_REV_UNKNOWN or not in the above list */ + } +} + +/* mtdCunit.c */ +static u32 txgbe_mtd_cunit_sw_rst(struct mtd_dev *dev_ptr, u16 port) +{ + 
return txgbe_mtd_set_phy_feild(dev_ptr, port, + MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, + 15, 1, 1); +} + +/* mtdHxunit.c */ +static u32 txgbe_mtd_rerun_serdes_autoneg_init_automode(struct mtd_dev *dev_ptr, u16 port) +{ + u16 temp, temp2, temp3; + u16 wait_counter; + + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp)); + + txgbe_attempt(txgbe_mtd_set_reg_field_word(temp, + 3, 14, 2, &temp2)); + + txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port, + MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, temp2)); + + /* wait for it to be done */ + wait_counter = 0; + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp3)); + while ((temp3 & 0x8000) && (wait_counter < 100)) { + txgbe_attempt(txgbe_mtd_wait(1)); + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp3)); + wait_counter++; + } + + if (wait_counter >= 100) + return MTD_FAIL; /* execute timed out */ + + return MTD_OK; +} + +u32 txgbe_mtd_set_mac_intf_ctrl(struct mtd_dev *dev_ptr, + u16 port, + u16 mac_type, + bool mac_powerdown, + u16 mac_snoop_sel, + u16 mac_active_lane_sel, + u16 mac_link_down_speed, + u16 mac_max_speed, + bool do_sw_rst, + bool rerun_serdes_init) +{ + u16 cunit_port_ctrl, cunit_mode_cfg; + + /* do range checking on parameters */ + if (mac_type > MTD_MAC_LEAVE_UNCHANGED) + return MTD_FAIL; + + if (mac_snoop_sel > MTD_MAC_SNOOP_LEAVE_UNCHANGED || + mac_snoop_sel == 1) + return MTD_FAIL; + + if (mac_active_lane_sel > 1) + return MTD_FAIL; + + if (mac_link_down_speed > MTD_MAC_SPEED_LEAVE_UNCHANGED) + return MTD_FAIL; + + if (!(mac_max_speed == MTD_MAX_MAC_SPEED_10G || + mac_max_speed == MTD_MAX_MAC_SPEED_5G || + mac_max_speed == MTD_MAX_MAC_SPEED_2P5G || + mac_max_speed == MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED || + mac_max_speed == MTD_MAX_MAC_SPEED_NOT_APPLICABLE)) + return MTD_FAIL; + + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_C_UNIT_GENERAL, + MTD_CUNIT_PORT_CTRL, + &cunit_port_ctrl)); + txgbe_attempt(txgbe_mtd_hw_xmdio_rd(dev_ptr, port, + MTD_C_UNIT_GENERAL, + MTD_CUNIT_MODE_CONFIG, + &cunit_mode_cfg)); + + /* Because writes of some of these bits don't show up in the register on a read + * until after the software reset, we can't do repeated read-modify-writes + * to the same register or we will lose those changes. 
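+ * (Illustrative note, not in the original patch: both MTD_CUNIT_PORT_CTRL
+ * and MTD_CUNIT_MODE_CONFIG are therefore read once above, all requested
+ * fields are folded into the local copies via txgbe_mtd_set_reg_field_word(),
+ * and each register is written back exactly once below.)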
+ * This approach also cuts down on IO and speeds up the code + */ + + if (mac_type < MTD_MAC_LEAVE_UNCHANGED) + txgbe_attempt(txgbe_mtd_set_reg_field_word(cunit_port_ctrl, + mac_type, + 0, 3, + &cunit_port_ctrl)); + + txgbe_attempt(txgbe_mtd_set_reg_field_word(cunit_mode_cfg, + (u16)mac_powerdown, + 3, 1, + &cunit_mode_cfg)); + + if (mac_snoop_sel < MTD_MAC_SNOOP_LEAVE_UNCHANGED) + txgbe_attempt(txgbe_mtd_set_reg_field_word(cunit_mode_cfg, + mac_snoop_sel, + 8, 2, + &cunit_mode_cfg)); + + txgbe_attempt(txgbe_mtd_set_reg_field_word(cunit_mode_cfg, + mac_active_lane_sel, 10, + 1, &cunit_mode_cfg)); + + if (mac_link_down_speed < MTD_MAC_SPEED_LEAVE_UNCHANGED) + txgbe_attempt(txgbe_mtd_set_reg_field_word(cunit_mode_cfg, + mac_link_down_speed, + 6, 2, &cunit_mode_cfg)); + + /* Now write changed values */ + txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port, + MTD_C_UNIT_GENERAL, + MTD_CUNIT_PORT_CTRL, + cunit_port_ctrl)); + txgbe_attempt(txgbe_mtd_xmdio_wr(dev_ptr, port, + MTD_C_UNIT_GENERAL, + MTD_CUNIT_MODE_CONFIG, + cunit_mode_cfg)); + + if (MTD_IS_X33X0_BASE(dev_ptr->device_id)) + if (mac_max_speed != MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED) + txgbe_attempt(txgbe_mtd_set_phy_feild(dev_ptr, port, + 31, 0xF0A8, 0, + 2, mac_max_speed)); + + if (do_sw_rst) { + txgbe_attempt(txgbe_mtd_cunit_sw_rst(dev_ptr, port)); + + if (mac_link_down_speed < MTD_MAC_SPEED_LEAVE_UNCHANGED) + txgbe_attempt(txgbe_mtd_cunit_sw_rst(dev_ptr, port)); + + if (rerun_serdes_init) + txgbe_attempt(txgbe_mtd_rerun_serdes_autoneg_init_automode(dev_ptr, port)); + } + + return MTD_OK; +} + +static u32 txgbe_mtd_sem_create(struct mtd_dev *dev, enum begin_state state) +{ + if (dev->sem_create) + return dev->sem_create(state); + + return 1; +} + +static u32 txgbe_mtd_sem_delete(struct mtd_dev *dev, u32 smid) +{ + if ((dev->sem_delete) && smid) + if (dev->sem_delete(smid)) + return MTD_FAIL; + + return MTD_OK; +} + +u32 txgbe_mtd_load_driver(fmtd_read_mdio read_mdio, + fmtd_write_mdio write_mdio, + bool macsec_indirect_access, + f_create sem_create, + f_delete sem_delete, + f_take sem_take, + f_give sem_give, + u16 any_port, + struct mtd_dev *dev) +{ + u16 data; + + /* Check for parameters validity */ + if (!dev) + return MTD_API_ERR_DEV; + + /* The initialization was already done. */ + if (dev->dev_enabled) + return MTD_API_ERR_DEV_ALREADY_EXIST; + + /* Make sure txgbe_mtd_wait() was implemented */ + if (txgbe_mtd_wait(1) == MTD_FAIL) + return MTD_FAIL; + + dev->fmtd_read_mdio = read_mdio; + dev->mtd_write_mdio = write_mdio; + + dev->sem_create = sem_create; + dev->sem_delete = sem_delete; + dev->sem_take = sem_take; + dev->sem_give = sem_give; + dev->macsec_indirect_access = macsec_indirect_access; + + /* try to read 1.0 */ + if ((txgbe_mtd_hw_xmdio_rd(dev, + any_port, 1, 0, &data)) != MTD_OK) + return MTD_API_FAIL_READ_REG; + + /* Initialize the MACsec Register Access semaphore. 
*/ + dev->multi_addr_sem = txgbe_mtd_sem_create(dev, FULL); + if (dev->multi_addr_sem == 0) + return MTD_API_FAIL_SEM_CREATE; + + if (dev->msec_ctrl.msec_rev == MTD_MSEC_REV_FPGA) { + dev->device_id = MTD_REV_3310P_Z2; /* verification: change if needed */ + dev->num_ports = 1; /* verification: change if needed */ + dev->this_port = 0; + } else { + /* After everything else is done, can fill in the device id */ + if ((txgbe_mtd_get_phy_revision(dev, any_port, + &dev->device_id, + &dev->num_ports, + &dev->this_port)) != MTD_OK) + return MTD_FAIL; + } + + if (MTD_IS_X33X0_BASE(dev->device_id)) + dev->macsec_indirect_access = false; + + dev->dev_enabled = true; + + return MTD_OK; +} + +u32 txgbe_mtd_unload_driver(struct mtd_dev *dev) +{ + /* Delete the MACsec register access semaphore. */ + if (txgbe_mtd_sem_delete(dev, dev->multi_addr_sem) != MTD_OK) + return MTD_API_FAIL_SEM_DELETE; + + dev->fmtd_read_mdio = NULL; + dev->mtd_write_mdio = NULL; + + dev->sem_create = NULL; + dev->sem_delete = NULL; + dev->sem_take = NULL; + dev->sem_give = NULL; + + dev->dev_enabled = false; + + return MTD_OK; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h new file mode 100644 index 000000000000..3f2796b563d0 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mtd.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_MTD_H_ +#define _TXGBE_MTD_H_ + +#define MTD_CONVERT_BOOL_TO_UINT(bool_var, uint_var) \ + {(bool_var) ? (uint_var = 1) : (uint_var = 0); } +#define MTD_CONVERT_UINT_TO_BOOL(uint_var, bool_var) \ + {(uint_var) ? (bool_var = true) : (bool_var = false); } +#define MTD_GET_BOOL_AS_BIT(bool_var) ((bool_var) ? 1 : 0) +#define MTD_GET_BIT_AS_BOOL(uint_var) ((uint_var) ? 
true : false)
+
+typedef void (*MTD_VOIDFUNCPTR) (void); /* ptr to function returning void */
+typedef u32 (*MTD_INTFUNCPTR) (void); /* ptr to function returning int */
+
+enum begin_state {
+ EMPTY,
+ FULL
+};
+
+typedef u32 (*f_create)(enum begin_state state);
+typedef u32 (*f_delete)(u32 sem_id);
+typedef u32 (*f_take)(u32 sem_id, u32 timeout);
+typedef u32 (*f_give)(u32 sem_id);
+
+struct mtd_dev;
+
+typedef u32 (*fmtd_read_mdio)(struct mtd_dev *dev,
+ u16 port,
+ u16 mmd,
+ u16 reg,
+ u16 *value);
+typedef u32 (*fmtd_write_mdio)(struct mtd_dev *dev,
+ u16 port,
+ u16 mmd,
+ u16 reg,
+ u16 value);
+
+/* enum txgbe_mtd_dev_id format: */
+/* Bits 15:13 reserved */
+/* Bit 12: 1-> E20X0 device with max speed of 5G and no fiber interface */
+/* Bit 11: 1-> Macsec Capable (Macsec/PTP module included) */
+/* Bit 10: 1-> Copper Capable (T unit interface included) */
+/* Bits 9:4 0x18 -> X32X0 base, 0x1A -> X33X0 base */
+/* Bits 3:0 revision/number of ports indication, see list */
+/* Following defines are for building enum txgbe_mtd_dev_id */
+#define MTD_E20X0_DEVICE BIT(12) /* whether this is an E20X0 device group */
+#define MTD_MACSEC_CAPABLE BIT(11) /* whether the device has a Macsec/PTP module */
+#define MTD_COPPER_CAPABLE BIT(10) /* whether the device has a copper (T unit) module */
+#define MTD_X32X0_BASE (0x18 << 4) /* whether the device uses X32X0 firmware base */
+#define MTD_X33X0_BASE (0x1A << 4) /* whether the device uses X33X0 firmware base */
+
+/* Following macros are to test enum txgbe_mtd_dev_id for various features */
+#define MTD_IS_E20X0_DEVICE(mtd_rev_id) ((bool)((mtd_rev_id) & MTD_E20X0_DEVICE))
+#define MTD_IS_MACSEC_CAPABLE(mtd_rev_id) ((bool)((mtd_rev_id) & MTD_MACSEC_CAPABLE))
+#define MTD_IS_COPPER_CAPABLE(mtd_rev_id) ((bool)((mtd_rev_id) & MTD_COPPER_CAPABLE))
+#define MTD_IS_X32X0_BASE(mtd_rev_id) ((bool)(((mtd_rev_id) & (0x3F << 4)) == MTD_X32X0_BASE))
+#define MTD_IS_X33X0_BASE(mtd_rev_id) ((bool)(((mtd_rev_id) & (0x3F << 4)) == MTD_X33X0_BASE))
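+
+/* Worked example (illustrative, not in the original patch): MTD_REV_3310P_A0
+ * in the enum below is MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE |
+ * MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0 = 0x800 | 0x400 | 0x1A0 | 0xA
+ * = 0xDAA, so MTD_IS_X33X0_BASE() and both capability tests above evaluate
+ * true for it.
+ */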
+
+#define MTD_X33X0BASE_SINGLE_PORTA0 0xA
+#define MTD_X33X0BASE_DUAL_PORTA0 0x6
+#define MTD_X33X0BASE_QUAD_PORTA0 0x2
+
+/* internal device registers */
+#define MTD_REG_CCCR9 0xF05E /* do not enclose in parentheses */
+#define MTD_REG_SCR 0xF0F0 /* do not enclose in parentheses */
+#define MTD_REG_ECSR 0xF0F5 /* do not enclose in parentheses */
+
+/* WARNING: If you add/modify this list, you must also modify txgbe_mtd_phy_rev_vaild() */
+enum txgbe_mtd_dev_id {
+ MTD_REV_UNKNOWN = 0,
+ MTD_REV_3240P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1),
+ MTD_REV_3240P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2),
+ MTD_REV_3240P_A1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3),
+ MTD_REV_3220P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4),
+ MTD_REV_3220P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5),
+ MTD_REV_3240_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1),
+ MTD_REV_3240_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2),
+ MTD_REV_3240_A1 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3),
+ MTD_REV_3220_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4),
+ MTD_REV_3220_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5),
+
+ MTD_REV_3310P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8),
+ MTD_REV_3320P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4),
+ MTD_REV_3340P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0),
+ MTD_REV_3310_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8),
+ MTD_REV_3320_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4),
+ MTD_REV_3340_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0),
+
+ MTD_REV_3310P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9),
+ MTD_REV_3320P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5),
+ MTD_REV_3340P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1),
+ MTD_REV_3310_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9),
+ MTD_REV_3320_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5),
+ MTD_REV_3340_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1),
+
+ MTD_REV_E2010P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9),
+ MTD_REV_E2020P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5),
+ MTD_REV_E2040P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1),
+ MTD_REV_E2010_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9),
+ MTD_REV_E2020_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5),
+ MTD_REV_E2040_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1),
+
+ MTD_REV_3310P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0),
+ MTD_REV_3320P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0),
+ MTD_REV_3340P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0),
+ MTD_REV_3310_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0),
+ MTD_REV_3320_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0),
+ MTD_REV_3340_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0),
+
+ MTD_REV_E2010P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0),
+ MTD_REV_E2020P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0),
+ MTD_REV_E2040P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE |
+ MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0),
+ MTD_REV_E2010_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0),
+ MTD_REV_E2020_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0),
+ MTD_REV_E2040_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE |
+ MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0),
+
+ MTD_REV_2340P_A1 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x3),
+ MTD_REV_2320P_A0 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x5),
+ MTD_REV_2340_A1 = (MTD_X32X0_BASE | 0x3),
+ MTD_REV_2320_A0 = (MTD_X32X0_BASE | 0x5)
+};
+
+enum mtd_msec_rev {
+ MTD_MSEC_REV_Z0A,
+ MTD_MSEC_REV_Y0A,
+ MTD_MSEC_REV_A0B,
+ MTD_MSEC_REV_FPGA,
+ MTD_MSEC_REV_UNKNOWN = -1
+};
+
+/* compatible for USB test */
+struct mtd_sec_ctrl {
+ s32 dev_num;
+ s32 port_num;
+ u16 prev_addr;
+ u16 prev_dataL;
+ enum mtd_msec_rev msec_rev;
+};
+
+struct mtd_dev {
+ enum txgbe_mtd_dev_id device_id;
+ bool dev_enabled;
+ u8 num_ports;
+ u8 this_port;
+ u32 multi_addr_sem;
+
+ fmtd_read_mdio fmtd_read_mdio;
+ fmtd_write_mdio mtd_write_mdio;
+
+ f_create sem_create; /* create semaphore */
+ f_delete sem_delete; /* delete the semaphore */
+ f_take sem_take; /* try to get a semaphore */
+ f_give sem_give; /* return semaphore */
+
+ u8 macsec_indirect_access; /* if true use internal processor to access Macsec */
+ struct mtd_sec_ctrl msec_ctrl; /* structure used for internal verification */
+
+ void *app_data;
+};
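+
+/* Illustrative only (not part of the original patch): a caller typically
+ * wires its own MDIO accessors into this struct through
+ * txgbe_mtd_load_driver(); the semaphore callbacks may be left NULL, in
+ * which case txgbe_mtd_sem_create() falls back to a dummy non-zero id, e.g.:
+ *
+ *   err = txgbe_mtd_load_driver(my_mdio_read, my_mdio_write, false,
+ *                               NULL, NULL, NULL, NULL, 0, &mtd);
+ *
+ * where my_mdio_read/my_mdio_write are hypothetical fmtd_read_mdio /
+ * fmtd_write_mdio implementations.
+ */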
+
+#define MTD_OK 0 /* Operation succeeded */
+#define MTD_FAIL 1 /* Operation failed */
+#define MTD_PENDING 2 /* Pending */
+
+/* bit definition */
+#define MTD_BIT_0 0x0001
+#define MTD_BIT_1 0x0002
+#define MTD_BIT_2 0x0004
+#define MTD_BIT_3 0x0008
+#define MTD_BIT_4 0x0010
+#define MTD_BIT_5 0x0020
+#define MTD_BIT_6 0x0040
+#define MTD_BIT_7 0x0080
+#define MTD_BIT_8 0x0100
+#define MTD_BIT_9 0x0200
+#define MTD_BIT_10 0x0400
+#define MTD_BIT_11 0x0800
+#define MTD_BIT_12 0x1000
+#define MTD_BIT_13 0x2000
+#define MTD_BIT_14 0x4000
+#define MTD_BIT_15 0x8000
+
+#define MTD_DBG_ERROR(...)
+#define MTD_DBG_INFO(...)
+#define MTD_DBG_CRITIC_INFO(...)
+
+#define MTD_API_MAJOR_VERSION 2
+#define MTD_API_MINOR_VERSION 0
+
+static inline int txgbe_attempt(int result)
+{
+ if (result == MTD_FAIL)
+ return MTD_FAIL;
+ return MTD_OK;
+}
+
+#define MTD_7_0010_SPEED_BIT_LENGTH 4
+#define MTD_7_0010_SPEED_BIT_POS 5
+#define MTD_7_8000_SPEED_BIT_LENGTH 2
+#define MTD_7_8000_SPEED_BIT_POS 8
+#define MTD_7_0020_SPEED_BIT_LENGTH 1
+#define MTD_7_0020_SPEED_BIT_POS 12
+#define MTD_7_0020_SPEED_BIT_LENGTH2 2
+#define MTD_7_0020_SPEED_BIT_POS2 7
+
+/* Bit defines for speed bits */
+#define MTD_FORCED_SPEEDS_BIT_MASK (MTD_SPEED_10M_HD_AN_DIS | MTD_SPEED_10M_FD_AN_DIS | \
+ MTD_SPEED_100M_HD_AN_DIS | MTD_SPEED_100M_FD_AN_DIS)
+#define MTD_LOWER_BITS_MASK 0x000F
+#define MTD_GIG_SPEED_POS 4
+#define MTD_XGIG_SPEED_POS 6
+#define MTD_2P5G_SPEED_POS 11
+#define MTD_5G_SPEED_POS 12
+#define MTD_GET_1000BT_BITS(__speed_bits) (((__speed_bits) & (MTD_SPEED_1GIG_HD | \
+ MTD_SPEED_1GIG_FD)) \
+ >> MTD_GIG_SPEED_POS)
+#define MTD_GET_10GBT_BIT(__speed_bits) (((__speed_bits) & MTD_SPEED_10GIG_FD) \
+ >> MTD_XGIG_SPEED_POS)
+#define MTD_GET_2P5GBT_BIT(__speed_bits) (((__speed_bits) & MTD_SPEED_2P5GIG_FD) \
+ >> MTD_2P5G_SPEED_POS)
+#define MTD_GET_5GBT_BIT(__speed_bits) (((__speed_bits) & MTD_SPEED_5GIG_FD) \
+ >> MTD_5G_SPEED_POS)
+
+#define MTD_CU_SPEED_10_MBPS 0 /* copper is 10BASE-T */
+#define MTD_CU_SPEED_100_MBPS 1 /* copper is 100BASE-TX */
+#define MTD_CU_SPEED_1000_MBPS 2 /* copper is 1000BASE-T */
+#define MTD_CU_SPEED_10_GBPS 3 /* copper is 10GBASE-T */
+
+/* for 88X33X0 family: */
+#define MTD_CU_SPEED_NBT 3 /* copper is NBASE-T */
+#define MTD_CU_SPEED_NBT_10G 0 /* copper is 10GBASE-T */
+#define MTD_CU_SPEED_NBT_5G 2 /* copper is 5GBASE-T */
+#define MTD_CU_SPEED_NBT_2P5G 1 /* copper is 2.5GBASE-T */
+
+#define MTD_ADV_NONE 0x0000 /* No speeds to be advertised */
+#define MTD_SPEED_10M_HD 0x0001 /* 10BT half-duplex */
+#define MTD_SPEED_10M_FD 0x0002 /* 10BT full-duplex */
+#define MTD_SPEED_100M_HD 0x0004 /* 100BASE-TX half-duplex */
+#define MTD_SPEED_100M_FD 0x0008 /* 100BASE-TX full-duplex */
+#define MTD_SPEED_1GIG_HD 0x0010 /* 1000BASE-T half-duplex */
+#define MTD_SPEED_1GIG_FD 0x0020 /* 1000BASE-T full-duplex */
+#define MTD_SPEED_10GIG_FD 0x0040 /* 10GBASE-T full-duplex */
+#define MTD_SPEED_2P5GIG_FD 0x0800 /* 2.5GBASE-T full-duplex, 88X33X0/88E20X0 family only */
+#define MTD_SPEED_5GIG_FD 0x1000 /* 5GBASE-T full-duplex, 88X33X0/88E20X0 family only */
+#define MTD_SPEED_ALL (MTD_SPEED_10M_HD | \
+ MTD_SPEED_10M_FD | \
+ MTD_SPEED_100M_HD | \
+ MTD_SPEED_100M_FD | \
+ MTD_SPEED_1GIG_HD | \
+ MTD_SPEED_1GIG_FD | \
+ MTD_SPEED_10GIG_FD)
+#define MTD_SPEED_ALL_33X0 (MTD_SPEED_10M_HD | \
+ MTD_SPEED_10M_FD | \
+ MTD_SPEED_100M_HD | \
+ MTD_SPEED_100M_FD | \
+ MTD_SPEED_1GIG_HD | \
+ MTD_SPEED_1GIG_FD | \
+ 
MTD_SPEED_10GIG_FD | \ + MTD_SPEED_2P5GIG_FD |\ + MTD_SPEED_5GIG_FD) + +/* these bits are for forcing the speed and disabling autonegotiation */ +#define MTD_SPEED_10M_HD_AN_DIS 0x0080 /* Speed forced to 10BT half-duplex */ +#define MTD_SPEED_10M_FD_AN_DIS 0x0100 /* Speed forced to 10BT full-duplex */ +#define MTD_SPEED_100M_HD_AN_DIS 0x0200 /* Speed forced to 100BT half-duplex */ +#define MTD_SPEED_100M_FD_AN_DIS 0x0400 /* Speed forced to 100BT full-duplex */ + +#define MTD_SPEED_MISMATCH 0x8000 + +/* for mac_type */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_EN (0x0) /* X32X0/X33x0, but not E20x0 */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_DIS (0x1) /* X32x0/X3340/X3320, but not X3310/E20x0 */ +#define MTD_MAC_TYPE_XAUI_RATE_ADAPT (0x1) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_RXAUI_RATE_ADAPT (0x2) +#define MTD_MAC_TYPE_XAUI (0x3) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_XFI_SGMII_AN_EN (0x4) +#define MTD_MAC_TYPE_XFI_SGMII_AN_DIS (0x5) +#define MTD_MAC_TYPE_XFI_RATE_ADAPT (0x6) +#define MTD_MAC_TYPE_USXGMII (0x7) /* X33x0 only */ +#define MTD_MAC_LEAVE_UNCHANGED (0x8) /* use this option to not touch these bits */ + +/* for mac_snoop_sel */ +#define MTD_MAC_SNOOP_FROM_NETWORK (0x2) +#define MTD_MAC_SNOOP_FROM_HOST (0x3) +#define MTD_MAC_SNOOP_OFF (0x0) +#define MTD_MAC_SNOOP_LEAVE_UNCHANGED (0x4) /* use this option to not touch these bits */ +/* for mac_link_down_speed */ +#define MTD_MAC_SPEED_10_MBPS MTD_CU_SPEED_10_MBPS +#define MTD_MAC_SPEED_100_MBPS MTD_CU_SPEED_100_MBPS +#define MTD_MAC_SPEED_1000_MBPS MTD_CU_SPEED_1000_MBPS +#define MTD_MAC_SPEED_10_GBPS MTD_CU_SPEED_10_GBPS +#define MTD_MAC_SPEED_LEAVE_UNCHANGED (0x4) +/* X33X0/E20X0 devices only for mac_max_speed */ +#define MTD_MAX_MAC_SPEED_10G (0) +#define MTD_MAX_MAC_SPEED_5G (2) +#define MTD_MAX_MAC_SPEED_2P5G (3) +#define MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED (4) +#define MTD_MAX_MAC_SPEED_NOT_APPLICABLE (4) /* 32X0 devices can pass this */ + +/* 88X3240/3220 Device Number Definitions */ +#define MTD_T_UNIT_PMA_PMD 1 +#define MTD_T_UNIT_PCS_CU 3 +#define MTD_X_UNIT 3 +#define MTD_H_UNIT 4 +#define MTD_T_UNIT_AN 7 +#define MTD_XFI_DSP 30 +#define MTD_C_UNIT_GENERAL 31 +#define MTD_M_UNIT 31 + +/* 88X3240/3220 Device Number Definitions Host Redundant Mode */ +#define MTD_BASER_LANE_0 MTD_H_UNIT +#define MTD_BASER_LANE_1 MTD_X_UNIT + +/* 88X3240/3220 T Unit Registers MMD 1 */ +#define MTD_TUNIT_IEEE_PMA_CTRL1 0x0000 /* do not enclose in parentheses */ +#define MTD_TUNIT_IEEE_PMA_DEVID2 0x0003 /* do not enclose in parentheses */ +#define MTD_TUNIT_PHY_EXT_CTRL_1 0xC000 /* do not enclose in parentheses */ +#define MTD_TUNIT_XG_EXT_STATUS 0xC001 /* do not enclose in parentheses */ +#define MTD_TUNIT_BIST_STATUS_REG 0xC00C /* do not enclose in parentheses */ +#define MTD_TUNIT_PHY_REV_INFO_REG 0xC04E /* do not enclose in parentheses */ +#define MTD_BOOT_STATUS_REG 0xC050 /* do not enclose in parentheses */ + +#define MTD_TUNIT_IEEE_PCS_CTRL1 0x0000 /* do not enclose in parentheses */ +/* control/status for serdes initialization */ +#define MTD_SERDES_CTRL_STATUS 0x800F /* do not enclose in parentheses */ +/* 88X3240/3220 C Unit Registers MMD 31 */ +#define MTD_CUNIT_MODE_CONFIG 0xF000 /* do not enclose in parentheses */ +#define MTD_CUNIT_PORT_CTRL 0xF001 /* do not enclose in parentheses */ + +#define MTD_API_FAIL_SEM_CREATE (0x18 << 24) /*sem_create Failed. */ +#define MTD_API_FAIL_SEM_DELETE (0x19 << 24) /*sem_delete Failed. */ +#define MTD_API_FAIL_READ_REG (0x16 << 16) /*Reading from phy reg failed. 
*/
+#define MTD_API_ERR_DEV			(0x3c << 16) /* driver structure is NULL */
+#define MTD_API_ERR_DEV_ALREADY_EXIST	(0x3e << 16) /* device driver already loaded */
+
+#define MTD_CLEAR_PAUSE		0 /* clears both pause bits */
+#define MTD_SYM_PAUSE		1 /* for symmetric pause only */
+#define MTD_ASYM_PAUSE		2 /* for asymmetric pause only */
+#define MTD_SYM_ASYM_PAUSE	3 /* for both */
+
+u32 txgbe_mtd_load_driver(fmtd_read_mdio read_mdio,
+			  fmtd_write_mdio write_mdio,
+			  bool macsec_indirect_access,
+			  f_create sem_create,
+			  f_delete sem_delete,
+			  f_take sem_take,
+			  f_give sem_give,
+			  u16 any_port,
+			  struct mtd_dev *dev);
+u32 txgbe_mtd_unload_driver(struct mtd_dev *dev);
+u32 txgbe_mtd_xmdio_wr(struct mtd_dev *dev_ptr,
+		       u16 port,
+		       u16 dev,
+		       u16 reg,
+		       u16 value);
+u32 txgbe_mtd_hw_xmdio_rd(struct mtd_dev *dev_ptr,
+			  u16 port,
+			  u16 dev,
+			  u16 reg,
+			  u16 *data);
+
+u32 txgbe_mtd_get_phy_reg_filed(struct mtd_dev *dev_ptr,
+				u16 port,
+				u16 dev,
+				u16 reg_addr,
+				u8 field_offset,
+				u8 field_length,
+				u16 *data);
+u32 txgbe_mtd_set_phy_feild(struct mtd_dev *dev_ptr,
+			    u16 port,
+			    u16 dev,
+			    u16 reg_addr,
+			    u8 field_offset,
+			    u8 field_length,
+			    u16 data);
+u32 txgbe_mtd_get_reg_filed(u16 reg_data,
+			    u8 field_offset,
+			    u8 field_length,
+			    u16 *data);
+u32 txgbe_mtd_set_reg_field_word(u16 reg_data,
+				 u16 bit_field_data,
+				 u8 field_offset,
+				 u8 field_length,
+				 u16 *data);
+u32 txgbe_mtd_wait(u32 x);
+u32 txgbe_mtd_sw_rst(struct mtd_dev *dev_ptr,
+		     u16 port,
+		     u16 timeout);
+
+u32 txgbe_mtd_hw_rst(struct mtd_dev *dev_ptr,
+		     u16 port,
+		     u16 timeout);
+u32 txgbe_mtd_set_mac_intf_ctrl(struct mtd_dev *dev_ptr,
+				u16 port,
+				u16 mac_type,
+				bool mac_powerdown,
+				u16 mac_snoop_sel,
+				u16 mac_active_lane_sel,
+				u16 mac_link_down_speed,
+				u16 mac_max_speed,
+				bool do_sw_rst,
+				bool rerun_serdes_init);
+
+u32 txgbe_mtd_enable_speeds(struct mtd_dev *dev_ptr,
+			    u16 port,
+			    u16 speed_bits,
+			    bool an_restart);
+
+u32 txgbe_mtd_get_autoneg_res(struct mtd_dev *dev_ptr, u16 port, u16 *speed_resolution);
+u32 txgbe_mtd_autoneg_done(struct mtd_dev *dev_ptr, u16 port, bool *an_speed_res_done);
+u32 txgbe_mtd_is_baset_up(struct mtd_dev *dev_ptr,
+			  u16 port,
+			  u16 *speed,
+			  bool *link_up);
+u32 txgbe_mtd_get_firmver(struct mtd_dev *dev_ptr,
+			  u16 port,
+			  u8 *major,
+			  u8 *minor,
+			  u8 *inc,
+			  u8 *test);
+u32 txgbe_mtd_set_pause_adver(struct mtd_dev *dev_ptr,
+			      u16 port,
+			      u32 pause_type,
+			      bool an_restart);
+
+u32 txgbe_mtd_get_lp_adver_pause(struct mtd_dev *dev_ptr,
+				 u16 port,
+				 u8 *pause_bits);
+
+u32 txgbe_mtd_get_phy_revision(struct mtd_dev *dev_ptr,
+			       u16 port,
+			       enum txgbe_mtd_dev_id *phy_rev,
+			       u8 *num_ports,
+			       u8 *this_port);
+u32 txgbe_mtd_get_forced_speed(struct mtd_dev *dev_ptr,
+			       u16 port,
+			       bool *speed_is_forced,
+			       u16 *force_speed);
+u32 txgbe_mtd_undo_forced_speed(struct mtd_dev *dev_ptr,
+				u16 port,
+				bool an_restart);
+
+u32 txgbe_mtd_autoneg_enable(struct mtd_dev *dev_ptr, u16 port);
+u32 txgbe_mtd_autoneg_restart(struct mtd_dev *dev_ptr, u16 port);
+u32 txgbe_mtd_phy_rev_vaild(enum txgbe_mtd_dev_id phy_rev);
+#endif /* _TXGBE_MTD_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c
new file mode 100644
index 000000000000..374ea42fa1d4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_param.c
@@ -0,0 +1,1089 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
*/ + +#include +#include + +#include "txgbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define TXGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define TXGBE_PARAM_INIT { [0 ... TXGBE_MAX_NIC] = OPTION_UNSET } + +#define TXGBE_PARAM(X, desc) \ + static int X[TXGBE_MAX_NIC + 1] = TXGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc) + +/* Tx unidirectional mode + * + * Valid Range: [0, 1] + * + * Default Value: 0 + */ +TXGBE_PARAM(TX_UNIDIR_MODE, "Tx Unidirectional Mode [0, 1]"); +#define TX_DEFAULT_UNIDIR_MODE 0 + +/* ffe_main (KR/KX4/KX/SFI) + * + * Valid Range: 0-60 + * + * Default Value: 27 + */ +TXGBE_PARAM(FFE_MAIN, "TX_EQ MAIN (0 - 40)"); +#define TXGBE_DEFAULT_FFE_MAIN 27 + +/* ffe_pre + * + * Valid Range: 0-60 + * + * Default Value: 8 + */ +TXGBE_PARAM(FFE_PRE, "TX_EQ PRE (0 - 40)"); +#define TXGBE_DEFAULT_FFE_PRE 8 + +/* ffe_post + * + * Valid Range: 0-60 + * + * Default Value: 44 + */ +TXGBE_PARAM(FFE_POST, "TX_EQ POST (0 - 40)"); +#define TXGBE_DEFAULT_FFE_POST 44 + +/* ffe_set + * + * Valid Range: 0-4 + * + * Default Value: 0 + */ +TXGBE_PARAM(FFE_SET, "TX_EQ SET (0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); +#define TXGBE_DEFAULT_FFE_SET 0 + +/* backplane_mode + * + * Valid Range: 0-4 + * - 0 - NULL + * - 1 - sfi + * - 2 - kr + * - 3 - kx4 + * - 4 - kx + * + * Default Value: 0 + */ +TXGBE_PARAM(backplane_mode, "Backplane Mode(0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); +#define TXGBE_BP_NULL 0 +#define TXGBE_BP_SFI 1 +#define TXGBE_BP_KR 2 +#define TXGBE_BP_KX4 3 +#define TXGBE_BP_KX 4 +#define TXGBE_DEFAULT_BP_MODE TXGBE_BP_NULL + +/* backplane_auto + * + * Valid Range: 0-1 + * - 0 - NO AUTO + * - 1 - AUTO + * Default Value: 0 + */ +TXGBE_PARAM(backplane_auto, "Backplane AUTO mode (0 = NO AUTO, 1 = AUTO)"); +#define TXGBE_BP_NAUTO 0 +#define TXGBE_BP_AUTO 1 +#define TXGBE_DEFAULT_BP_AUTO -1 + +/* int_mode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +TXGBE_PARAM(interrupt_type, "Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X)"); +TXGBE_PARAM(int_mode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2"); +#define TXGBE_INT_LEGACY 0 +#define TXGBE_INT_MSI 1 +#define TXGBE_INT_MSIX 2 +#define TXGBE_DEFAULT_INT TXGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +TXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +TXGBE_PARAM(RSS, "Number of RSS Descriptor Queues, default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. 
+ * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. + * + * Default Value: 1 + */ + +#define TXGBE_DEFAULT_NUM_VMDQ 8 + +TXGBE_PARAM(VMDQ, "Number of VMDQ: 0/1 = disable, 2-16 enable (default=" + XSTRINGIFY(TXGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 63 + +TXGBE_PARAM(max_vfs, "Number of VF: 0 = disable (default), 1-" + XSTRINGIFY(MAX_SRIOV_VFS) " = enable this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/*Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. + */ +TXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 + +TXGBE_PARAM(itr, "Maximum ITR (0,1,980-500000), default 1"); +#define MAX_ITR TXGBE_MAX_INT_RATE +#define MIN_ITR TXGBE_MIN_INT_RATE + +/* lli_port (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(lli_port, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + +/* lli_size (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(lli_size, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* lli_etype (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(lli_etype, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +/* Flow Director packet buffer allocation level + * + * Valid Range: 1-3 + * 1 = 8k hash/2k perfect, + * 2 = 16k hash/4k perfect, + * 3 = 32k hash/8k perfect + * + * Default Value: 0 + */ +TXGBE_PARAM(fdir_pballoc, "Flow Director packet buffer"); + +#define TXGBE_DEFAULT_FDIR_PBALLOC TXGBE_FDIR_PBALLOC_64K + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +TXGBE_PARAM(atr_sample_rate, "Software ATR Tx packet sample rate"); + +#define TXGBE_MAX_ATR_SAMPLE_RATE 255 +#define TXGBE_MIN_ATR_SAMPLE_RATE 1 +#define TXGBE_ATR_SAMPLE_RATE_OFF 0 +#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 + +#if IS_ENABLED(CONFIG_FCOE) +/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables FCoE Offload + * - 1 - enables FCoE Offload + * + * Default Value: 1 + */ +TXGBE_PARAM(fcoe, "Disable or enable FCoE Offload, default 1"); +#endif /* CONFIG_FCOE */ + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +TXGBE_PARAM(LRO, "Large Receive 
Offload (0,1), default 1 = on"); + +TXGBE_PARAM(fdir, "Support Flow director, default 1 = Enable, 0 = Disable "); +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +TXGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000), default 0 = off"); + +/* Enable/disable support for VXLAN rx checksum offload + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 1 on hardware that supports it + */ +TXGBE_PARAM(vxlan_rx, + "VXLAN receive checksum offload (0,1), default 1 = Enable"); + +#define TXGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define TXGBE_RXBUFMODE_HEADER_SPLIT 1 +#define TXGBE_DEFAULT_RXBUFMODE TXGBE_RXBUFMODE_NO_HEADER_SPLIT + +/* Cloud Switch mode + * + * Valid Range: 0-1 0 = disable Cloud Switch, 1 = enable Cloud Switch + * + * Default Value: 0 + */ +TXGBE_PARAM(cloudswitch, "Cloud Switch (0,1), default 0 = disable, 1 = enable"); + +struct txgbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct txgbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int txgbe_validate_option(u32 *value, + struct txgbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + return 0; + case OPTION_DISABLED: + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + return 0; + } + break; + case list_option: { + int i; + const struct txgbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) + return 0; + } + } + break; + default: + WARN_ON_ONCE(1); + } + + *value = (u32)opt->def; + return -1; +} + +/** + * txgbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
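+ *
+ * Every option block below follows the same pattern; a condensed sketch,
+ * with PARAM standing in for any of the module parameter arrays and
+ * "some_field" being a stand-in, not a real member:
+ *
+ *	u32 val = PARAM[bd];			(OPTION_UNSET when not given)
+ *
+ *	txgbe_validate_option(&val, &opt);	(resets val to opt.def when
+ *						 out of range)
+ *	adapter->some_field = val;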
+ **/ +void txgbe_check_options(struct txgbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct txgbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + { + u32 tx_unidir_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "TX_UNIDIR_MODE", + .err = + "using default of " __MODULE_STRING(TX_DEFAULT_UNIDIR_MODE), + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + + if (num_TX_UNIDIR_MODE > bd) { + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + if (tx_unidir_mode == OPTION_UNSET) + tx_unidir_mode = TX_UNIDIR_MODE[bd]; + txgbe_validate_option(&tx_unidir_mode, &opt); + adapter->tx_unidir_mode = tx_unidir_mode; + + } else { + adapter->tx_unidir_mode = 0; + } + } + + { /* MAIN */ + u32 ffe_main; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_MAIN", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_FFE_MAIN), + .def = TXGBE_DEFAULT_FFE_MAIN, + .arg = { .r = { .min = 0, + .max = 60} } + }; + + if (num_FFE_MAIN > bd) { + ffe_main = FFE_MAIN[bd]; + if (ffe_main == OPTION_UNSET) + ffe_main = FFE_MAIN[bd]; + txgbe_validate_option(&ffe_main, &opt); + adapter->ffe_main = ffe_main; + } else { + adapter->ffe_main = 27; + } + } + + { /* PRE */ + u32 ffe_pre; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_PRE", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_FFE_PRE), + .def = TXGBE_DEFAULT_FFE_PRE, + .arg = { .r = { .min = 0, + .max = 60} } + }; + + if (num_FFE_PRE > bd) { + ffe_pre = FFE_PRE[bd]; + if (ffe_pre == OPTION_UNSET) + ffe_pre = FFE_PRE[bd]; + txgbe_validate_option(&ffe_pre, &opt); + adapter->ffe_pre = ffe_pre; + } else { + adapter->ffe_pre = 8; + } + } + + { /* POST */ + u32 ffe_post; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_POST", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_FFE_POST), + .def = TXGBE_DEFAULT_FFE_POST, + .arg = { .r = { .min = 0, + .max = 60} } + }; + if (num_FFE_POST > bd) { + ffe_post = FFE_POST[bd]; + if (ffe_post == OPTION_UNSET) + ffe_post = FFE_POST[bd]; + txgbe_validate_option(&ffe_post, &opt); + adapter->ffe_post = ffe_post; + } else { + adapter->ffe_post = 44; + } + } + + { /* ffe_set */ + u32 ffe_set; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_SET", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_FFE_SET), + .def = TXGBE_DEFAULT_FFE_SET, + .arg = { .r = { .min = 0, + .max = 4} } + }; + + if (num_FFE_SET > bd) { + ffe_set = FFE_SET[bd]; + if (ffe_set == OPTION_UNSET) + ffe_set = FFE_SET[bd]; + txgbe_validate_option(&ffe_set, &opt); + adapter->ffe_set = ffe_set; + } else { + adapter->ffe_set = 0; + } + } + + { /* backplane_mode */ + u32 bp_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "backplane_mode", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_BP_MODE), + .def = TXGBE_DEFAULT_BP_MODE, + .arg = { .r = { .min = 0, + .max = 4} } + }; + + if (num_backplane_mode > bd) { + bp_mode = backplane_mode[bd]; + if (bp_mode == OPTION_UNSET) + bp_mode = backplane_mode[bd]; + txgbe_validate_option(&bp_mode, &opt); + adapter->backplane_mode = bp_mode; + } else { + adapter->backplane_mode = 0; + } + } + + { /* auto mode */ + u32 bp_auto; + static struct txgbe_option opt = { + .type = range_option, + .name = "bp_auto", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_BP_AUTO), + .def = TXGBE_DEFAULT_BP_AUTO, + .arg = { .r = { .min = 0, + .max = 2} } + }; + + if 
(num_backplane_auto > bd) { + bp_auto = backplane_auto[bd]; + if (bp_auto == OPTION_UNSET) + bp_auto = backplane_auto[bd]; + txgbe_validate_option(&bp_auto, &opt); + adapter->backplane_auto = bp_auto; + } else { + adapter->backplane_auto = -1; + } + } + + { /* Interrupt Mode */ + u32 irq_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of " __MODULE_STRING(TXGBE_DEFAULT_INT), + .def = TXGBE_DEFAULT_INT, + .arg = { .r = { .min = TXGBE_INT_LEGACY, + .max = TXGBE_INT_MSIX} } + }; + + if (num_int_mode > bd || num_interrupt_type > bd) { + irq_mode = int_mode[bd]; + if (irq_mode == OPTION_UNSET) + irq_mode = interrupt_type[bd]; + txgbe_validate_option(&irq_mode, &opt); + switch (irq_mode) { + case TXGBE_INT_MSIX: + break; + case TXGBE_INT_MSI: + if (*aflags & TXGBE_FLAG_MSI_CAPABLE) + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + break; + case TXGBE_INT_LEGACY: + default: + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + break; + } + } else { + /* default settings */ + if (opt.def == TXGBE_INT_MSIX && + *aflags & TXGBE_FLAG_MSIX_CAPABLE) { + *aflags |= TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == TXGBE_INT_MSI && + *aflags & TXGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + } + } + } + { /* Multiple Queue Support */ + static struct txgbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_MQ > bd) { + u32 mq = MQ[bd]; + + txgbe_validate_option(&mq, &opt); + if (mq) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } + /* Check Interoperability */ + if ((*aflags & TXGBE_FLAG_MQ_CAPABLE) && + !(*aflags & TXGBE_FLAG_MSIX_CAPABLE)) + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } + + { /* Receive-Side Scaling (RSS) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + + if (num_RSS > bd) { + rss = RSS[bd]; + txgbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + else + feature[RING_F_FDIR].limit = (u16)rss; + + feature[RING_F_RSS].limit = (u16)rss; + } else if (opt.def == 0) { + rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_FDIR].limit = (u16)rss; + feature[RING_F_RSS].limit = rss; + } + /* Check Interoperability */ + if (rss > 1) + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) + feature[RING_F_RSS].limit = 1; + + adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = TXGBE_MAX_VMDQ_INDICES + } } + }; + + if (num_VMDQ > bd) { + vmdq = 
VMDQ[bd]; + + txgbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective + */ + if (vmdq > 1) + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + else + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } + /* Check Interoperability */ + if (*aflags & TXGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + + if (num_max_vfs > bd) { + u32 vfs = max_vfs[bd]; + + if (txgbe_validate_option(&vfs, &opt)) + vfs = 0; + + adapter->max_vfs = vfs; + + if (vfs) + *aflags |= TXGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + + } else { + if (opt.def == OPTION_DISABLED) { + adapter->max_vfs = 0; + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->max_vfs = opt.def; + *aflags |= TXGBE_FLAG_SRIOV_ENABLED; + } + } + + /* Check Interoperability */ + if (*aflags & TXGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & TXGBE_FLAG_SRIOV_CAPABLE)) { + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } else if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + *aflags &= ~TXGBE_FLAG_SRIOV_ENABLED; + adapter->max_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct txgbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + + if (num_VEPA > bd) { + u32 vepa = VEPA[bd]; + + txgbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_itr > bd) { + u32 irq = itr[bd]; + + switch (irq) { + case 0: + adapter->rx_itr_setting = 0; + break; + case 1: + adapter->rx_itr_setting = 1; + break; + default: + txgbe_validate_option(&irq, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000 / irq) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } + } + + { /* Low Latency Interrupt TCP Port*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + + if (num_lli_port > bd) { + adapter->lli_port = lli_port[bd]; + if (adapter->lli_port) + txgbe_validate_option(&adapter->lli_port, &opt); + } else { + adapter->lli_port = opt.def; + } + } + { /* Low 
Latency Interrupt on Packet Size */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + + if (num_lli_size > bd) { + adapter->lli_size = lli_size[bd]; + if (adapter->lli_size) + txgbe_validate_option(&adapter->lli_size, &opt); + } else { + adapter->lli_size = opt.def; + } + } + { /* Low Latency Interrupt EtherType*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + + if (num_lli_etype > bd) { + adapter->lli_etype = lli_etype[bd]; + if (adapter->lli_etype) + txgbe_validate_option(&adapter->lli_etype, + &opt); + } else { + adapter->lli_etype = opt.def; + } + } + { /* LLI VLAN Priority */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + + if (num_LLIVLANP > bd) { + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + txgbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } + } else { + adapter->lli_vlan_pri = opt.def; + } + } + + { /* Flow Director packet buffer allocation */ + u32 fdir_pballoc_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Flow Director packet buffer allocation", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_FDIR_PBALLOC), + .def = TXGBE_DEFAULT_FDIR_PBALLOC, + .arg = {.r = {.min = TXGBE_FDIR_PBALLOC_64K, + .max = TXGBE_FDIR_PBALLOC_256K} } + }; + const char *pstring; + + if (num_fdir_pballoc > bd) { + fdir_pballoc_mode = fdir_pballoc[bd]; + txgbe_validate_option(&fdir_pballoc_mode, &opt); + switch (fdir_pballoc_mode) { + case TXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_256K; + pstring = "256kB"; + break; + case TXGBE_FDIR_PBALLOC_128K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_128K; + pstring = "128kB"; + break; + case TXGBE_FDIR_PBALLOC_64K: + default: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_64K; + pstring = "64kB"; + break; + } + } else { + adapter->fdir_pballoc = opt.def; + } + } + { /* Flow Director ATR Tx sample packet rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = TXGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = TXGBE_ATR_SAMPLE_RATE_OFF, + .max = TXGBE_MAX_ATR_SAMPLE_RATE} } + }; + + if (num_atr_sample_rate > bd) { + adapter->atr_sample_rate = atr_sample_rate[bd]; + + if (adapter->atr_sample_rate) + txgbe_validate_option(&adapter->atr_sample_rate, + &opt); + } else { + adapter->atr_sample_rate = opt.def; + } + } + +#if IS_ENABLED(CONFIG_FCOE) + { + *aflags &= ~TXGBE_FLAG_FCOE_CAPABLE; + { + struct txgbe_option opt = { + .type = enable_option, + .name = "Enabled/Disable FCoE offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + if (num_fcoe > bd) { + u32 fcoe_flag = fcoe[bd]; + + txgbe_validate_option(&fcoe_flag, &opt); + if (fcoe_flag) + *aflags |= TXGBE_FLAG_FCOE_CAPABLE; + } else { + if (opt.def == 
OPTION_ENABLED) + *aflags |= TXGBE_FLAG_FCOE_CAPABLE; + } + } + } +#endif /* CONFIG_FCOE */ + { /* LRO - Set Large Receive Offload */ + struct txgbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_ENABLED + }; + struct net_device *netdev = adapter->netdev; + + if (num_LRO > bd) { + u32 lro = LRO[bd]; + + txgbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; + + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } + } + { + struct txgbe_option opt = { + .type = enable_option, + .name = "Fdir", + .err = "defaulting to disabled", + .def = OPTION_DISABLED + }; + if (num_fdir > bd) { + u32 enable_fdir = fdir[bd]; + txgbe_validate_option(&enable_fdir, &opt); + if (enable_fdir) + adapter->hw.fdir_enabled = true; + else + adapter->hw.fdir_enabled = false; + } else if (opt.def == OPTION_ENABLED) { + adapter->hw.fdir_enabled = true; + } else { + adapter->hw.fdir_enabled = false; + } + } + + { /* VXLAN rx offload */ + struct txgbe_option opt = { + .type = range_option, + .name = "vxlan_rx", + .err = "defaulting to 1 (enabled)", + .def = 1, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + const char *cmsg = "VXLAN rx offload not supported on this hardware"; + const u32 flag = TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + opt.err = cmsg; + opt.msg = cmsg; + opt.def = 0; + opt.arg.r.max = 0; + } + if (num_vxlan_rx > bd) { + u32 enable_vxlan_rx = vxlan_rx[bd]; + + txgbe_validate_option(&enable_vxlan_rx, &opt); + if (enable_vxlan_rx) + adapter->flags |= flag; + else + adapter->flags &= ~flag; + } else if (opt.def) { + adapter->flags |= flag; + } else { + adapter->flags &= ~flag; + } + } + + { /* Cloud Switch */ + struct txgbe_option opt = { + .type = range_option, + .name = "CloudSwitch", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + + if (num_cloudswitch > bd) { + u32 enable_cloudswitch = cloudswitch[bd]; + + txgbe_validate_option(&enable_cloudswitch, &opt); + if (enable_cloudswitch) + adapter->flags |= + TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + else + adapter->flags &= + ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } else if (opt.def) { + adapter->flags |= TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } else { + adapter->flags &= ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } + } +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c new file mode 100644 index 000000000000..f0f9338a43bd --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include + +#include "txgbe_pcierr.h" +#include "txgbe.h" +#define TXGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN | \ + PCI_ERR_ROOT_CMD_NONFATAL_EN | \ + PCI_ERR_ROOT_CMD_FATAL_EN) + +#ifndef PCI_ERS_RESULT_NO_AER_DRIVER +/* No AER capabilities registered for the driver */ +#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t)6) +#endif + +static const char *aer_correctable_error_string[16] = { + "RxErr", /* Bit Position 0 */ + NULL, + NULL, + NULL, + NULL, + NULL, + "BadTLP", /* Bit Position 6 */ + "BadDLLP", /* Bit Position 7 */ + "Rollover", /* Bit Position 8 */ + NULL, + NULL, + NULL, + "Timeout", /* Bit Position 12 */ + "NonFatalErr", /* Bit Position 13 */ + "CorrIntErr", /* Bit Position 14 */ + "HeaderOF", /* Bit Position 15 */ +}; + +static const char *aer_uncorrectable_error_string[27] = { + "Undefined", /* Bit Position 0 */ + NULL, + NULL, + NULL, + "DLP", /* Bit Position 4 */ + "SDES", /* Bit Position 5 */ + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "TLP", /* Bit Position 12 */ + "FCP", /* Bit Position 13 */ + "CmpltTO", /* Bit Position 14 */ + "CmpltAbrt", /* Bit Position 15 */ + "UnxCmplt", /* Bit Position 16 */ + "RxOF", /* Bit Position 17 */ + "MalfTLP", /* Bit Position 18 */ + "ECRC", /* Bit Position 19 */ + "UnsupReq", /* Bit Position 20 */ + "ACSViol", /* Bit Position 21 */ + "UncorrIntErr", /* Bit Position 22 */ + "BlockedTLP", /* Bit Position 23 */ + "AtomicOpBlocked", /* Bit Position 24 */ + "TLPBlockedErr", /* Bit Position 25 */ + "PoisonTLPBlocked", /* Bit Position 26 */ +}; + +static pci_ers_result_t merge_result(enum pci_ers_result orig, + enum pci_ers_result new) +{ + if (new == PCI_ERS_RESULT_NO_AER_DRIVER) + return PCI_ERS_RESULT_NO_AER_DRIVER; + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { + case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_RECOVERED: + orig = new; + break; + case PCI_ERS_RESULT_DISCONNECT: + if (new == PCI_ERS_RESULT_NEED_RESET) + orig = PCI_ERS_RESULT_NEED_RESET; + break; + default: + break; + } + return orig; +} + +static int txgbe_report_error_detected(struct pci_dev *dev, + pci_channel_state_t state, + enum pci_ers_result *result) +{ + pci_ers_result_t vote; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->error_detected) { + /* If any device in the subtree does not have an error_detected + * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent + * error callbacks of "any" device in the subtree, and will + * exit in the disconnected error state. 
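+		 *
+		 * For illustration, merge_result() lets any later vote
+		 * override CAN_RECOVER/RECOVERED (so a CAN_RECOVER vote
+		 * followed by NEED_RESET leaves NEED_RESET), while a single
+		 * NO_AER_DRIVER vote wins outright.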
+ */ + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + vote = PCI_ERS_RESULT_NO_AER_DRIVER; + else + vote = PCI_ERS_RESULT_NONE; + } else { + err_handler = dev->driver->err_handler; + vote = err_handler->error_detected(dev, state); + } + + *result = merge_result(*result, vote); + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_frozen_detected(struct pci_dev *dev, void *data) +{ + return txgbe_report_error_detected(dev, pci_channel_io_frozen, data); +} + +static int txgbe_report_mmio_enabled(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->mmio_enabled) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->mmio_enabled(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_slot_reset(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->slot_reset) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->slot_reset(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int txgbe_report_resume(struct pci_dev *dev, void *data) +{ + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + dev->error_state = pci_channel_io_normal; + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->resume) + goto out; + + err_handler = dev->driver->err_handler; + err_handler->resume(dev); +out: + device_unlock(&dev->dev); + return 0; +} + +void txgbe_pcie_do_recovery(struct pci_dev *dev) +{ + pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; + struct pci_bus *bus; + u32 reg32; + int pos; + int delay = 1; + u32 id; + u16 ctrl; + /* Error recovery runs on all subordinates of the first downstream port. + * If the downstream port detected the error, it is cleared at the end. 
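+	 *
+	 * In sketch form, the sequence implemented below is:
+	 *
+	 *	walk the bus, collecting error_detected(frozen) votes;
+	 *	secondary bus reset via PCI_BRIDGE_CTL_BUS_RESET;
+	 *	poll PCI_COMMAND until config space is readable again,
+	 *	the delay doubling from 1 ms up to a roughly 60 s cap;
+	 *	walk the bus again for mmio_enabled()/slot_reset()/resume()
+	 *	as the merged votes allow.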
+ */ + if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) + dev = dev->bus->self; + bus = dev->subordinate; + + pci_walk_bus(bus, txgbe_report_frozen_detected, &status); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 &= ~TXGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + usleep_range(2000, 4000); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + ssleep(1); + + pci_read_config_dword(dev, PCI_COMMAND, &id); + while (id == ~0) { + if (delay > 60000) { + pci_warn(dev, "not ready %dms after %s; giving up\n", + delay - 1, "bus_reset"); + return; + } + + if (delay > 1000) + pci_info(dev, "not ready %dms after %s; waiting\n", + delay - 1, "bus_reset"); + + msleep(delay); + delay *= 2; + pci_read_config_dword(dev, PCI_COMMAND, &id); + } + + if (delay > 1000) + pci_info(dev, "ready %dms after %s\n", delay - 1, + "bus_reset"); + + pci_info(dev, "Root Port link has been reset\n"); + + if (pos) { + /* Clear Root Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); + + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 |= TXGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + if (status == PCI_ERS_RESULT_CAN_RECOVER) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast mmio_enabled message\n"); + pci_walk_bus(bus, txgbe_report_mmio_enabled, &status); + } + + if (status == PCI_ERS_RESULT_NEED_RESET) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast slot_reset message\n"); + pci_walk_bus(bus, txgbe_report_slot_reset, &status); + } + + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; + + pci_dbg(dev, "broadcast resume message\n"); + pci_walk_bus(bus, txgbe_report_resume, &status); + +failed: + ; +} + +void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status) +{ + unsigned long i; + const char *errmsg = NULL; + struct pci_dev *pdev = adapter->pdev; + unsigned long val = status; + + for_each_set_bit(i, &val, 32) { + if (severity == TXGBE_AER_CORRECTABLE) { + errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? + aer_correctable_error_string[i] : NULL; + } else { + errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ? 
+				aer_uncorrectable_error_string[i] : NULL;
+
+			if (!errmsg && i == 14)
+				adapter->cmplt_to_dis = true;
+		}
+		if (errmsg)
+			dev_info(&pdev->dev, " [%2ld] %-22s\n", i, errmsg);
+	}
+}
+
+bool txgbe_check_recovery_capability(struct pci_dev *dev)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	return true;
+#else
+	/* Check whether the upstream bridge is a PLX bridge or a
+	 * HiSilicon (Kunpeng 920) root port.
+	 */
+	if (dev->bus->self->vendor == 0x10b5 ||
+	    dev->bus->self->vendor == 0x19e5)
+		return true;
+	else
+		return false;
+#endif
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h
new file mode 100644
index 000000000000..1143c82a84b4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_PCIERR_H_
+#define _TXGBE_PCIERR_H_
+
+#include "txgbe.h"
+
+#define TXGBE_AER_UNCORRECTABLE 1
+#define TXGBE_AER_CORRECTABLE 2
+
+void txgbe_pcie_do_recovery(struct pci_dev *dev);
+void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status);
+bool txgbe_check_recovery_capability(struct pci_dev *dev);
+
+#endif
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
new file mode 100644
index 000000000000..3fbcb1b56617
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -0,0 +1,1538 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe_phy.h"
+#include "txgbe_mtd.h"
+#include "txgbe.h"
+
+/**
+ * txgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MACs that do not
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
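+ *
+ * Minimal usage sketch (hypothetical caller; the call sites below do
+ * not all use the return value this way):
+ *
+ *	if (txgbe_check_reset_blocked(hw))
+ *		return 0;	(firmware vetoed the reset, skip it)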
+ **/ +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw) +{ + u32 mmngc; + + mmngc = rd32(hw, TXGBE_MIS_ST); + if (mmngc & TXGBE_MIS_ST_MNG_VETO) + return true; + + return false; +} + +/** + * txgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + **/ +s32 txgbe_get_phy_id(struct txgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u8 numport, thisport; + u32 i = 0; + struct txgbe_adapter *adapter = hw->back; + + if (hw->mac.type == txgbe_mac_aml) { + hw->phy.addr = 0; + + for (i = 0; i < 32; i++) { + hw->phy.addr = i; + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_HIGH, + 0, &phy_id_high); + if (status) { + e_info(drv, "txgbe_read_phy_reg_mdi failed 1\n"); + return status; + } + e_info(drv, "%d: phy_id_high 0x%x\n", i, phy_id_high); + if ((phy_id_high & 0xFFFF) == 0x0141) + break; + } + + if (i == 32) { + e_info(drv, "txgbe_read_phy_reg_mdi failed\n"); + return TXGBE_ERR_PHY; + } + + status = txgbe_read_phy_reg_mdi(hw, TXGBE_MDIO_PHY_ID_LOW, + 0, &phy_id_low); + if (status) { + e_info(drv, "txgbe_read_phy_reg_mdi failed 2\n"); + return status; + } + hw->phy.id = (u32)(phy_id_high & 0xFFFF) << 6; + hw->phy.id |= (u32)((phy_id_low & 0xFC00) >> 10); + + e_info(drv, "%s: phy_id 0x%x", __func__, hw->phy.id); + + return status; + } + + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_HIGH, &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_LOW, &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & TXGBE_PHY_REVISION_MASK); + } + + if (status == 0) { + status = txgbe_mtd_get_phy_revision(&hw->phy_dev, hw->phy.addr, + (enum txgbe_mtd_dev_id *)&hw->phy.revision, + &numport, &thisport); + if (status == MTD_FAIL) { + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Error in txgbe_mtd_get_phy_revision()\n"); + } + } + return status; +} + +/** + * txgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information + **/ +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw) +{ + enum txgbe_phy_type phy_type; + u16 ext_ability = 0; + + switch (hw->phy.id) { + case TN1010_PHY_ID: + phy_type = txgbe_phy_tn; + break; + case QT2022_PHY_ID: + phy_type = txgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = txgbe_phy_nl; + break; + default: + phy_type = txgbe_phy_unknown; + break; + } + if (phy_type == txgbe_phy_unknown) { + txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_EXT_ABILITY, &ext_ability); + + if (ext_ability & (TXGBE_MDIO_PHY_10GBASET_ABILITY | + TXGBE_MDIO_PHY_1000BASET_ABILITY)) + phy_type = txgbe_phy_cu_unknown; + else + phy_type = txgbe_phy_generic; + } + return phy_type; +} + +/** + * txgbe_reset_phy - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 txgbe_reset_phy(struct txgbe_hw *hw) +{ + s32 status = 0; + + if (status != 0 || hw->phy.type == txgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. 
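+	 * The NCSI/WOL subsystem-ID checks below likewise skip the reset,
+	 * presumably so a firmware/BMC-managed link is not pulled down by
+	 * the driver.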
*/ + if (!hw->phy.reset_if_overtemp && + hw->phy.ops.check_overtemp(hw) == TXGBE_ERR_OVERTEMP) + goto out; + + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + if (((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)) + goto out; + + status = txgbe_mtd_hw_rst(&hw->phy_dev, hw->phy.addr, 1000); + +out: + return status; +} + +/** + * txgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); + + command = TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_READ) | TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } + + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, TXGBE_MSCC); + + return 0; +} + +/** + * txgbe_read_phy_reg - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = txgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); + + command = phy_data | TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_WRITE) | + TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } + + return 0; +} + +/** + * txgbe_write_phy_reg - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr = 
hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = txgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +u32 txgbe_read_mdio(struct mtd_dev *dev, + u16 port, + u16 mmd, + u16 reg, + u16 *value) +{ + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->app_data); + + if (hw->phy.addr != port) + return MTD_FAIL; + return txgbe_read_phy_reg(hw, reg, mmd, value); +} + +u32 txgbe_write_mdio(struct mtd_dev *dev, + u16 port, + u16 mmd, + u16 reg, + u16 value) +{ + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->app_data); + + if (hw->phy.addr != port) + return MTD_FAIL; + + return txgbe_write_phy_reg(hw, reg, mmd, value); +} + +/** + * txgbe_setup_phy_link - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. + **/ +u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete) +{ + u16 speed = MTD_ADV_NONE; + struct mtd_dev *devptr = &hw->phy_dev; + u16 port = hw->phy.addr; + int i = 0; + bool link_up = false; + u16 link_speed = MTD_ADV_NONE; + + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) + speed |= MTD_SPEED_10GIG_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) + speed |= MTD_SPEED_1GIG_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) + speed |= MTD_SPEED_100M_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) + speed |= MTD_SPEED_10M_FD; + if (!autoneg_wait_to_complete) { + txgbe_mtd_get_autoneg_res(devptr, port, &link_speed); + if (link_speed & speed) { + speed = link_speed; + goto out; + } + } + + txgbe_mtd_enable_speeds(devptr, port, speed, true); + usleep_range(10000, 20000); + speed = MTD_ADV_NONE; + for (i = 0; i < 300; i++) { + txgbe_mtd_is_baset_up(devptr, port, &speed, &link_up); + if (link_up) + break; + + usleep_range(10000, 20000); + } + +out: + switch (speed) { + case MTD_SPEED_10GIG_FD: + return TXGBE_LINK_SPEED_10GB_FULL; + case MTD_SPEED_1GIG_FD: + return TXGBE_LINK_SPEED_1GB_FULL; + case MTD_SPEED_100M_FD: + return TXGBE_LINK_SPEED_100_FULL; + case MTD_SPEED_10M_FD: + return TXGBE_LINK_SPEED_10_FULL; + default: + return TXGBE_LINK_SPEED_UNKNOWN; + } +} + +/** + * txgbe_setup_phy_link_speed - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + **/ +u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + /* Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (speed & TXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_100_FULL; + + if (speed & TXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10_FULL; + + /* Setup link based on the new speed settings */ + return txgbe_setup_phy_link(hw, speed, autoneg_wait_to_complete); +} + +/** + * txgbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
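+ *
+ * Minimal usage sketch (hypothetical caller):
+ *
+ *	u32 speed;
+ *	bool autoneg;
+ *
+ *	txgbe_get_copper_link_capabilities(hw, &speed, &autoneg);
+ *	if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+ *		... the PHY can advertise 10GBASE-T ...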
+ **/ +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status; + u16 speed_ability; + *speed = 0; + *autoneg = true; + + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_SPEED_ABILITY, &speed_ability); + + if (status == 0) { + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10G) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_1G) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_100M) + *speed |= TXGBE_LINK_SPEED_100_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10M) + *speed |= TXGBE_LINK_SPEED_10_FULL; + } + + return status; +} + +/** + * txgbe_get_phy_firmware_version - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + u8 major, minor, inc, test; + + status = txgbe_mtd_get_firmver(&hw->phy_dev, hw->phy.addr, + &major, &minor, &inc, &test); + if (status == 0) + *firmware_version = (major << 8) | minor; + return status; +} + +/** + * txgbe_identify_module - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 txgbe_identify_module(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_SFP_NOT_PRESENT; + + switch (hw->mac.ops.get_media_type(hw)) { + case txgbe_media_type_fiber_qsfp: + status = txgbe_identify_qsfp_module(hw); + break; + case txgbe_media_type_fiber: + status = txgbe_identify_sfp_module(hw); + break; + default: + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + break; + } + + return status; +} + +/** + * txgbe_identify_sfp_module - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
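+ *
+ * Condensed view of the decision implemented below (all fields are
+ * TXGBE_SFF_* EEPROM offsets read over I2C):
+ *
+ *	identifier != SFP		-> txgbe_phy_sfp_unsupported
+ *	DA passive/active cable tech	-> txgbe_sfp_type_da_*
+ *	25G compliance codes		-> txgbe_sfp_type_25g_*
+ *	10G SR/LR compliance		-> txgbe_sfp_type_srlr_core0/1
+ *	1G T/SX/LX compliance		-> txgbe_sfp_type_1g_*_core0/1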
+ **/ +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum txgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + struct txgbe_adapter *adapter = hw->back; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 comp_codes_25g = 0; + u8 comp_copper_len = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u8 vendor_name[3] = {0, 0, 0}; + u16 phy_data = 0; + u8 sff8472_rev, addr_mode, databyte; + bool page_swap = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 value; + int i; + + if (hw->mac.type == txgbe_mac_aml) { + value = rd32(hw, TXGBE_GPIO_EXT); + if (value & TXGBE_SFP1_MOD_ABS_LS) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + return TXGBE_ERR_SFP_NOT_PRESENT; + } + } + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) + return TXGBE_ERR_SWFW_SYNC; + + if (hw->mac.ops.get_media_type(hw) != txgbe_media_type_fiber) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + txgbe_init_i2c(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier != TXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_25GBE_COMP_CODES, + &comp_codes_25g); + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_COPPER_LENGTH, + &comp_copper_len); + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + if (status != 0) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 + * 4 SFP_DA_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + * 7 SFP_act_lmt_DA_CORE0 + * 8 SFP_act_lmt_DA_CORE1 + * 9 SFP_1g_cu_CORE0 + * 10 SFP_1g_cu_CORE1 + * 11 SFP_1g_sx_CORE0 + * 12 SFP_1g_sx_CORE1 + */ + { + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core1; + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + hw->dac_sfp = true; + } + + if (comp_copper_len == 0x1) + hw->bypass_ctle = true; + else + hw->bypass_ctle = false; + + if (comp_codes_25g == TXGBE_SFF_25GBASECR_91FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_74FEC || + comp_codes_25g == TXGBE_SFF_25GBASECR_NOFEC) { + hw->phy.fiber_suppport_speed = + TXGBE_LINK_SPEED_25GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL; + } else { + hw->phy.fiber_suppport_speed |= + TXGBE_LINK_SPEED_10GB_FULL; + } + } else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) { + hw->dac_sfp = false; + hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core1; + } else { + 
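+					/* active DA cable without the
+					 * active-limiting spec bit: leave
+					 * the type unknown for now
+					 */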
hw->phy.sfp_type = txgbe_sfp_type_unknown; + } + + if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_25g_aoc_core1; + } + } else if (comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_5 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_AOC_BER_12 || + comp_codes_25g == TXGBE_SFF_25GAUI_C2M_ACC_BER_12) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_aoc_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_aoc_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_sr_core1; + } else if (comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core0; + else + hw->phy.sfp_type = txgbe_sfp_type_25g_lr_core1; + } else if (comp_codes_10g & + (TXGBE_SFF_10GBASESR_CAPABLE | + TXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = txgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. 
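+		 * For example, a module advertising both 25GBASE-SR and
+		 * 10GBASE-SR/LR is treated as multispeed on aml MACs; on
+		 * other MACs the 1G SX/LX plus 10G SR/LR pairing is what
+		 * qualifies.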
*/ + hw->phy.multispeed_fiber = false; + if (hw->mac.type == txgbe_mac_aml) { + if ((comp_codes_25g == TXGBE_SFF_25GBASESR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASELR_CAPABLE || + comp_codes_25g == TXGBE_SFF_25GBASEER_CAPABLE) && + ((comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE) || + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } else { + if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + } + /* Determine PHY vendor */ + if (hw->phy.type != txgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case TXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_tyco; + break; + case TXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = txgbe_phy_sfp_ftl_active; + else + hw->phy.type = txgbe_phy_sfp_ftl; + break; + case TXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = txgbe_phy_sfp_avago; + break; + case TXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = txgbe_phy_sfp_intel; + break; + default: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_unknown; + else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_active_unknown; + else + hw->phy.type = txgbe_phy_sfp_unknown; + break; + } + } + + /* vendor name match QAX and can access sfp internal phy */ + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_CABLE_VENDOR_NAME1, + &vendor_name[0]); + if (status != 0) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_CABLE_VENDOR_NAME2, + &vendor_name[1]); + if (status != 0) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_CABLE_VENDOR_NAME3, + &vendor_name[2]); + if (status != 0) + goto err_read_i2c_eeprom; + + if (vendor_name[0] == 0x51 && + vendor_name[1] == 0x41 && + vendor_name[2] == 0x58) { + status = hw->phy.ops.read_i2c_sfp_phy(hw, + 0x8008, + &phy_data); + if (status == 0 || phy_data != 0xffff) { + hw->phy.multispeed_fiber = false; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_10g_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_10g_cu_core1; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (TXGBE_SFF_DA_PASSIVE_CABLE | + TXGBE_SFF_DA_ACTIVE_CABLE)) { + status = 0; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && comp_codes_25g == 0 && + !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) { + hw->phy.type 
= txgbe_phy_sfp_unsupported;
+			status = TXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+	}
+
+	/* record eeprom info */
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     TXGBE_SFF_SFF_8472_COMP,
+					     &sff8472_rev);
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	/* addressing mode is not supported */
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     TXGBE_SFF_SFF_8472_SWAP,
+					     &addr_mode);
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+		page_swap = true;
+	}
+
+	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+	    !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) {
+		/* We have a SFP, but it does not support SFF-8472 */
+		adapter->eeprom_type = ETH_MODULE_SFF_8079;
+		adapter->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+	} else {
+		/* We have a SFP which supports a revision of SFF-8472. */
+		adapter->eeprom_type = ETH_MODULE_SFF_8472;
+		adapter->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+	}
+	for (i = 0; i < adapter->eeprom_len; i++) {
+		if (i < ETH_MODULE_SFF_8079_LEN)
+			status = hw->phy.ops.read_i2c_eeprom(hw, i,
+							     &databyte);
+		else
+			status = hw->phy.ops.read_i2c_sff8472(hw, i,
+							      &databyte);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		adapter->i2c_eeprom[i] = databyte;
+	}
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	return status;
+
+err_read_i2c_eeprom:
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	hw->phy.sfp_type = txgbe_sfp_type_not_present;
+	if (hw->phy.type != txgbe_phy_nl) {
+		hw->phy.id = 0;
+		hw->phy.type = txgbe_phy_unknown;
+	}
+	return TXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw)
+{
+	s32 status = TXGBE_ERR_PHY_ADDR_INVALID;
+	u8 identifier = 0, transceiver_type = 0;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u32 value;
+
+	if (hw->mac.type == txgbe_mac_aml40) {
+		value = rd32(hw, TXGBE_GPIO_EXT);
+		if (value & TXGBE_SFP1_MOD_PRST_LS) {
+			hw->phy.sfp_type = txgbe_sfp_type_not_present;
+			return TXGBE_ERR_SFP_NOT_PRESENT;
+		}
+	}
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0)
+		return TXGBE_ERR_SWFW_SYNC;
+
+	if (hw->mac.ops.get_media_type(hw) != txgbe_media_type_fiber_qsfp) {
+		hw->phy.sfp_type = txgbe_sfp_type_not_present;
+		status = TXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
+	/* LAN ID is needed for I2C access */
+	txgbe_init_i2c(hw);
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     TXGBE_SFF_IDENTIFIER,
+					     &identifier);
+
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	if (identifier == TXGBE_SFF_IDENTIFIER_QSFP ||
+	    identifier == TXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+		hw->phy.type = txgbe_phy_sfp_unknown;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     TXGBE_ETHERNET_COMP_OFFSET,
+						     &transceiver_type);
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		if (transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core0;
+			else
+				hw->phy.sfp_type = txgbe_qsfp_type_40g_cu_core1;
+			hw->phy.fiber_suppport_speed =
+				TXGBE_LINK_SPEED_40GB_FULL |
+				TXGBE_LINK_SPEED_10GB_FULL;
+		}
+
+		if (transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core0;
+			else
+				hw->phy.sfp_type = txgbe_qsfp_type_40g_sr_core1;
+		}
+
+		if (transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type = txgbe_qsfp_type_40g_lr_core0;
+			else
+				hw->phy.sfp_type =
txgbe_qsfp_type_40g_lr_core1; + } + + if (transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core0; + else + hw->phy.sfp_type = txgbe_qsfp_type_40g_active_core1; + } + + } else { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + } +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; + +err_read_i2c_eeprom: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + hw->phy.sfp_type = txgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = txgbe_phy_unknown; + + return TXGBE_ERR_SFP_NOT_PRESENT; +} + +s32 txgbe_init_i2c(struct txgbe_hw *hw) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! */ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + + /* ic_clk = 1/156.25MHz + * SCL_High_time = [(HCNT + IC_*_SPKLEN + 7) * ic_clk] + SCL_Fall_time + * SCL_Low_time = [(LCNT + 1) * ic_clk] - SCL_Fall_time + SCL_Rise_time + * set I2C Frequency to Standard Speed Mode 100KHz + */ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 2000); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 2000); + + wr32m(hw, TXGBE_I2C_SDA_HOLD, + TXGBE_I2C_SDA_RX_HOLD | TXGBE_I2C_SDA_TX_HOLD, 0x640064); + } else if (hw->mac.type == txgbe_mac_sp) { + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 780); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 780); + } + + wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); + + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); + + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); + + return 0; +} + +static s32 txgbe_init_i2c_sfp_phy(struct txgbe_hw *hw) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! */ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 600); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 600); + + wr32(hw, TXGBE_I2C_RX_TL, 1); /* 2bytes for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); + + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); + + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); + + return 0; +} + +s32 txgbe_clear_i2c(struct txgbe_hw *hw) +{ + s32 status = 0; + + /* wait for completion */ + status = po32m(hw, TXGBE_I2C_STATUS, + TXGBE_I2C_STATUS_MST_ACTIVITY, ~TXGBE_I2C_STATUS_MST_ACTIVITY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + wr32(hw, TXGBE_I2C_ENABLE, 0); + +out: + return status; +} + +/** + * txgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
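+ * The I2C controller is reprogrammed via txgbe_init_i2c() on every call, so
+ * each byte read starts from a freshly initialized controller rather than
+ * depending on the state left by a previous transfer.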
+ **/
+s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+			  u8 *eeprom_data)
+{
+	txgbe_init_i2c(hw);
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 TXGBE_I2C_EEPROM_DEV_ADDR,
+					 eeprom_data);
+}
+
+/**
+ * txgbe_read_i2c_sff8472 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset,
+			   u8 *sff8472_data)
+{
+	txgbe_init_i2c(hw);
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 TXGBE_I2C_EEPROM_DEV_ADDR2,
+					 sff8472_data);
+}
+
+/**
+ * txgbe_read_i2c_sff8636 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @page: QSFP EEPROM page to select
+ * @byte_offset: byte offset within the selected page
+ * @sff8636_data: value read
+ *
+ * Performs byte read operation to QSFP module's SFF-8636 data over I2C
+ **/
+s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page, u8 byte_offset,
+			   u8 *sff8636_data)
+{
+	txgbe_init_i2c(hw);
+	hw->phy.ops.write_i2c_byte(hw, TXGBE_SFF_QSFP_PAGE_SELECT,
+				   TXGBE_I2C_EEPROM_DEV_ADDR,
+				   page);
+
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 TXGBE_I2C_EEPROM_DEV_ADDR,
+					 sff8636_data);
+}
+
+/**
+ * txgbe_read_i2c_sfp_phy - Reads 16 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: register offset at address 0xAC
+ * @data: value read
+ *
+ * Performs word read operation to fiber to copper SFP module
+ * internal phy over I2C
+ **/
+s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset,
+			   u16 *data)
+{
+	txgbe_init_i2c_sfp_phy(hw);
+
+	return txgbe_read_i2c_word(hw, byte_offset,
+				   TXGBE_I2C_EEPROM_DEV_ADDR3,
+				   data);
+}
+
+/**
+ * txgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+			   u8 eeprom_data)
+{
+	txgbe_init_i2c(hw);
+	return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+					  TXGBE_I2C_EEPROM_DEV_ADDR,
+					  eeprom_data);
+}
+
+/**
+ * txgbe_read_i2c_byte_int - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address (unused, the target is programmed beforehand)
+ * @data: value read
+ * @lock: true if to take and release semaphore
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
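+ * The offset byte is first written with a STOP condition, then a read
+ * command is queued; completion is detected by polling the RX_FULL raw
+ * interrupt status.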
+ **/ +static s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 __always_unused dev_addr, u8 *data, bool __always_unused lock) +{ + s32 status = 0; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, + byte_offset | TXGBE_I2C_DATA_CMD_STOP); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 100); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + +out: + return status; +} + +/** + * txgbe_read_i2c_word_int - Reads 16 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 txgbe_read_i2c_word_int(struct txgbe_hw *hw, u16 byte_offset, + u8 __always_unused dev_addr, + u16 *data, + bool __always_unused lock) +{ + s32 status = 0; + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + /* reg offset format 0x000yyyyy */ + byte_offset &= 0x1f; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* write reg_offset */ + wr32(hw, TXGBE_I2C_DATA_CMD, (u8)byte_offset | TXGBE_I2C_DATA_CMD_STOP); + + usec_delay(TXGBE_I2C_TIMEOUT); + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ | + TXGBE_I2C_DATA_CMD_STOP); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + *data <<= 8; + *data += 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + } else if ((hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core0) || + (hw->phy.sfp_type == txgbe_sfp_type_10g_cu_core1)) { + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* write reg_offset */ + wr32(hw, TXGBE_I2C_DATA_CMD, 0x23); + wr32(hw, TXGBE_I2C_DATA_CMD, byte_offset >> 8); + wr32(hw, TXGBE_I2C_DATA_CMD, (u8)byte_offset | TXGBE_I2C_DATA_CMD_STOP); + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* delay for mcu access sfp internal phy through MDIO + * delay time need larger than 1ms + */ + mdelay(5); + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ | + TXGBE_I2C_DATA_CMD_STOP); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 
100); + if (status != 0) + goto out; + + /* fixme LSB data is the data of the duplicate MSB */ + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + *data <<= 8; + *data += 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + } + +out: + return status; +} + +/** + * txgbe_switch_i2c_slave_addr - Switch I2C slave address + * @hw: pointer to hardware structure + * @dev_addr: slave addr to switch + **/ +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + wr32(hw, TXGBE_I2C_TAR, dev_addr >> 1); + wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; +} + +/** + * txgbe_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + txgbe_switch_i2c_slave_addr(hw, dev_addr); + + return txgbe_read_i2c_byte_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * txgbe_read_i2c_word - Reads 16 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data) +{ + txgbe_switch_i2c_slave_addr(hw, dev_addr); + + return txgbe_read_i2c_word_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * txgbe_write_i2c_byte_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 __always_unused dev_addr, u8 data) +{ + s32 status = 0; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + wr32(hw, TXGBE_I2C_DATA_CMD, byte_offset); + wr32(hw, TXGBE_I2C_DATA_CMD, + data | TXGBE_I2C_DATA_CMD_WRITE); + + /* wait for write complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); +out: + return status; +} + +/** + * txgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return txgbe_write_i2c_byte_int(hw, byte_offset, dev_addr, + data); +} + +/** + * txgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw) +{ + s32 status = 0; + u32 ts_state; + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) { + ts_state = rd32(hw, TXGBE_AML_INTR_HIGH_STS); + if (ts_state) { + wr32(hw, TXGBE_AML_INTR_RAW_HI, TXGBE_AML_INTR_CL_HI); + wr32(hw, TXGBE_AML_INTR_RAW_LO, TXGBE_AML_INTR_CL_LO); + status = TXGBE_ERR_OVERTEMP; + } else { + ts_state = rd32(hw, TXGBE_AML_INTR_LOW_STS); + if (ts_state) + status = TXGBE_ERR_UNDERTEMP; + } + } else { + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, TXGBE_TS_ALARM_ST); + + if (ts_state & TXGBE_TS_ALARM_ST_DALARM) + status = TXGBE_ERR_UNDERTEMP; + else if (ts_state & TXGBE_TS_ALARM_ST_ALARM) + status = TXGBE_ERR_OVERTEMP; + } + return status; +} + +s32 txgbe_init_external_phy(struct txgbe_hw *hw) +{ + s32 status = 0; + + struct mtd_dev *devptr = &hw->phy_dev; + + hw->phy.addr = 0; + + devptr->app_data = hw; + status = txgbe_mtd_load_driver(txgbe_read_mdio, + txgbe_write_mdio, + false, + NULL, + NULL, + NULL, + NULL, + hw->phy.addr, + devptr); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "External PHY initialization failed.\n"); + return TXGBE_ERR_PHY; + } + + return status; +} + +s32 txgbe_uninit_external_phy(struct txgbe_hw *hw) +{ + return txgbe_mtd_unload_driver(&hw->phy_dev); +} + +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit) +{ + return txgbe_mtd_set_pause_adver(&hw->phy_dev, hw->phy.addr, + (pause_bit >> 10) & 0x3, false); +} + +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_AUTO_NEG_DEV_TYPE, + TXGBE_MDIO_AUTO_NEG_ADVT, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + return txgbe_mtd_get_lp_adver_pause(&hw->phy_dev, + hw->phy.addr, pause_bit); +} + +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); + + if (status) + goto out; + + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; + + status = txgbe_mtd_xmdio_wr(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); + +out: + return status; +} + +s32 txgbe_external_phy_resume(struct txgbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + + status = txgbe_mtd_hw_xmdio_rd(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, &value); + + if (status) + goto out; + + if (!(value & ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER)) + goto out; + + value |= TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST; + value &= ~TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER; + + status = txgbe_mtd_xmdio_wr(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE, + TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL, value); + +out: + return status; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h new file mode 100644 index 000000000000..32b205d04f9e --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 
Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_PHY_H_ +#define _TXGBE_PHY_H_ + +#include "txgbe_type.h" +#define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define TXGBE_I2C_EEPROM_BANK_LEN 0xFF + +/*fiber to copper module inter reg i2c addr */ +#define TXGBE_I2C_EEPROM_DEV_ADDR3 0xAC +#define TXGBE_I2C_PHY_LOCAL_RX_STATUS BIT(12) +#define TXGBE_I2C_PHY_REMOTE_RX_STATUS BIT(13) +#define TXGBE_I2C_10G_SFP_LINK_STATUS BIT(10) + +/* EEPROM byte offsets */ +#define TXGBE_SFF_IDENTIFIER 0x0 +#define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_IDENTIFIER_QSFP 0xC +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define TXGBE_SFF_1GBE_COMP_CODES 0x6 +#define TXGBE_SFF_10GBE_COMP_CODES 0x3 +#define TXGBE_SFF_25GBE_COMP_CODES 0x24 +#define TXGBE_SFF_COPPER_LENGTH 0x12 +#define TXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define TXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define TXGBE_SFF_DDM_IMPLEMENTED 0x40 +#define TXGBE_SFF_SFF_8472_SWAP 0x5C +#define TXGBE_SFF_SFF_8472_COMP 0x5E +#define TXGBE_SFF_SFF_8472_OSCB 0x6E +#define TXGBE_SFF_SFF_8472_ESCB 0x76 +#define TXGBE_SFF_SFF_REVISION_ADDR 0x01 +#define TXGBE_SFF_QSFP_PAGE_SELECT 0x7F + +#define TXGBE_MODULE_QSFP_MAX_LEN 640 + +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define TXGBE_SFF_QSFP_CONNECTOR 0x82 +#define TXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define TXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define TXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define TXGBE_SFF_QSFP_DEVICE_TECH 0x93 +#define TXGBE_SFF_CABLE_VENDOR_NAME1 0x14 +#define TXGBE_SFF_CABLE_VENDOR_NAME2 0x15 +#define TXGBE_SFF_CABLE_VENDOR_NAME3 0x16 + +/* Bitmasks */ +#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define TXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define TXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define TXGBE_SFF_1GBASET_CAPABLE 0x8 +#define TXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define TXGBE_SFF_25GBASESR_CAPABLE 0x2 +#define TXGBE_SFF_25GBASELR_CAPABLE 0x3 +#define TXGBE_SFF_25GBASEER_CAPABLE 0x4 +#define TXGBE_SFF_25GBASECR_91FEC 0xB +#define TXGBE_SFF_25GBASECR_74FEC 0xC +#define TXGBE_SFF_25GBASECR_NOFEC 0xD +#define TXGBE_SFF_40GBASE_SR_CAPABLE 0x10 +#define TXGBE_SFF_4x10GBASESR_CAP 0x11 +#define TXGBE_SFF_40GBASEPSM4_PARALLEL 0x12 +#define TXGBE_SFF_40GBASE_SWMD4_CAP 0x1f +#define TXGBE_SFF_COPPER_5M 0x5 +#define TXGBE_SFF_COPPER_3M 0x3 +#define TXGBE_SFF_COPPER_1M 0x1 + +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_5 0x1 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_5 0x8 +#define TXGBE_SFF_25GAUI_C2M_AOC_BER_12 0x18 +#define TXGBE_SFF_25GAUI_C2M_ACC_BER_12 0x19 + +#define TXGBE_ETHERNET_COMP_OFFSET 0x83 +#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3) +#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2) +#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1) +#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0) + +#define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define TXGBE_SFF_ADDRESSING_MODE 0x4 +#define TXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define TXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define TXGBE_I2C_EEPROM_READ_MASK 0x100 +#define TXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define 
TXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define TXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define TXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define TXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define TXGBE_CS4227 0xBE /* CS4227 address */ +#define TXGBE_CS4227_GLOBAL_ID_LSB 0 +#define TXGBE_CS4227_SCRATCH 2 +#define TXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define TXGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define TXGBE_CS4227_RETRIES 5 +#define TXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define TXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define TXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define TXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define TXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define TXGBE_CS4227_EDC_MODE_SR 0x0004 +#define TXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define TXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define TXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define TXGBE_PE 0xE0 /* Port expander address */ +#define TXGBE_PE_OUTPUT 1 /* Output register offset */ +#define TXGBE_PE_CONFIG 3 /* Config register offset */ +#define TXGBE_PE_BIT1 BIT(1) + +/* Flow control defines */ +#define TXGBE_TAF_SYM_PAUSE (0x1) +#define TXGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define TXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define TXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define TXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define TXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define TXGBE_I2C_T_HD_STA 4 +#define TXGBE_I2C_T_LOW 5 +#define TXGBE_I2C_T_HIGH 4 +#define TXGBE_I2C_T_SU_STA 5 +#define TXGBE_I2C_T_HD_DATA 5 +#define TXGBE_I2C_T_SU_DATA 1 +#define TXGBE_I2C_T_RISE 1 +#define TXGBE_I2C_T_FALL 1 +#define TXGBE_I2C_T_SU_STO 4 +#define TXGBE_I2C_T_BUF 5 + +/* SFP+ SFF-8472 Compliance */ +#define TXGBE_SFF_SFF_8472_UNSUP 0x00 + +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw); +s32 txgbe_get_phy_id(struct txgbe_hw *hw); +s32 txgbe_reset_phy(struct txgbe_hw *hw); +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete); +u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg); +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw); + +s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw, + u16 *firmware_version); + +s32 txgbe_identify_module(struct txgbe_hw *hw); +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw); +s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw); +s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw); +s32 txgbe_init_i2c(struct txgbe_hw *hw); +s32 txgbe_clear_i2c(struct txgbe_hw *hw); +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr); +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 
byte_offset, + u8 dev_addr, u16 *data); + +s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 txgbe_read_i2c_sff8636(struct txgbe_hw *hw, u8 page, u8 byte_offset, + u8 *sff8636_data); +s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, + u16 *data); + +s32 txgbe_init_external_phy(struct txgbe_hw *hw); +s32 txgbe_uninit_external_phy(struct txgbe_hw *hw); +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit); +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_external_phy_suspend(struct txgbe_hw *hw); +s32 txgbe_external_phy_resume(struct txgbe_hw *hw); + +u32 txgbe_read_mdio(struct mtd_dev *dev, + u16 port, + u16 mmd, + u16 reg, + u16 *value); + +u32 txgbe_write_mdio(struct mtd_dev *dev, + u16 port, + u16 mmd, + u16 reg, + u16 value); + +#endif /* _TXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c new file mode 100644 index 000000000000..db0649f8fc49 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ptp.c @@ -0,0 +1,1050 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe.h" +#include "txgbe_hw.h" +#include + +#define TXGBE_INCVAL_10GB 0xCCCCCC +#define TXGBE_INCVAL_1GB 0x800000 +#define TXGBE_INCVAL_100 0xA00000 +#define TXGBE_INCVAL_10 0xC7F380 +#define TXGBE_INCVAL_FPGA 0x800000 +#define TXGBE_INCVAL_AML 0xA00000 + +#define TXGBE_INCVAL_SHIFT_10GB 20 +#define TXGBE_INCVAL_SHIFT_1GB 18 +#define TXGBE_INCVAL_SHIFT_100 15 +#define TXGBE_INCVAL_SHIFT_10 12 +#define TXGBE_INCVAL_SHIFT_FPGA 17 +#define TXGBE_INCVAL_SHIFT_AML 21 + +#define TXGBE_OVERFLOW_PERIOD (HZ * 30) +#define TXGBE_PTP_TX_TIMEOUT (HZ) + +#define NS_PER_SEC 1000000000ULL +#define NS_PER_MSEC 1000000ULL + +static void txgbe_ptp_setup_sdp(struct txgbe_adapter *adapter) +{ + struct cyclecounter *cc = &adapter->hw_cc; + struct txgbe_hw *hw = &adapter->hw; + u32 tsauxc, rem, tssdp, tssdp1; + u32 trgttiml0, trgttimh0, trgttiml1, trgttimh1; + u64 ns = 0; + unsigned long flags; + + if (hw->mac.type != txgbe_mac_aml && + hw->mac.type != txgbe_mac_aml40) + return; + + if (TXGBE_1588_PPS_WIDTH >= NS_PER_SEC) { + e_dev_err("PTP pps width cannot be longer than 1s!\n"); + return; + } + + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + if (!(adapter->flags2 & TXGBE_FLAG2_PTP_PPS_ENABLED)) { + if (adapter->pps_enabled == 1) { + adapter->pps_enabled = 0; + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, 0, 0); + } + return; + } + + adapter->pps_enabled = 1; + + tssdp = TXGBE_TSEC_1588_SDP_FUN_SEL_TT0; + tssdp |= TXGBE_1588_PPS_LEVEL ? 
+ TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH : TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW; + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + tssdp1 = TXGBE_TSEC_1588_SDP_FUN_SEL_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. */ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + if (TXGBE_1588_TOD_ENABLE) + txgbe_set_pps(hw, adapter->pps_enabled, ns + rem, adapter->pps_edge_start); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + wr32(hw, TXGBE_TSEC_1588_SDP(0), tssdp); + wr32(hw, TXGBE_TSEC_1588_SDP(1), tssdp1); + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + wr32(hw, TXGBE_TSEC_1588_INT_EN, TXGBE_TSEC_1588_INT_EN_TT1); + TXGBE_WRITE_FLUSH(hw); + + rem = NS_PER_SEC; + /* Adjust the clock edge to align with the next full second. */ + adapter->sec_to_cc = div_u64(((u64)rem << cc->shift), cc->mult); +} + +/** + * txgbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 txgbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct txgbe_adapter *adapter = + container_of(hw_cc, struct txgbe_adapter, hw_cc); + struct txgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIML); + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * txgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. 
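+ * tmreg_lock is held across the cyclecounter conversion so it cannot race
+ * with a concurrent timecounter re-initialization.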
+ **/
+static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter,
+					  struct skb_shared_hwtstamps *hwtstamp,
+					  u64 timestamp)
+{
+	unsigned long flags;
+	u64 ns;
+
+	memset(hwtstamp, 0, sizeof(*hwtstamp));
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_cyc2time(&adapter->hw_tc, timestamp);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * txgbe_ptp_adjfreq
+ * @ptp: the ptp clock structure
+ * @ppb: scaled parts per million offset from the base frequency
+ *
+ * adjust the frequency of the ptp cycle counter by the indicated
+ * amount. Note that despite its name this callback is registered as the
+ * adjfine hook, so the argument is a scaled-ppm value as consumed by
+ * adjust_by_scaled_ppm().
+ */
+static int txgbe_ptp_adjfreq(struct ptp_clock_info *ptp, long ppb)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 incval;
+
+	/* memory fence for read incval */
+	smp_mb();
+	incval = READ_ONCE(adapter->base_incval);
+	incval = adjust_by_scaled_ppm(incval, ppb);
+
+	if (incval > TXGBE_TSC_1588_INC_IV(~0))
+		e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+	wr32(hw, TXGBE_TSC_1588_INC,
+	     TXGBE_TSC_1588_INC_IVP(incval, 2));
+
+	return 0;
+}
+
+/**
+ * txgbe_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by ns
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int txgbe_ptp_adjtime(struct ptp_clock_info *ptp,
+			     s64 delta)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	timecounter_adjtime(&adapter->hw_tc, delta);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	if (adapter->ptp_setup_sdp)
+		adapter->ptp_setup_sdp(adapter);
+
+	return 0;
+}
+
+/**
+ * txgbe_ptp_gettimex
+ * @ptp: the ptp clock structure
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int txgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+			      struct timespec64 *ts,
+			      struct ptp_system_timestamp *sts)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	struct txgbe_hw *hw = &adapter->hw;
+	unsigned long flags;
+	u64 ns, stamp;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	ptp_read_system_prets(sts);
+	stamp = rd32(hw, TXGBE_TSC_1588_SYSTIML);
+	ptp_read_system_postts(sts);
+	stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32;
+
+	ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+/**
+ * txgbe_ptp_gettime64
+ * @ptp: the ptp clock structure
+ * @ts: timespec64 structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec64.
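+ * Unlike txgbe_ptp_gettimex() above, no pre/post system timestamps are
+ * captured here.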
+ */
+static int txgbe_ptp_gettime64(struct ptp_clock_info *ptp,
+			       struct timespec64 *ts)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	unsigned long flags;
+	u64 ns;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_read(&adapter->hw_tc);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+/**
+ * txgbe_ptp_settime64
+ * @ptp: the ptp clock structure
+ * @ts: the timespec64 containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int txgbe_ptp_settime64(struct ptp_clock_info *ptp,
+			       const struct timespec64 *ts)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	u64 ns;
+	unsigned long flags;
+
+	ns = timespec64_to_ns(ts);
+
+	/* reset the timecounter */
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	if (adapter->ptp_setup_sdp)
+		adapter->ptp_setup_sdp(adapter);
+
+	return 0;
+}
+
+/**
+ * txgbe_ptp_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ * enable (or disable) ancillary features of the phc subsystem.
+ * this driver only supports the PPS feature on the aml parts.
+ */
+static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+				    struct ptp_clock_request *rq, int on)
+{
+	struct txgbe_adapter *adapter =
+		container_of(ptp, struct txgbe_adapter, ptp_caps);
+	struct txgbe_hw *hw = &adapter->hw;
+	/* When PPS is enabled, unmask the interrupt for the ClockOut
+	 * feature, so that the interrupt handler can send the PPS
+	 * event when the clock SDP triggers. Clear the mask when PPS is
+	 * disabled.
+	 */
+	if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) {
+		if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
+			return -EOPNOTSUPP;
+
+		if (on)
+			adapter->flags2 |= TXGBE_FLAG2_PTP_PPS_ENABLED;
+		else
+			adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED;
+
+		adapter->ptp_setup_sdp(adapter);
+		return 0;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+/**
+ * txgbe_ptp_check_pps_event
+ * @adapter: the private adapter structure
+ *
+ * This function is called by the interrupt routine when checking for
+ * interrupts. It will check and handle a pps event.
+ */
+void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct cyclecounter *cc = &adapter->hw_cc;
+	u32 tsauxc, rem, int_status;
+	u32 trgttiml0, trgttimh0, trgttiml1, trgttimh1;
+	u64 ns = 0;
+	unsigned long flags;
+
+	/* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (e.g. debugfs). Better to check here than
+	 * everywhere that calls this function.
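+	 * (adapter->ptp_clock stays NULL until txgbe_ptp_create_clock()
+	 * succeeds).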
+ */ + if (!adapter->ptp_clock) + return; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + int_status = rd32(hw, TXGBE_TSEC_1588_INT_ST); + if (int_status & TXGBE_TSEC_1588_INT_ST_TT1) { + /* disable the pin first */ + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, 0); + TXGBE_WRITE_FLUSH(hw); + + tsauxc = TXGBE_TSEC_1588_AUX_CTL_PLSG | TXGBE_TSEC_1588_AUX_CTL_EN_TT0 | + TXGBE_TSEC_1588_AUX_CTL_EN_TT1 | TXGBE_TSEC_1588_AUX_CTL_EN_TS0; + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + adapter->pps_edge_start = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->pps_edge_end = adapter->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. */ + adapter->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + + /* Adjust the clock edge to align with the next full second. */ + trgttiml0 = (u32)adapter->pps_edge_start; + trgttimh0 = (u32)(adapter->pps_edge_start >> 32); + + rem += TXGBE_1588_PPS_WIDTH * NS_PER_MSEC; + adapter->pps_edge_end += div_u64(((u64)rem << cc->shift), cc->mult); + + trgttiml1 = (u32)adapter->pps_edge_end; + trgttimh1 = (u32)(adapter->pps_edge_end >> 32); + + wr32(hw, TXGBE_TSEC_1588_TRGT_L(0), trgttiml0); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(0), trgttimh0); + wr32(hw, TXGBE_TSEC_1588_TRGT_L(1), trgttiml1); + wr32(hw, TXGBE_TSEC_1588_TRGT_H(1), trgttimh1); + + wr32(hw, TXGBE_TSEC_1588_AUX_CTL, tsauxc); + TXGBE_WRITE_FLUSH(hw); + } + } + /* we don't config PPS on SDP for txgbe_mac_sp yet, so just return. + * ptp_clock_event(adapter->ptp_clock, &event); + */ +} + +/** + * txgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + TXGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + txgbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * txgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
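+ * Reading the PSR_1588_STMPH register releases the latched timestamp so
+ * that Rx timestamping can resume.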
+ */ +void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, TXGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * txgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. + */ +static void txgbe_ptp_clear_tx_timestamp(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + rd32(hw, TXGBE_TSC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * txgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void txgbe_ptp_tx_hwtstamp(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPL); + regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPH) << 32; + + txgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + txgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * txgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necessary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
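+ * Polling is bounded by TXGBE_PTP_TX_TIMEOUT (one second): the work item
+ * keeps rescheduling itself until the timestamp latches or the timeout
+ * expires.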
+ */
+static void txgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
+{
+	struct txgbe_adapter *adapter = container_of(work, struct txgbe_adapter,
+						     ptp_tx_work);
+	struct txgbe_hw *hw = &adapter->hw;
+	bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+					      TXGBE_PTP_TX_TIMEOUT);
+	u32 tsynctxctl;
+
+	/* we have to have a valid skb to poll for a timestamp */
+	if (!adapter->ptp_tx_skb) {
+		txgbe_ptp_clear_tx_timestamp(adapter);
+		return;
+	}
+
+	/* stop polling once we have a valid timestamp */
+	tsynctxctl = rd32(hw, TXGBE_TSC_1588_CTL);
+	if (tsynctxctl & TXGBE_TSC_1588_CTL_VALID) {
+		txgbe_ptp_tx_hwtstamp(adapter);
+		return;
+	}
+
+	/* check timeout last in case timestamp event just occurred */
+	if (timeout) {
+		txgbe_ptp_clear_tx_timestamp(adapter);
+		adapter->tx_hwtstamp_timeouts++;
+		e_warn(drv, "clearing Tx Timestamp hang");
+	} else {
+		/* reschedule to keep checking until we timeout */
+		schedule_work(&adapter->ptp_tx_work);
+	}
+}
+
+/**
+ * txgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: the private adapter structure
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 regval = 0;
+	u32 tsyncrxctl;
+
+	/* Read the tsyncrxctl register first in order to avoid taking a
+	 * timestamp register I/O hit on every packet.
+	 */
+	tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL);
+	if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID))
+		return;
+
+	regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPL);
+	regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPH) << 32;
+
+	txgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * txgbe_ptp_get_ts_config - get current hardware timestamping configuration
+ * @adapter: pointer to adapter structure
+ * @ifr: ioctl data
+ *
+ * This function returns the current timestamping settings. Rather than
+ * attempt to deconstruct registers to fill in the values, simply keep a copy
+ * of the old settings around, and return a copy when requested.
+ */
+int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr)
+{
+	struct hwtstamp_config *config = &adapter->tstamp_config;
+
+	return copy_to_user(ifr->ifr_data, config,
+			    sizeof(*config)) ? -EFAULT : 0;
+}
+
+/**
+ * txgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private txgbe adapter structure
+ * @config: the hwtstamp configuration requested
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode. This more accurately tells the user what the hardware is going
+ * to do anyways.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
+ */
+static int txgbe_ptp_set_timestamp_mode(struct txgbe_adapter *adapter,
+					struct hwtstamp_config *config)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 tsync_tx_ctl = TXGBE_TSC_1588_CTL_ENABLED;
+	u32 tsync_rx_ctl = TXGBE_PSR_1588_CTL_ENABLED;
+	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+	bool is_l2 = false;
+	u32 regval;
+
+	/* reserved for future extensions */
+	if (config->flags)
+		return -EINVAL;
+
+	switch (config->tx_type) {
+	case HWTSTAMP_TX_OFF:
+		tsync_tx_ctl = 0;
+		fallthrough;
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tsync_rx_ctl = 0;
+		tsync_rx_mtrl = 0;
+		adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				    TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1;
+		tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG;
+		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1;
+		tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG;
+		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_EVENT_V2;
+		is_l2 = true;
+		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				   TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_ALL:
+	default:
+		/* register RXMTRL must be set in order to do V1 packets,
+		 * therefore it is not possible to time stamp both V1 Sync and
+		 * Delay_Req messages unless hardware supports timestamping all
+		 * packets => return error
+		 */
+		adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+				    TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+		config->rx_filter = HWTSTAMP_FILTER_NONE;
+		return -ERANGE;
+	}
+
+	/* define ethertype filter for timestamping L2 packets */
+	if (is_l2)
+		wr32(hw,
+		     TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588),
+		     (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+		      TXGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+		      ETH_P_1588)); /* 1588 eth protocol type */
+	else
+		wr32(hw,
+		     TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588),
+		     0);
+
+	/* enable/disable TX */
+	regval = rd32(hw, TXGBE_TSC_1588_CTL);
+	regval &= ~TXGBE_TSC_1588_CTL_ENABLED;
+	regval |= tsync_tx_ctl;
+	wr32(hw, TXGBE_TSC_1588_CTL, regval);
+
+	/* enable/disable RX */
+	regval = rd32(hw, TXGBE_PSR_1588_CTL);
+	regval &= ~(TXGBE_PSR_1588_CTL_ENABLED | TXGBE_PSR_1588_CTL_TYPE_MASK);
+	regval |= tsync_rx_ctl;
+	wr32(hw, TXGBE_PSR_1588_CTL, regval);
+
+	/* define which PTP packets are time stamped */
+	wr32(hw, TXGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl);
+
+	TXGBE_WRITE_FLUSH(hw);
+
+	/* clear TX/RX timestamp state, just to be sure */
+	txgbe_ptp_clear_tx_timestamp(adapter);
+	rd32(hw, TXGBE_PSR_1588_STMPH);
+
return 0; +} + +/** + * txgbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = txgbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + struct txgbe_hw *hw = &adapter->hw; + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + + /*amlite TODO*/ + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + *shift = TXGBE_INCVAL_SHIFT_AML; + *incval = TXGBE_INCVAL_AML; + } else { + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10_FULL: + *shift = TXGBE_INCVAL_SHIFT_10; + *incval = TXGBE_INCVAL_10; + break; + case TXGBE_LINK_SPEED_100_FULL: + *shift = TXGBE_INCVAL_SHIFT_100; + *incval = TXGBE_INCVAL_100; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + *shift = TXGBE_INCVAL_SHIFT_1GB; + *incval = TXGBE_INCVAL_1GB; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + default: /* TXGBE_LINK_SPEED_10GB_FULL */ + *shift = TXGBE_INCVAL_SHIFT_10GB; + *incval = TXGBE_INCVAL_10GB; + break; + } + } +} + +/** + * txgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. 
In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = txgbe_ptp_read; + txgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + /* memory fence for update incval */ + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +static void txgbe_ptp_init_systime(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_TSC_1588_SYSTIML, 0); + wr32(hw, TXGBE_TSC_1588_SYSTIMH, 0); + TXGBE_WRITE_FLUSH(hw); +} + +/** + * txgbe_ptp_reset + * @adapter: the txgbe private board structure + * + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fall back to resetting it based on the kernel's realtime clock. + * + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. + */ +void txgbe_ptp_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the hardware timestamping mode */ + txgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + txgbe_ptp_start_cyclecounter(adapter); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + txgbe_ptp_init_systime(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; + + /* Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); +} + +/** + * txgbe_ptp_create_clock + * @adapter: the txgbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initializes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by txgbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). 
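+ * + * Return: 0 on success or if a clock device already exists, otherwise the + * error from ptp_clock_register().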
+ */ + +static long txgbe_ptp_create_clock(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + long err = 0; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + + if (hw->mac.type == txgbe_mac_aml || + hw->mac.type == txgbe_mac_aml40) + adapter->ptp_caps.pps = 1; + else + adapter->ptp_caps.pps = 0; + + adapter->ptp_caps.adjfine = txgbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = txgbe_ptp_adjtime; + adapter->ptp_caps.gettimex64 = txgbe_ptp_gettimex; + adapter->ptp_caps.settime64 = txgbe_ptp_settime64; + + adapter->ptp_caps.enable = txgbe_ptp_feature_enable; + adapter->ptp_setup_sdp = txgbe_ptp_setup_sdp; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + goto out; + } else { + e_dev_info("registered PHC device on %s\n", netdev->name); + } + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + +out: + return err; +} + +/** + * txgbe_ptp_init + * @adapter: the txgbe private adapter structure + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ +void txgbe_ptp_init(struct txgbe_adapter *adapter) +{ + /* initialize the spin lock first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (txgbe_ptp_create_clock(adapter)) + return; + + /* we have a clock, so we can initialize work for timestamps now */ + INIT_WORK(&adapter->ptp_tx_work, txgbe_ptp_tx_hwtstamp_work); + + /* reset the ptp related hardware bits */ + txgbe_ptp_reset(adapter); + + /* enter the TXGBE_PTP_RUNNING state */ + set_bit(__TXGBE_PTP_RUNNING, &adapter->state); +} + +/** + * txgbe_ptp_suspend - stop ptp work items + * @adapter: pointer to adapter struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. + */ +void txgbe_ptp_suspend(struct txgbe_adapter *adapter) +{ + /* leave the TXGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED; + + if (adapter->ptp_setup_sdp) + adapter->ptp_setup_sdp(adapter); + + cancel_work_sync(&adapter->ptp_tx_work); + txgbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * txgbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. 
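+ * + * It is safe to call this even if the clock was never registered; the + * ptp_clock pointer is checked before unregistering.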
+ */ +void txgbe_ptp_stop(struct txgbe_adapter *adapter) +{ + /* first, suspend ptp activity */ + txgbe_ptp_suspend(adapter); + + /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c new file mode 100644 index 000000000000..bd18650053af --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -0,0 +1,2047 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_type.h" +#include "txgbe_sriov.h" + +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf); +static int txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf); + +#ifdef CONFIG_PCI_IOV +static int __txgbe_enable_sriov(struct txgbe_adapter *adapter, + unsigned int num_vfs) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + u32 value = 0; + + adapter->flags |= TXGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); + + if (num_vfs != 1) { + if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (TXGBE_MAX_PF_MACVLANS + 1 + num_vfs); + + mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + adapter->mv_list = mv_list; + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
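+ * If the allocation fails, -ENOMEM is returned and txgbe_enable_sriov() + * falls back to txgbe_disable_sriov() to unwind.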
+ */ + adapter->vfinfo = kcalloc(num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (!adapter->vfinfo) + return -ENOMEM; + + adapter->num_vfs = num_vfs; + + /* enable L2 switch and replication */ + adapter->flags |= TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE; + + /* limit traffic classes based on VFs enabled */ + if (adapter->num_vfs < 16) { + adapter->dcb_cfg.num_tcs.pg_tcs = + TXGBE_DCB_MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = + TXGBE_DCB_MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + adapter->dcb_cfg.vt_mode = true; + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(TXGBE_FLAG2_RSC_CAPABLE | + TXGBE_FLAG2_RSC_ENABLED); + + /* enable spoof checking for all VFs */ + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; + adapter->vfinfo[i].link_state = TXGBE_VF_LINK_STATE_AUTO; + + /* The VF shares the RSS indirection table and RSS + * hash key with the PF, so RSS querying is disabled + * by default. + */ + adapter->vfinfo[i].rss_query_enabled = 0; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = TXGBEVF_XCAST_MODE_NONE; + } + + return 0; +} + +#define TXGBE_BA4_ADDR(vfinfo, reg) \ + ((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg))) +static int txgbe_vf_backup(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; + return 0; +} + +static int txgbe_vf_restore(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; + + return 0; +} + +/** + * txgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void txgbe_get_vfs(struct txgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + struct vf_data_storage *vfinfo; + + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + + /*pci_dev_get(vfdev);*/ + vfinfo = &adapter->vfinfo[vf]; + vfinfo->vfdev = vfdev; + vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64); +#ifdef CONFIG_PCI_IOV + txgbe_vf_backup(adapter, vf); +#endif + ++vf; + } +} + +/** + * txgbe_put_vfs - Release references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void txgbe_put_vfs(struct txgbe_adapter *adapter) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct vf_data_storage *vfinfo; + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + vfinfo = &adapter->vfinfo[vf]; + 
iounmap(vfinfo->b4_addr); + vfinfo->b4_addr = NULL; + vfinfo->vfdev = NULL; + /*pci_dev_put(vfdev);*/ + } +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void txgbe_enable_sriov(struct txgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + unsigned int num_vfs; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->max_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - override any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device.\n"); + } else { + int err; + /* The sapphire/amber-lite supports up to 64 VFs per physical + * function but this implementation limits allocation to 63 so + * that basic networking resources are still available to the + * physical function. If the user requests greater than + * 63 VFs then it is an error - reset to default of zero. + */ + num_vfs = min_t(unsigned int, adapter->max_vfs, + TXGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__txgbe_enable_sriov(adapter, num_vfs)) { + txgbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. + */ + e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); + txgbe_disable_sriov(adapter); +} +#endif /* CONFIG_PCI_IOV */ + +int txgbe_disable_sriov(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + +#ifdef CONFIG_PCI_IOV + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ +#ifdef CONFIG_PCI_IOV + txgbe_put_vfs(adapter); +#endif + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + /* set default pool back to 0 */ + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK, 0); + TXGBE_WRITE_FLUSH(hw); + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + /* take a breather then clean up driver data */ + msleep(100); + + adapter->flags &= ~(TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE); + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + return 0; +} + +static int txgbe_set_vf_multicasts(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & 
TXGBE_VT_MSGINFO_MASK) + >> TXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct txgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min_t(u16, entries, TXGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multicast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, TXGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + +void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(i)); + + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, TXGBE_PSR_MC_TBL(vector_reg), + 1 << vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + txgbe_full_sync_mac_table(adapter); +} + +int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add); +} + +static int txgbe_set_vf_lpe(struct txgbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* For sapphire/amber-lite we have to keep all PFs and VFs operating + * with the same max_frame value in order to avoid sending an oversize + * frame to a VF. 
In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + + /* determine VF receive enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), vfre); + + /* pull current max frame size from hardware */ + max_frs = rd32(hw, TXGBE_PSR_MAX_SZ); + if (max_frs < max_frame) + wr32(hw, TXGBE_PSR_MAX_SZ, max_frame); + + /* convert max_frame to KB and raise the MAC watchdog timeout if needed */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, TXGBE_MAC_WDG_TIMEOUT) & + TXGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, + max_frs - TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + + e_info(hw, "VF requested max frame size change to %d\n", max_frame); + + return 0; +} + +void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + + vmolr |= TXGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= TXGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void txgbe_set_vmvir(struct txgbe_adapter *adapter, + u16 vid, u16 qos, u16 vf, __be16 vlan_proto) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + TXGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + if (vlan_proto == htons(ETH_P_8021AD)) + vmvir |= 1 << TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT; + wr32(hw, TXGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void txgbe_clear_vmvir(struct txgbe_adapter *adapter, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void txgbe_vf_reset_event(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + txgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + txgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + txgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf, vfinfo->vlan_proto); + else + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf, vfinfo->vlan_proto); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + txgbe_set_rx_mode(adapter->netdev); + + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = txgbe_mbox_api_10; +} + +int txgbe_set_vf_mac(struct txgbe_adapter *adapter, + u16 vf, unsigned char *mac_addr) +{ + s32 retval = 0; + + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, + ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} + +static 
int txgbe_negotiate_vf_api(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case txgbe_mbox_api_10: + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int txgbe_get_vf_queues(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_22: + case txgbe_mbox_api_21: + case txgbe_mbox_api_20: + case txgbe_mbox_api_13: + case txgbe_mbox_api_12: + case txgbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[TXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[TXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* if TCs > 1 determine which TC belongs to default user priority */ + if (num_tcs > 1) + default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[TXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[TXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int txgbe_set_vf_macvlan(struct txgbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + txgbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
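+ * Either way -ENOSPC is returned, and the mailbox handler reports the + * failure back to the requesting VF.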
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[ETH_ALEN]; + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, + ETH_ALEN); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void txgbe_write_qde(struct txgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + + reg = rd32(hw, TXGBE_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + /* only touch the bits belonging to this VF's queues */ + if (qde == 1) + reg |= 1 << i; + else + reg &= ~(1 << i); + } + + wr32(hw, TXGBE_RDM_PF_QDE(n), reg); +} + +static inline void txgbe_write_hide_vlan(struct txgbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + + reg = rd32(hw, TXGBE_RDM_PF_HIDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + /* only touch the bits belonging to this VF's queues */ + if (hide_vlan == 1) + reg |= 1 << i; + else + reg &= ~(1 << i); + } + + wr32(hw, TXGBE_RDM_PF_HIDE(n), reg); +} + +static int txgbe_vf_reset_msg(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg = 0; + u32 reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + /* reset the filters for the device */ + txgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + txgbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* force drop enable for all VF Rx queues */ + txgbe_write_qde(adapter, vf, 1); + + /* set transmit and receive for vf */ + txgbe_set_vf_rx_tx(adapter, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg = (1 << vf_shift); + wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = TXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } + + /* Piggyback the multicast filter 
type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int txgbe_set_vf_mac_addr(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + + e_warn(drv, + "VF %d attempted to set a new MAC address but it already has an administratively set MAC address %pM\n", + vf, pm); + return -1; + } + return txgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int txgbe_find_vlvf_entry(struct txgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int txgbe_set_vf_vlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & TXGBE_PSR_VLAN_SWC_VLANID_MASK); + int vlan_offload = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> + TXGBE_VT_MSGINFO_VLAN_OFFLOAD_SHIFT; + int err = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + if (!vlan_offload) { + goto out; + } else { + e_warn(drv, "VF %d attempted to override set VLAN configuration\n", + vf); + return -1; + } + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = txgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits, vlvf; + s32 reg_ndx; + + reg_ndx = txgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. 
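+ * The pool bits live in the two 32-bit halves VLAN_SWC_VM_L/H, so the + * PF bit is masked out of whichever half holds VMDQ_P(0) before testing + * whether any other pool still references this filter.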
+ */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && + !bits) + txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +#endif + +out: + return err; +} + +static int txgbe_set_vf_macvlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> + TXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && + index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return 0; + } + + /* A non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + /* If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. + */ + if (adapter->vfinfo[vf].spoofchk_enabled) + txgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + } + + err = txgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, "VF %d has requested a MACVLAN filter but there is no space for it\n", + vf); + + return err < 0; +} + +static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + fallthrough; + case txgbe_mbox_api_13: + case txgbe_mbox_api_20: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + + if (xcast_mode > TXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) + xcast_mode = TXGBEVF_XCAST_MODE_MULTI; + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case TXGBEVF_XCAST_MODE_NONE: + disable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case TXGBEVF_XCAST_MODE_MULTI: + disable = TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE; + break; + case TXGBEVF_XCAST_MODE_ALLMULTI: + disable = TXGBE_PSR_VM_L2CTL_UPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_VPE; + break; + case TXGBEVF_XCAST_MODE_PROMISC: + disable = 0; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int txgbe_get_vf_link_state(struct 
txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + *link_state = adapter->vfinfo[vf].link_state; + + return 0; +} + +static int txgbe_get_fw_version(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + unsigned long *fw_version = (unsigned long *)&msgbuf[1]; + int ret; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + case txgbe_mbox_api_21: + case txgbe_mbox_api_22: + break; + default: + return -EOPNOTSUPP; + } + + ret = kstrtoul(adapter->eeprom_id, 16, fw_version); + if (ret < 0) + return ret; + + if (*fw_version == 0) + return -EOPNOTSUPP; + + return 0; +} + +static int txgbe_add_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx, i, j; + + /* look for an unused 5tuple filter index, + * and insert the filter into the list. + */ + for (sw_idx = 0; sw_idx < TXGBE_MAX_RDB_5T_CTL0_FILTERS; sw_idx++) { + i = sw_idx / (sizeof(uint32_t) * 8); + j = sw_idx % (sizeof(uint32_t) * 8); + if (!(filter->fivetuple_mask[i] & (1 << j))) { + filter->fivetuple_mask[i] |= 1 << j; + break; + } + } + if (sw_idx >= TXGBE_MAX_RDB_5T_CTL0_FILTERS) { + e_err(drv, "5tuple filters are full.\n"); + return -EINVAL; + } + + /* convert filter index on each vf to the global index */ + index = msgbuf[TXGBEVF_5T_CMD] & 0xFFFF; + adapter->vfinfo[vf].ft_filter_idx[index] = sw_idx; + + /* pool index */ + msgbuf[TXGBEVF_5T_CTRL0] |= vf << TXGBE_RDB_5T_CTL0_POOL_SHIFT; + /* compute absolute queue index */ + msgbuf[TXGBEVF_5T_CTRL1] += (vf * adapter->num_rx_queues_per_pool) << + TXGBE_RDB_5T_CTL1_RING_SHIFT; + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), msgbuf[TXGBEVF_5T_CTRL0]); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), msgbuf[TXGBEVF_5T_CTRL1]); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), msgbuf[TXGBEVF_5T_PORT]); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), msgbuf[TXGBEVF_5T_DA]); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), msgbuf[TXGBEVF_5T_SA]); + + return 0; +} + +static void txgbe_del_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 cmd, u32 vf) +{ + struct txgbe_5tuple_filter_info *filter = &adapter->ft_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 index, sw_idx; + + /* convert the filter index on each vf to the global index */ + index = cmd & 0xFFFF; + sw_idx = adapter->vfinfo[vf].ft_filter_idx[index]; + + filter->fivetuple_mask[sw_idx / (sizeof(uint32_t) * 8)] &= + ~(1 << (sw_idx % (sizeof(uint32_t) * 8))); + + wr32(hw, TXGBE_RDB_5T_CTL0(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_CTL1(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SDP(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_DA(sw_idx), 0); + wr32(hw, TXGBE_RDB_5T_SA(sw_idx), 0); +} + +static int txgbe_set_5tuple_filter_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 cmd = msgbuf[TXGBEVF_5T_CMD]; + bool add; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_21) + return -EOPNOTSUPP; + + add = !!(cmd & BIT(TXGBEVF_5T_ADD_SHIFT)); + if (add) + return txgbe_add_5tuple_filter_vf(adapter, msgbuf, vf); + + txgbe_del_5tuple_filter_vf(adapter, cmd, vf); + + return 0; +} + +static int txgbe_rcv_msg_from_vf(struct txgbe_adapter 
*adapter, u16 vf) +{ + u16 mbx_size = TXGBE_VXMAILBOX_SIZE; + u32 msgbuf[TXGBE_VXMAILBOX_SIZE]; + struct txgbe_hw *hw = &adapter->hw; + s32 retval; + + retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + TXGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == TXGBE_VF_RESET) + return txgbe_vf_reset_msg(adapter, vf); + + /* until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + txgbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case TXGBE_VF_SET_MAC_ADDR: + retval = txgbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_MULTICAST: + retval = txgbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_VLAN: + retval = txgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_LPE: + if (msgbuf[1] > TXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); + return -EINVAL; + } + retval = txgbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case TXGBE_VF_SET_MACVLAN: + retval = txgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_API_NEGOTIATE: + retval = txgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_QUEUES: + retval = txgbe_get_vf_queues(adapter, msgbuf, vf); + break; + case TXGBE_VF_UPDATE_XCAST_MODE: + retval = txgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_LINK_STATE: + retval = txgbe_get_vf_link_state(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_FW_VERSION: + retval = txgbe_get_fw_version(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_5TUPLE: + retval = txgbe_set_5tuple_filter_vf(adapter, msgbuf, vf); + break; + case TXGBE_VF_QUEUE_RATE_LIMIT: + retval = txgbe_set_queue_rate_limit_vf(adapter, msgbuf, vf); + break; + case TXGBE_VF_BACKUP: +#ifdef CONFIG_PCI_IOV + retval = txgbe_vf_backup(adapter, vf); +#endif + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = TXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + + txgbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void txgbe_rcv_ack_from_vf(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msg = TXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + txgbe_write_mbx(hw, &msg, 1, vf); +} + +void txgbe_msg_task(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!txgbe_check_for_rst(hw, vf)) + txgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!txgbe_check_for_msg(hw, vf)) + txgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!txgbe_check_for_ack(hw, vf)) + txgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void txgbe_disable_tx_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* disable transmit and 
receive for all vfs */ + wr32(hw, TXGBE_TDM_VF_TE(0), 0); + wr32(hw, TXGBE_TDM_VF_TE(1), 0); + + wr32(hw, TXGBE_RDM_VF_RE(0), 0); + wr32(hw, TXGBE_RDM_VF_RE(1), 0); +} + +static inline void txgbe_ping_vf(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, vf); +} + +void txgbe_ping_all_vfs(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0; i < adapter->num_vfs; i++) { + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, i); + } +} + +void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + + if (vf >= adapter->num_vfs) + return; + + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + if (adapter->vfinfo[vf].clear_to_send) + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, msgbuf, 2, vf); +} + +void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!adapter->num_vfs) + return; + + if (link_up) + msgbuf[1] = (adapter->speed << 1) | link_up; + //if (adapter->notify_down) + // msgbuf[1] |= TXGBE_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0; i < adapter->num_vfs; i++) { + /* rebuild the control word per VF so one VF's CTS bit does + * not leak into the next + */ + msgbuf[0] = TXGBE_PF_NOFITY_VF_LINK_STATUS | TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, msgbuf, 2, i); + } +} + +/** + * txgbe_set_all_vfs - update all vfs link state and queues + * @adapter: Pointer to adapter struct + * + * Reapply the stored link state, and with it the transmit and receive + * queue settings, for all vfs + **/ +void txgbe_set_all_vfs(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + txgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); + } +} + +int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + txgbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} + +static int txgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ + int err = 0; +#ifdef CONFIG_PCI_IOV + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = txgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. 
The PCI bus driver already checks for other values out of + * range. + */ + if ((num_vfs + adapter->num_vmdqs) > TXGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + err = __txgbe_enable_sriov(adapter, num_vfs); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + txgbe_vf_configuration(dev, (i | 0x10000000)); + + /* reset before enabling SRIOV to avoid mailbox issues */ + txgbe_sriov_reinit(adapter); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + txgbe_get_vfs(adapter); + +out: + return num_vfs; + +err_out: +#endif + + return err; +} + +static int txgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int err; +#ifdef CONFIG_PCI_IOV + u32 current_flags = adapter->flags; +#endif + + err = txgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +#ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) + txgbe_sriov_reinit(adapter); +#endif + + return err; +} + +int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return txgbe_pci_sriov_disable(dev); + else + return txgbe_pci_sriov_enable(dev, num_vfs); +} + +int txgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + if (is_valid_ether_addr(mac)) { + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = txgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + } + } else if (is_zero_ether_addr(mac)) { + unsigned char *vf_mac_addr = + adapter->vfinfo[vf].vf_mac_addresses; + + /* nothing to do */ + if (is_zero_ether_addr(vf_mac_addr)) + return 0; + + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); + + retval = txgbe_del_mac_filter(adapter, vf_mac_addr, vf); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = false; + memcpy(vf_mac_addr, mac, ETH_ALEN); + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); + } + } else { + retval = -EINVAL; + } + return retval; +} + +static int txgbe_enable_port_vlan(struct txgbe_adapter *adapter, + int vf, u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + txgbe_set_vmvir(adapter, vlan, qos, vf, vlan_proto); + txgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + txgbe_write_qde(adapter, vf, 1); + txgbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + adapter->vfinfo[vf].vlan_proto = vlan_proto; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x 
on VF %d\n", vlan, qos, vf); + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int txgbe_disable_port_vlan(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + txgbe_clear_vmvir(adapter, vf); + txgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + txgbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + adapter->vfinfo[vf].vlan_proto = 0; + + return err; +} + +int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if (vf >= adapter->num_vfs || (vlan > VLAN_VID_MASK - 1) || qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) + return -EPROTONOSUPPORT; + + if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. + */ + if (adapter->vfinfo[vf].pf_vlan) + err = txgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = txgbe_enable_port_vlan(adapter, vf, vlan, qos, vlan_proto); + } else { + err = txgbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} + +int txgbe_link_mbps(struct txgbe_adapter *adapter) +{ + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_40GB_FULL: + return 40000; + case TXGBE_LINK_SPEED_25GB_FULL: + return 25000; + case TXGBE_LINK_SPEED_10GB_FULL: + return 10000; + case TXGBE_LINK_SPEED_1GB_FULL: + return 1000; + default: + return 0; + } +} + +u16 txgbe_frac_to_bi(u16 frac, u16 denom, int max_bits) +{ + u16 value = 0; + + while (frac > 0 && max_bits > 0) { + max_bits -= 1; + frac *= 2; + if (frac >= denom) { + value |= BIT(max_bits); + frac -= denom; + } + } + + return value; +} + +static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u32 bcnrc_val; + int factor_int; + int factor_fra; + int link_speed; + u16 queue, queues_per_pool; + u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
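+ * Note that the jumbo value (0x14) is written unconditionally below.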
+ */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + if (max_tx_rate) { + u16 frac; + + link_speed = adapter->vf_rate_link_speed / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_INT_MASK, + factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, + factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_VM_IDX, vf); + wr32m(hw, TXGBE_TDM_RL_VM_CFG, + TXGBE_TDM_RL_EN, 0); + } + } else { + max_tx_rate /= queues_per_pool; + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, TXGBE_TDM_RP_IDX, reg_idx); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } + } +} + +void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter) +{ + int i; + + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) + return; + + if (txgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; + dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vf_rate_link_speed) + adapter->vfinfo[i].max_tx_rate = 0; + + txgbe_set_vf_rate_limit(adapter, i); + } +} + +static int +txgbe_set_queue_rate_limit_vf(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u16 queue, queues_per_pool, max_tx_rate; + int factor_int, factor_fra, link_speed; + u32 reg_idx; + + if (hw->mac.type != txgbe_mac_aml) + return -EOPNOTSUPP; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api < txgbe_mbox_api_22) + return -EOPNOTSUPP; + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + queue = msgbuf[TXGBEVF_Q_RATE_INDEX]; + max_tx_rate = msgbuf[TXGBEVF_Q_RATE_LIMIT]; + + /* convert queue index on each vf to the global index */ + reg_idx = (vf * queues_per_pool) + queue; + + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
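+ * The per-queue rate itself is programmed below as a link_speed / + * max_tx_rate ratio in fixed point, with a 14-bit fractional part + * built by txgbe_frac_to_bi().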
+ */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + if (max_tx_rate) { + u16 frac; + + link_speed = txgbe_link_mbps(adapter) / 1000 * 1024; + + /* Calculate the rate factor values to set */ + factor_int = link_speed / max_tx_rate; + frac = (link_speed % max_tx_rate) * 10000 / max_tx_rate; + factor_fra = txgbe_frac_to_bi(frac, 10000, 14); + + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_INT_MASK, factor_int << TXGBE_TDM_FACTOR_INT_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_FACTOR_FRA_MASK, factor_fra << TXGBE_TDM_FACTOR_FRA_SHIFT); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, TXGBE_TDM_RL_EN); + } else { + wr32(hw, TXGBE_TDM_RL_QUEUE_IDX, reg_idx); + wr32m(hw, TXGBE_TDM_RL_QUEUE_CFG, + TXGBE_TDM_RL_EN, 0); + } + + adapter->vfinfo[vf].queue_max_tx_rate[queue] = max_tx_rate; + e_info(drv, "set vf %d queue %d max_tx_rate to %d Mbps\n", + vf, queue, max_tx_rate); + + return 0; +} + +int txgbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at a speed of at least 1 Gbps */ + if (adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + link_speed = txgbe_link_mbps(adapter); + /* rate limit cannot be less than 10 Mbps or greater than link speed */ + if (max_tx_rate && (max_tx_rate <= 10 || max_tx_rate > link_speed)) + return -EINVAL; + + /* store values */ + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + /* update hardware configuration */ + txgbe_set_vf_rate_limit(adapter, vf); + + return 0; +} + +int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, TXGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } else { + regval = (setting << (vf - 32)); + wr32m(hw, TXGBE_TDM_MAC_AS_H, + regval | (1 << (vf - 32)), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_H, + regval | (1 << (vf - 32)), regval); + } + } + return 0; +} + +/** + * txgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or reset correct transmit and receive for vf + **/ +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = rd32(hw, TXGBE_TDM_VF_TE(reg_offset)); + reg_cur_rx = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), reg_req_rx); + } else { + reg_req_tx = 1 << vf_shift; + reg_req_rx = 1 << vf_shift; + 
+int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 regval;
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	adapter->vfinfo[vf].spoofchk_enabled = setting;
+
+	if (vf < 32) {
+		regval = (setting << vf);
+		wr32m(hw, TXGBE_TDM_MAC_AS_L,
+		      regval | (1 << vf), regval);
+
+		if (adapter->vfinfo[vf].vlan_count) {
+			wr32m(hw, TXGBE_TDM_VLAN_AS_L,
+			      regval | (1 << vf), regval);
+		}
+	} else {
+		regval = (setting << (vf - 32));
+		wr32m(hw, TXGBE_TDM_MAC_AS_H,
+		      regval | (1 << (vf - 32)), regval);
+
+		if (adapter->vfinfo[vf].vlan_count) {
+			wr32m(hw, TXGBE_TDM_VLAN_AS_H,
+			      regval | (1 << (vf - 32)), regval);
+		}
+	}
+	return 0;
+}
+
+/**
+ * txgbe_set_vf_rx_tx - Set VF rx tx
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Enable or disable transmit and receive for the given VF, according to
+ * its link_enable state
+ **/
+static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+	u32 reg_offset, vf_shift;
+
+	vf_shift = vf % 32;
+	reg_offset = vf / 32;
+
+	reg_cur_tx = rd32(hw, TXGBE_TDM_VF_TE(reg_offset));
+	reg_cur_rx = rd32(hw, TXGBE_RDM_VF_RE(reg_offset));
+
+	if (adapter->vfinfo[vf].link_enable) {
+		reg_req_tx = reg_cur_tx | 1 << vf_shift;
+		reg_req_rx = reg_cur_rx | 1 << vf_shift;
+		/* Enable particular VF */
+		if (reg_cur_tx != reg_req_tx)
+			wr32(hw, TXGBE_TDM_VF_TE(reg_offset), reg_req_tx);
+		if (reg_cur_rx != reg_req_rx)
+			wr32(hw, TXGBE_RDM_VF_RE(reg_offset), reg_req_rx);
+	} else {
+		reg_req_tx = 1 << vf_shift;
+		reg_req_rx = 1 << vf_shift;
+		/* Disable particular VF */
+		if (reg_cur_tx & reg_req_tx)
+			wr32(hw, TXGBE_TDM_VFTE_CLR(reg_offset), reg_req_tx);
+		if (reg_cur_rx & reg_req_rx)
+			wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg_req_rx);
+	}
+	if (adapter->vfinfo[vf].link_state == IFLA_VF_LINK_STATE_ENABLE &&
+	    !(rd32(hw, TXGBE_MAC_TX_CFG) & TXGBE_MAC_TX_CFG_TE)) {
+		wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE,
+		      TXGBE_MAC_TX_CFG_TE);
+		TXGBE_WRITE_FLUSH(hw);
+		wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE,
+		      TXGBE_MAC_TX_CFG_TE);
+	}
+}
+
+/**
+ * txgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on or off for a single VF
+ **/
+void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state)
+{
+	bool link_up = adapter->link_up;
+
+	adapter->vfinfo[vf].link_state = state;
+
+	switch (state) {
+	case TXGBE_VF_LINK_STATE_AUTO:
+		if (test_bit(__TXGBE_DOWN, &adapter->state)) {
+			adapter->vfinfo[vf].link_enable = false;
+		} else {
+			link_up = adapter->link_up;
+			adapter->vfinfo[vf].link_enable = true;
+		}
+		break;
+	case TXGBE_VF_LINK_STATE_ENABLE:
+		adapter->vfinfo[vf].link_enable = true;
+		link_up = true;
+		break;
+	case TXGBE_VF_LINK_STATE_DISABLE:
+		adapter->vfinfo[vf].link_enable = false;
+		link_up = false;
+		break;
+	}
+
+	/* restart the VF */
+	adapter->vfinfo[vf].clear_to_send = false;
+	txgbe_ping_vf(adapter, vf);
+
+	txgbe_ping_vf_with_link_status(adapter, link_up, vf);
+
+	txgbe_set_vf_rx_tx(adapter, vf);
+}
+
+/**
+ * txgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (vf < 0 || vf >= adapter->num_vfs) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF link - invalid VF identifier %d\n", vf);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (state) {
+	case IFLA_VF_LINK_STATE_ENABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state enable\n", vf);
+		txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_ENABLE);
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state disable\n", vf);
+		txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_DISABLE);
+		break;
+	case IFLA_VF_LINK_STATE_AUTO:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state auto\n", vf);
+		txgbe_set_vf_link_state(adapter, vf, TXGBE_VF_LINK_STATE_AUTO);
+		break;
+	default:
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF %d - invalid link state %d\n", vf, state);
+		ret = -EINVAL;
+	}
+out:
+	return ret;
+}
+
+int txgbe_trans_vf_link_state(int state)
+{
+	switch (state) {
+	case TXGBE_VF_LINK_STATE_ENABLE:
+		return IFLA_VF_LINK_STATE_ENABLE;
+	case TXGBE_VF_LINK_STATE_DISABLE:
+		return IFLA_VF_LINK_STATE_DISABLE;
+	case TXGBE_VF_LINK_STATE_AUTO:
+		return IFLA_VF_LINK_STATE_AUTO;
+	}
+	return IFLA_VF_LINK_STATE_AUTO;
+}
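For context on how these NDOs are reached: the link-state handlers above are driven from userspace over the standard rtnetlink path, e.g. "ip link set dev <pf> vf 0 state enable|disable|auto". That is why both directions of the IFLA_VF_LINK_STATE_* <-> TXGBE_VF_LINK_STATE_* translation are needed: txgbe_ndo_set_vf_link_state() consumes the IFLA value, and txgbe_ndo_get_vf_config() below reports it back through txgbe_trans_vf_link_state().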
+int txgbe_ndo_get_vf_config(struct net_device *netdev,
+			    int vf, struct ifla_vf_info *ivi)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+	ivi->vf = vf;
+	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+
+	ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate;
+	ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate;
+
+	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+	ivi->qos = adapter->vfinfo[vf].pf_qos;
+	ivi->vlan_proto = adapter->vfinfo[vf].vlan_proto;
+	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+	ivi->trusted = adapter->vfinfo[vf].trusted;
+	ivi->linkstate = txgbe_trans_vf_link_state(adapter->vfinfo[vf].link_state);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h
new file mode 100644
index 000000000000..c94e4c867284
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_SRIOV_H_
+#define _TXGBE_SRIOV_H_
+
+/* The txgbe driver limits the maximum number of VFs that can be
+ * enabled to 63 (TXGBE_MAX_VF_FUNCTIONS - 1).
+ */
+#define TXGBE_MAX_VFS_DRV_LIMIT (TXGBE_MAX_VF_FUNCTIONS - 1)
+
+#define TXGBE_VF_LINK_STATE_DISABLE 0
+#define TXGBE_VF_LINK_STATE_AUTO 1
+#define TXGBE_VF_LINK_STATE_ENABLE 2
+
+void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter);
+int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf);
+void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe);
+void txgbe_msg_task(struct txgbe_adapter *adapter);
+int txgbe_set_vf_mac(struct txgbe_adapter *adapter,
+		     u16 vf, unsigned char *mac_addr);
+void txgbe_disable_tx_rx(struct txgbe_adapter *adapter);
+void txgbe_ping_all_vfs(struct txgbe_adapter *adapter);
+void txgbe_ping_all_vfs_with_link_status(struct txgbe_adapter *adapter, bool link_up);
+void txgbe_ping_vf_with_link_status(struct txgbe_adapter *adapter, bool link_up, u16 vf);
+int txgbe_trans_vf_link_state(int state);
+void txgbe_set_all_vfs(struct txgbe_adapter *adapter);
+
+int txgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+			  u8 qos, __be16 vlan_proto);
+
+int txgbe_ndo_set_vf_bw(struct net_device *netdev,
+			int vf,
+			int min_tx_rate,
+			int max_tx_rate);
+
+int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
+int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+int txgbe_ndo_get_vf_config(struct net_device *netdev,
+			    int vf, struct ifla_vf_info *ivi);
+
+int txgbe_disable_sriov(struct txgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void txgbe_enable_sriov(struct txgbe_adapter *adapter);
+#endif
+int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+void txgbe_check_vf_rate_limit(struct txgbe_adapter *adapter);
+void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state);
+#define TXGBE_DEV_ID_SP_VF 0x1000
+#endif /* _TXGBE_SRIOV_H_ */
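These handlers only take effect once they are installed in the PF netdev's net_device_ops. The ops table itself lives in txgbe_main.c and is not part of this hunk, so the following is a sketch of the assumed wiring only, using the standard net_device_ops callback names:

#include <linux/netdevice.h>
#include "txgbe_sriov.h"

/* Sketch only: the real table in txgbe_main.c also carries the usual
 * open/stop/xmit callbacks; the VF-related hooks map as follows.
 */
static const struct net_device_ops txgbe_netdev_ops = {
	/* ... */
	.ndo_set_vf_mac		= txgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= txgbe_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= txgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= txgbe_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= txgbe_ndo_set_vf_trust,
	.ndo_set_vf_link_state	= txgbe_ndo_set_vf_link_state,
	.ndo_get_vf_config	= txgbe_ndo_get_vf_config,
};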
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c
new file mode 100644
index 000000000000..9d5a145cb3df
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sysfs.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include "txgbe.h"
+#include "txgbe_hw.h"
+#include "txgbe_type.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* hwmon callback functions */
+static ssize_t txgbe_hwmon_show_temp(struct device __always_unused *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value;
+	struct txgbe_hw *hw = txgbe_attr->hw;
+
+	/* reset the temp field */
+	hw->mac.ops.get_thermal_sensor_data(hw);
+
+	value = txgbe_attr->sensor->temp;
+
+	/* display in millidegrees */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t txgbe_hwmon_show_alarmthresh(struct device __always_unused *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = txgbe_attr->sensor->alarm_thresh;
+
+	/* display in millidegrees */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t txgbe_hwmon_show_dalarmthresh(struct device __always_unused *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct hwmon_attr *txgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = txgbe_attr->sensor->dalarm_thresh;
+
+	/* display in millidegrees */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
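The three show callbacks above all rely on the same trick: the device_attribute handed to .show is embedded inside a hwmon_attr, so container_of() walks back to the wrapper that carries the sensor pointer. A standalone illustration of that pattern, using simplified stand-in types rather than the driver's structs:

#include <stddef.h>
#include <stdio.h>

struct device_attribute {		/* stand-in for the kernel type */
	const char *name;
};

struct hwmon_attr {			/* simplified wrapper, like the driver's */
	struct device_attribute dev_attr;
	int temp;			/* pretend sensor reading, degrees C */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int show_temp(struct device_attribute *attr)
{
	/* Recover the wrapper from the embedded member, as the driver does. */
	struct hwmon_attr *a = container_of(attr, struct hwmon_attr, dev_attr);

	return a->temp * 1000;		/* report millidegrees */
}

int main(void)
{
	struct hwmon_attr a = { .dev_attr = { .name = "temp0_input" }, .temp = 55 };

	printf("%s = %d\n", a.dev_attr.name, show_temp(&a.dev_attr));
	return 0;
}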
+/**
+ * txgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * These are included in our hwmon_attr struct, which contains the references
+ * to the data structures we need to get the data to display.
+ */
+static int txgbe_add_hwmon_attr(struct txgbe_adapter *adapter, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *txgbe_attr;
+
+	n_attr = adapter->txgbe_hwmon_buff.n_hwmon;
+	txgbe_attr = &adapter->txgbe_hwmon_buff.hwmon_list[n_attr];
+
+	switch (type) {
+	case TXGBE_HWMON_TYPE_TEMP:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_temp;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_input", 0);
+		break;
+	case TXGBE_HWMON_TYPE_ALARMTHRESH:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_alarmthresh;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_alarmthresh", 0);
+		break;
+	case TXGBE_HWMON_TYPE_DALARMTHRESH:
+		txgbe_attr->dev_attr.show = txgbe_hwmon_show_dalarmthresh;
+		snprintf(txgbe_attr->name, sizeof(txgbe_attr->name),
+			 "temp%u_dalarmthresh", 0);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	txgbe_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor;
+	txgbe_attr->hw = &adapter->hw;
+	txgbe_attr->dev_attr.store = NULL;
+	txgbe_attr->dev_attr.attr.mode = 0444;
+	txgbe_attr->dev_attr.attr.name = txgbe_attr->name;
+
+	rc = device_create_file(pci_dev_to_dev(adapter->pdev),
+				&txgbe_attr->dev_attr);
+
+	if (rc == 0)
+		++adapter->txgbe_hwmon_buff.n_hwmon;
+
+	return rc;
+}
+
+static void txgbe_sysfs_del_adapter(struct txgbe_adapter __maybe_unused *adapter)
+{
+	int i;
+
+	if (!adapter)
+		return;
+
+	for (i = 0; i < adapter->txgbe_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(pci_dev_to_dev(adapter->pdev),
+				   &adapter->txgbe_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->txgbe_hwmon_buff.hwmon_list);
+
+	if (adapter->txgbe_hwmon_buff.device)
+		hwmon_device_unregister(adapter->txgbe_hwmon_buff.device);
+}
+
+/* called from txgbe_main.c */
+void txgbe_sysfs_exit(struct txgbe_adapter *adapter)
+{
+	txgbe_sysfs_del_adapter(adapter);
+}
+
+/* called from txgbe_main.c */
+int txgbe_sysfs_init(struct txgbe_adapter *adapter)
+{
+	int rc = 0;
+	struct hwmon_buff *txgbe_hwmon = &adapter->txgbe_hwmon_buff;
+	int n_attrs;
+	struct txgbe_hw *hw = &adapter->hw;
+
+	if (!adapter)
+		goto err;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (hw->mac.ops.init_thermal_sensor_thresh(hw))
+		goto no_thermal;
+
+	/* Allocate space for the max number of attributes:
+	 * max num sensors * values (temp, alarmthresh, dalarmthresh)
+	 */
+	n_attrs = 3;
+	txgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+					  GFP_KERNEL);
+	if (!txgbe_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	txgbe_hwmon->device =
+		hwmon_device_register(pci_dev_to_dev(adapter->pdev));
+	if (IS_ERR(txgbe_hwmon->device)) {
+		rc = PTR_ERR(txgbe_hwmon->device);
+		goto err;
+	}
+
+	/* Bail if any hwmon attr struct fails to initialize */
+	rc = txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_TEMP);
+	rc |= txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_ALARMTHRESH);
+	rc |= txgbe_add_hwmon_attr(adapter, TXGBE_HWMON_TYPE_DALARMTHRESH);
+	if (rc)
+		goto err;
+
+no_thermal:
+	goto exit;
+
+err:
+	txgbe_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
new file mode 100644
index 000000000000..451c5936c662
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -0,0 +1,3555 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd.
*/ + +#ifndef _TXGBE_TYPE_H_ +#define _TXGBE_TYPE_H_ + +#include +#include +#include +#include +#include +#include "txgbe_type.h" +#include "txgbe_mtd.h" + +/************ txgbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif + +/* Device IDs */ +#define TXGBE_DEV_ID_SP1000 0x1001 +#define TXGBE_DEV_ID_WX1820 0x2001 +#define TXGBE_DEV_ID_AML 0x5000 +#define TXGBE_DEV_ID_AML5025 0x5025 +#define TXGBE_DEV_ID_AML5125 0x5125 +#define TXGBE_DEV_ID_AML5040 0x5040 +#define TXGBE_DEV_ID_AML5140 0x5140 + +/* Subsystem IDs */ +/* SFP */ +#define TXGBE_ID_SP1000_SFP 0x0000 +#define TXGBE_ID_WX1820_SFP 0x2000 +#define TXGBE_ID_SFP 0x00 + +/* copper */ +#define TXGBE_ID_SP1000_XAUI 0x1010 +#define TXGBE_ID_WX1820_XAUI 0x2010 +#define TXGBE_ID_XAUI 0x10 +#define TXGBE_ID_SP1000_SGMII 0x1020 +#define TXGBE_ID_WX1820_SGMII 0x2020 +#define TXGBE_ID_SGMII 0x20 +/* backplane */ +#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030 +#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030 +#define TXGBE_ID_KR_KX_KX4 0x30 +/* MAC Interface */ +#define TXGBE_ID_SP1000_MAC_XAUI 0x1040 +#define TXGBE_ID_WX1820_MAC_XAUI 0x2040 +#define TXGBE_ID_MAC_XAUI 0x40 +#define TXGBE_ID_SP1000_MAC_SGMII 0x1060 +#define TXGBE_ID_WX1820_MAC_SGMII 0x2060 +#define TXGBE_ID_MAC_SGMII 0x60 + +#define TXGBE_NCSI_SUP 0x8000 +#define TXGBE_NCSI_MASK 0x8000 +#define TXGBE_WOL_SUP 0x4000 +#define TXGBE_WOL_MASK 0x4000 +#define TXGBE_DEV_MASK 0xf0 + +#define TXGBE_FLASH_HEADER_FLAG 0x5aa5 + +/* Combined interface*/ +#define TXGBE_ID_SFI_XAUI 0x50 + +/* Revision ID */ +#define TXGBE_SP_MPW 0xfe + +/* MDIO Manageable Devices (MMDs). */ +#define TXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define TXGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define TXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define TXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_DEV_TYPE 0x1F /* Vendor specific 2 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* VENDOR_SPECIFIC_2_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_PORT_CTRL 0xF001 +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_SW_RST BIT(15) +#define TXGBE_MDIO_VENDOR_SPECIFIC_2_POWER BIT(11) + +/* AUTO_NEG_DEV regs */ +#define TXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define TXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status Reg */ +#define TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define TXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define TXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ + +#define TXGBE_MDIO_AUTO_NEG_10GBASE_EEE_ADVT 0x8 +#define TXGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define TXGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define TXGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 +#define TXGBE_MDIO_AUTO_NEG_LP_10GBASE_CAP 0x0800 +#define TXGBE_MDIO_AUTO_NEG_10GBASET_STAT 0x0021 + +#define TXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define 
TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define TXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define TXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define TXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define TXGBE_MII_RESTART 0x200 +#define TXGBE_MII_AUTONEG_COMPLETE 0x20 +#define TXGBE_MII_AUTONEG_LINK_UP 0x04 +#define TXGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define TXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define TXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. */ +#define TXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define TXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define TXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define TXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define TXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define TXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define TXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define TXGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define TXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define TXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define TXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define TXGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define TXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* AMLITE ETH PHY Registers */ +#define SR_AN_CTRL 0x70000 +#define VR_PCS_DIG_CTRL1 0x38000 +#define SR_PCS_CTRL1 0x30000 +#define SR_PCS_CTRL2 0x30007 +#define SR_PMA_CTRL2 0x10007 +#define VR_PCS_DIG_CTRL3 0x38003 +#define VR_PMA_CTRL3 0x180a8 +#define VR_PMA_CTRL4 0x180a9 +#define SR_PMA_RS_FEC_CTRL 0x100c8 +#define ANA_OVRDEN0 0xca4 +#define ANA_OVRDEN1 0xca8 +#define ANA_OVRDVAL0 0xcb0 +#define ANA_OVRDVAL5 0xcc4 +#define OSC_CAL_N_CDR4 0x14 +#define PLL0_CFG0 0xc10 +#define PLL0_CFG2 0xc18 +#define PLL0_DIV_CFG0 0xc1c +#define PLL1_CFG0 0xc48 +#define PLL1_CFG2 0xc50 +#define PIN_OVRDEN0 0xc8c +#define PIN_OVRDVAL0 0xc94 +#define DATAPATH_CFG0 0x142c +#define DATAPATH_CFG1 0x1430 +#define AN_CFG1 0x1438 +#define SPARE52 0x16fc +#define RXS_CFG0 0x000 +#define PMD_CFG0 0x1400 +#define SR_PCS_STS1 0x30001 + +/* ETH PHY Registers */ +#define TXGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define TXGBE_SR_PCS_CTL2 0x30007 +#define TXGBE_SR_PMA_MMD_CTL1 0x10000 +#define TXGBE_SR_MII_MMD_CTL 0x1F0000 +#define TXGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define TXGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define TXGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM 0x80 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM 0x100 +#define TXGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define TXGBE_SR_AN_MMD_CTL 0x70000 +#define TXGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define TXGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define TXGBE_SR_AN_MMD_ADV_REG3 0x70012 +#define TXGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define TXGBE_SR_AN_MMD_LP_ABL2 0x70014 +#define TXGBE_SR_AN_MMD_LP_ABL3 0x70015 +#define TXGBE_VR_AN_KR_MODE_CL 0x78003 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 +#define TXGBE_PHY_MPLLA_CTL0 0x18071 
+#define TXGBE_PHY_MPLLA_CTL3 0x18077 +#define TXGBE_PHY_MISC_CTL0 0x18090 +#define TXGBE_PHY_VCO_CAL_LD0 0x18092 +#define TXGBE_PHY_VCO_CAL_LD1 0x18093 +#define TXGBE_PHY_VCO_CAL_LD2 0x18094 +#define TXGBE_PHY_VCO_CAL_LD3 0x18095 +#define TXGBE_PHY_VCO_CAL_REF0 0x18096 +#define TXGBE_PHY_VCO_CAL_REF1 0x18097 +#define TXGBE_PHY_RX_AD_ACK 0x18098 +#define TXGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define TXGBE_PHY_DFE_TAP_CTL0 0x1805E +#define TXGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define TXGBE_PHY_RX_EQ_CTL0 0x18058 +#define TXGBE_PHY_RX_EQ_CTL 0x1805C +#define TXGBE_PHY_TX_EQ_CTL0 0x18036 +#define TXGBE_PHY_TX_EQ_CTL1 0x18037 +#define TXGBE_PHY_TX_RATE_CTL 0x18034 +#define TXGBE_PHY_RX_RATE_CTL 0x18054 +#define TXGBE_PHY_TX_GEN_CTL2 0x18032 +#define TXGBE_PHY_RX_GEN_CTL2 0x18052 +#define TXGBE_PHY_RX_GEN_CTL3 0x18053 +#define TXGBE_PHY_MPLLA_CTL2 0x18073 +#define TXGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define TXGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define TXGBE_PHY_TX_GENCTRL1 0x18031 + +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define TXGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define TXGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_10GBASER_KR 33 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR 0x7B +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define TXGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define TXGBE_PHY_VCO_CAL_LD0_10GBASER_KR 1353 +#define TXGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define TXGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_10GBASER_KR 41 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define TXGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define TXGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define TXGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_10GBASER_KR 0x0 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 
+#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_10GBASER_KR 0x0 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define TXGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/* amlite: FPGA */ +/* PHY MDI STANDARD CONFIG */ +#define TXGBE_MDI_PHY_ID1_OFFSET 2 +#define TXGBE_MDI_PHY_ID2_OFFSET 3 +#define TXGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define TXGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define TXGBE_MDI_PHY_DUPLEX 0x0100 +#define TXGBE_MDI_PHY_RESTART_AN 0x0200 +#define TXGBE_MDI_PHY_ANE 0x1000 +#define TXGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define TXGBE_MDI_PHY_RESET 0x8000 + +#define TXGBE_PHY_RST_WAIT_PERIOD 50 + +#define TXGBE_MDI_PHY_INT_LSC 0x0400 +#define TXGBE_MDI_PHY_INT_ANC 0x0800 + +#define MV1119_CTRL 0 /* Page Any, Control reg */ +#define MV1119_STUS 1 /* Page Any, Status reg */ +#define MV1119_PHY_ID_1 2 /* Page Any, Phy Identifier 1 */ +#define MV1119_PHY_ID_2 3 /* Page Any, Phy Identifier 2 */ +#define MV1119_AUTO_NEGO_ADVER 4 /* Page Any, Auto-Negotiation Advertisement reg */ +#define MV1119_LK_PARTNER_ABILITY 5 /* Page Any, Link Partner Ability reg */ +#define MV1119_AUTO_NEGO_EX 6 /* Page Any, Auto-Negotiation Expansion reg */ +#define MV1119_NEXT_PAGE_TRANS 7 /* Page Any, Next Page Transmit reg */ +#define MV1119_LK_PARTNER_NEXT_PAGE 8 /* Page Any, Link Partner Next Page reg */ +#define MV1119_1000BASE_T_CTRL 9 /* Page Any, 1000BASE-T Control reg */ +#define MV1119_1000BASE_T_STUS 10 /* Page Any, 1000BASE-T Status reg */ +#define MV1119_EX_STUS 15 /* Page Any, Extended Status reg */ +#define MV1119_CO_SPEC_CTRL_1 16 /* Page 0, Copper Specific Control reg 1 */ 
+#define MV1119_CO_SPEC_STUS_1 17 /* Page 0, Copper Specific Status reg 1 */ +#define MV1119_CO_SPEC_INT_EN 18 /* Page 0, Copper Specific Interrupt Enable reg */ +#define MV1119_CO_SPEC_STUS_2 19 /* Page 0, Copper Specific Status reg 2 */ +#define MV1119_CO_SPEC_CTRL_3 20 /* Page 0, Copper Specific Control reg 3 */ +#define MV1119_RECE_ERR_COUT 21 /* Page 0, Receive Error Counter reg */ +#define MV1119_PAGE_ADD 22 /* Page Any, Page Address */ +#define MV1119_GLO_INT_STUS 23 /* Page 0,2, Global Interrupt Status */ +#define MV1119_CO_SPEC_CTRL_2 26 /* Page 0, Copper Specific Control reg 2 */ +#define MV1119_MAC_SPEC_CTRL_1 16 /* Page 2, MAC Specific Control reg 1 */ +#define MV1119_MAC_SPEC_INT_EN 18 /* Page 2, MAC Specific Interrupt Enable reg */ +#define MV1119_MAC_SPEC_STUS_2 19 /* Page 2, MAC Specific Status reg 2 */ +#define MV1119_MAC_SPEC_CTRL 21 /* Page 2, MAC Specific Control reg */ +#define MV1119_LED_FUN_CTRL 16 /* Page 3, LED Function Control reg */ +#define MV1119_LED_POLAR_CTRL 17 /* Page 3, LED Polarity Control reg */ +#define MV1119_LED_TIME_CTRL 18 /* Page 3, LED Timer Control reg */ + +#define CBIT(_x) (1 << (_x)) + +#define MV1119_C_RESET CBIT(15) +#define MV1119_C_LOOPBACK CBIT(14) +#define MV1119_C_AUTO_NE_EN CBIT(12) +#define MV1119_C_POWER_DOWN CBIT(11) +#define MV1119_C_RE_CO_AUTO_NE CBIT(9) +#define MV1119_C_CO_DUPLEX_MODE CBIT(8) +#define MV1119_C_SPEED_SELECT1 CBIT(6) +#define MV1119_C_10M 0x00 +#define MV1119_C_100M CBIT(13) +#define MV1119_C_1000M CBIT(6) +#define MV1119_C_FULL_DUP CBIT(8) +#define MV1119_C_HALF_DUP 0x00 +#define MV1119_ANA_ASYM_PAUSE CBIT(11) +#define MV1119_ANA_PAUSE CBIT(10) +#define MV1119_ANA_100FULL CBIT(8) +#define MV1119_ANA_100HALF CBIT(7) +#define MV1119_ANA_10FULL CBIT(6) +#define MV1119_ANA_10HALF CBIT(5) +#define MV1119_1000BC_1000FULL CBIT(9) +#define MV1119_1000BC_1000HALF CBIT(8) +#define MV1119_CSS1_SPEED (CBIT(14) | CBIT(15)) +#define MV1119_CSS1_DUPLEX CBIT(13) +#define MV1119_CSS1_LINK CBIT(10) +#define MV1119_CSS2_AUTO_NE_ERR CBIT(15) +#define MV1119_CSS2_SPEED_CH CBIT(14) +#define MV1119_CSS2_DUPLEX_CH CBIT(13) +#define MV1119_CSS2_AUTO_NE_COMPLETE CBIT(11) +#define MV1119_CSS2_CO_LINK_STATUS_CH CBIT(10) +#define MV1119_CSC_DOWNSHIFT_COUNT (CBIT(12) | CBIT(13) | CBIT(14)) +#define MV1119_CSC_DOWNSHIFT_EN CBIT(11) +#define MV1119_CSC_POWER_DOWN CBIT(2) + +#define MV1119_ANA_100 (MV1119_ANA_100FULL | MV1119_ANA_100HALF) +#define MV1119_ANA_10 (MV1119_ANA_10FULL | MV1119_ANA_10HALF) +#define MV1119_ANA_100_AND_10 (MV1119_ANA_100 | MV1119_ANA_10) +#define MV1119_1000BC_1000 (MV1119_1000BC_1000FULL | MV1119_1000BC_1000HALF) + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define TXGBE_MIS_RST 0x1000C +#define TXGBE_MIS_PWR 0x10000 +#define TXGBE_MIS_CTL 0x10004 +#define TXGBE_MIS_PF_SM 0x10008 +#define TXGBE_MIS_PRB_CTL 0x10010 /* for PCIE recovery only */ +#define TXGBE_MIS_ST 0x10028 +#define TXGBE_MIS_SWSM 0x1002C +#define TXGBE_MIS_RST_ST 0x10030 + +#define PX_PF_PEND 0x4C0 +#define PX_VF_PEND(i) (0x4D0 + 4 * (i)) /* i = [0,3]*/ +#define PX_PF_BME 0x4B8 + +#define TXGBE_MIS_RST_SW_RST 0x00000001U +#define TXGBE_MIS_RST_LAN0_RST 0x00000002U +#define TXGBE_MIS_RST_LAN1_RST 0x00000004U +#define TXGBE_MIS_RST_LAN0_EPHY_RST 0x00080000U +#define TXGBE_MIS_RST_LAN1_EPHY_RST 0x00010000U +#define TXGBE_MIS_RST_LAN0_MAC_RST 0x00100000U +#define TXGBE_MIS_RST_LAN1_MAC_RST 0x00020000U +#define TXGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define TXGBE_MIS_RST_LAN1_CHG_ETH_MODE 
0x40000000U +#define TXGBE_MIS_RST_GLOBAL_RST 0x80000000U +#define TXGBE_MIS_RST_MASK (TXGBE_MIS_RST_SW_RST | \ + TXGBE_MIS_RST_LAN0_RST | \ + TXGBE_MIS_RST_LAN1_RST) +#define TXGBE_MIS_PWR_LAN_ID(_r) ((0xC0000000U & (_r)) >> 30) +#define TXGBE_MIS_PWR_LAN_ID_0 (1) +#define TXGBE_MIS_PWR_LAN_ID_1 (2) +#define TXGBE_MIS_PWR_LAN_ID_A (3) +#define TXGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define TXGBE_MIS_ST_MNG_VETO 0x00000100U +#define TXGBE_MIS_ST_LAN0_ECC 0x00010000U +#define TXGBE_MIS_ST_LAN1_ECC 0x00020000U +#define TXGBE_MIS_ST_MNG_ECC 0x00040000U +#define TXGBE_MIS_ST_PCORE_ECC 0x00080000U +#define TXGBE_MIS_ST_PCIWRP_ECC 0x00100000U +#define TXGBE_MIS_SWSM_SMBI 1 +#define TXGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define TXGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define TXGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define TXGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define TXGBE_MIS_PF_SM_SM 1 +#define TXGBE_MIS_PRB_CTL_LAN0_UP 0x2 +#define TXGBE_MIS_PRB_CTL_LAN1_UP 0x1 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define TXGBE_TS_CTL 0x10300 +#define TXGBE_TS_EN 0x10304 +#define TXGBE_TS_ST 0x10308 +#define TXGBE_TS_ALARM_THRE 0x1030C +#define TXGBE_TS_DALARM_THRE 0x10310 +#define TXGBE_TS_INT_EN 0x10314 +#define TXGBE_TS_ALARM_ST 0x10318 +#define TXGBE_TS_ALARM_ST_DALARM 0x00000002U +#define TXGBE_TS_ALARM_ST_ALARM 0x00000001U + +#define TXGBE_TS_CTL_EVAL_MD 0x80000000U +#define TXGBE_TS_EN_ENA 0x00000001U +#define TXGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define TXGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define TXGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U + +/* Sensors for AMLITE PVT(Process Voltage Temperature) */ +#define TXGBE_AML_INTR_RAW_HI 0x10300 +#define TXGBE_AML_INTR_RAW_ME 0x10304 +#define TXGBE_AML_INTR_RAW_LO 0x10308 +#define TXGBE_AML_TS_CTL1 0x10330 +#define TXGBE_AML_TS_CTL2 0x10334 +#define TXGBE_AML_TS_ENA 0x10338 +#define TXGBE_AML_TS_STS 0x1033C +#define TXGBE_AML_INTR_HIGH_EN 0x10318 +#define TXGBE_AML_INTR_MED_EN 0x1031C +#define TXGBE_AML_INTR_LOW_EN 0x10320 +#define TXGBE_AML_INTR_HIGH_STS 0x1030C +#define TXGBE_AML_INTR_MED_STS 0x10310 +#define TXGBE_AML_INTR_LOW_STS 0x10314 + +#define TXGBE_AML_TS_STS_VLD 0x1000 +#define TXGBE_AML_INTR_EN_HI 0x00000002U +#define TXGBE_AML_INTR_EN_ME 0x00000001U +#define TXGBE_AML_INTR_EN_LO 0x00000001U +#define TXGBE_AML_INTR_CL_HI 0x00000002U +#define TXGBE_AML_INTR_CL_ME 0x00000001U +#define TXGBE_AML_INTR_CL_LO 0x00000001U +#define TXGBE_AML_EVAL_MODE_MASK 0x010U +#define TXGBE_AML_CAL_MODE_MASK 0x08U +#define TXGBE_AML_ALARM_THRE_MASK 0x1FFE0000U +#define TXGBE_AML_DALARM_THRE_MASK 0x0001FFE0U + +struct txgbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct txgbe_thermal_sensor_data { + struct txgbe_thermal_diode_data sensor; +}; + +/* FMGR Registers */ +#define TXGBE_SPI_ILDR_STATUS 0x10120 +#define TXGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define TXGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset is done */ +#define TXGBE_SPI_ILDR_STATUS_SW_RESET 0x00000080U /* software reset is done */ +#define 
TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00000200U /* lan0 soft reset done */ +#define TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00000400U /* lan1 soft reset done */ + +#define TXGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define TXGBE_SPI_CMD 0x10104 +#define TXGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define TXGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define TXGBE_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define TXGBE_SPI_DATA 0x10108 +#define TXGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define TXGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define TXGBE_SPI_DATA_OP_DONE ((0x1)) + +#define TXGBE_SPI_STATUS 0x1010C +#define TXGBE_SPI_STATUS_OPDONE ((0x1)) +#define TXGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define TXGBE_SPI_USR_CMD 0x10110 +#define TXGBE_SPI_CMDCFG0 0x10114 +#define TXGBE_SPI_CMDCFG1 0x10118 +#define TXGBE_SPI_ECC_CTL 0x10130 +#define TXGBE_SPI_ECC_INJ 0x10134 +#define TXGBE_SPI_ECC_ST 0x10138 +#define TXGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ +/* I2C registers */ +#define TXGBE_I2C_CON 0x14900 /* I2C Control */ +#define TXGBE_I2C_CON_SLAVE_DISABLE ((1 << 6)) +#define TXGBE_I2C_CON_RESTART_EN ((1 << 5)) +#define TXGBE_I2C_CON_10BITADDR_MASTER ((1 << 4)) +#define TXGBE_I2C_CON_10BITADDR_SLAVE ((1 << 3)) +#define TXGBE_I2C_CON_SPEED(_v) (((_v) & 0x3) << 1) +#define TXGBE_I2C_CON_MASTER_MODE ((1 << 0)) +#define TXGBE_I2C_TAR 0x14904 /* I2C Target Address */ +#define TXGBE_I2C_DATA_CMD 0x14910 /* I2C Rx/Tx Data Buf and Cmd */ +#define TXGBE_I2C_DATA_CMD_STOP ((1 << 9)) +#define TXGBE_I2C_DATA_CMD_READ ((1 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_DATA_CMD_WRITE ((0 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_SS_SCL_HCNT 0x14914 +#define TXGBE_I2C_SS_SCL_LCNT 0x14918 +#define TXGBE_I2C_FS_SCL_HCNT 0x1491C +#define TXGBE_I2C_FS_SCL_LCNT 0x14920 +#define TXGBE_I2C_HS_SCL_HCNT 0x14924 +#define TXGBE_I2C_HS_SCL_LCNT 0x14928 +#define TXGBE_I2C_INTR_STAT 0x1492C +#define TXGBE_I2C_RAW_INTR_STAT 0x14934 +#define TXGBE_I2C_INTR_STAT_RX_FULL ((0x1) << 2) +#define TXGBE_I2C_INTR_STAT_TX_EMPTY ((0x1) << 4) +#define TXGBE_I2C_INTR_MASK 0x14930 +#define TXGBE_I2C_RX_TL 0x14938 +#define TXGBE_I2C_TX_TL 0x1493C +#define TXGBE_I2C_CLR_INTR 0x14940 +#define TXGBE_I2C_CLR_RX_UNDER 0x14944 +#define TXGBE_I2C_CLR_RX_OVER 0x14948 +#define TXGBE_I2C_CLR_TX_OVER 0x1494C +#define TXGBE_I2C_CLR_RD_REQ 0x14950 +#define TXGBE_I2C_CLR_TX_ABRT 0x14954 +#define TXGBE_I2C_CLR_RX_DONE 0x14958 +#define TXGBE_I2C_CLR_ACTIVITY 0x1495C +#define TXGBE_I2C_CLR_STOP_DET 0x14960 +#define TXGBE_I2C_CLR_START_DET 0x14964 +#define TXGBE_I2C_CLR_GEN_CALL 0x14968 +#define TXGBE_I2C_ENABLE 0x1496C +#define TXGBE_I2C_STATUS 0x14970 +#define TXGBE_I2C_STATUS_MST_ACTIVITY ((1U << 5)) +#define TXGBE_I2C_TXFLR 0x14974 +#define TXGBE_I2C_RXFLR 0x14978 +#define TXGBE_I2C_SDA_HOLD 0x1497C +#define TXGBE_I2C_SDA_RX_HOLD 0xff0000 +#define TXGBE_I2C_SDA_TX_HOLD 0xffff + +#define TXGBE_I2C_TX_ABRT_SOURCE 0x14980 +#define TXGBE_I2C_SDA_SETUP 0x14994 +#define TXGBE_I2C_ENABLE_STATUS 0x1499C +#define TXGBE_I2C_FS_SPKLEN 0x149A0 +#define TXGBE_I2C_HS_SPKLEN 0x149A4 +#define TXGBE_I2C_SCL_STUCK_TIMEOUT 0x149AC +#define TXGBE_I2C_SDA_STUCK_TIMEOUT 0x149B0 +#define TXGBE_I2C_CLR_SCL_STUCK_DET 0x149B4 +#define TXGBE_I2C_DEVICE_ID 0x149b8 +#define TXGBE_I2C_COMP_PARAM_1 0x149f4 +#define TXGBE_I2C_COMP_VERSION 0x149f8 +#define TXGBE_I2C_COMP_TYPE 0x149fc + +#define TXGBE_I2C_SLAVE_ADDR (0xA0 >> 1) +#define TXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 + +/* port cfg Registers */ +#define 
TXGBE_CFG_PORT_CTL 0x14400 +#define TXGBE_CFG_PORT_ST 0x14404 +#define TXGBE_CFG_EX_VTYPE 0x14408 +#define TXGBE_CFG_LED_CTL 0x14424 +#define TXGBE_CFG_VXLAN 0x14410 +#define TXGBE_CFG_VXLAN_GPE 0x14414 +#define TXGBE_CFG_GENEVE 0x14418 +#define TXGBE_CFG_TEREDO 0x1441C +#define TXGBE_CFG_TCP_TIME 0x14420 +#define TXGBE_LINKUP_FILTER 0x14428 +#define TXGBE_LINKUP_FILTER_TIME 30 +#define TXGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) + +/*AML LINK STATUS OVERWRITE*/ +#define TXGBE_AML_EPCS_MISC_CTL 0x13240 +#define TXGBE_AML_LINK_STATUS_OVRD_EN 0x00000020 +#define TXGBE_AML_LINK_STATUS_OVRD_VAL 0x00000010 + +/* port cfg bit */ +#define TXGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define TXGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define TXGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define TXGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define TXGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define TXGBE_CFG_PORT_CTL_FORCE_LKUP 0x00000010U /* force link up */ +#define TXGBE_CFG_PORT_CTL_DCB_EN 0x00000400U /* dcb enabled */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_MASK 0x00000800U /* number of TCs */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_4 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_TC_8 0x00000800U +#define TXGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00003000U /* number of TVs */ +#define TXGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_16 0x00001000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_32 0x00002000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_64 0x00003000U +/* Status Bit */ +#define TXGBE_CFG_PORT_ST_LINK_UP 0x00000001U +#define TXGBE_CFG_PORT_ST_LINK_10G 0x00000002U +#define TXGBE_CFG_PORT_ST_LINK_1G 0x00000004U +#define TXGBE_CFG_PORT_ST_LINK_100M 0x00000008U +#define TXGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000100U & (_r)) >> 8) +#define TXGBE_LINK_UP_TIME 90 + +/* amlite: diff from sapphire */ +#define TXGBE_CFG_PORT_ST_AML_LINK_10G 0x00000010U +#define TXGBE_CFG_PORT_ST_AML_LINK_25G 0x00000008U +#define TXGBE_CFG_PORT_ST_AML_LINK_40G 0x00000004U +#define TXGBE_CFG_PORT_ST_AML_LINK_50G 0x00000002U + +/* LED CTL Bit */ +#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL 0x00000010U +#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U +#define TXGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000004U +#define TXGBE_CFG_LED_CTL_LINK_10G_SEL 0x00000002U +#define TXGBE_CFG_LED_CTL_LINK_UP_SEL 0x00000001U +#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 + +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL 0x00000020U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL 0x00000010U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL 0x00000008U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL 0x00000004U +#define TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL 0x00000002U + +/* LED modes */ +#define TXGBE_LED_LINK_UP TXGBE_CFG_LED_CTL_LINK_UP_SEL +#define TXGBE_LED_LINK_10G TXGBE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_LED_LINK_ACTIVE TXGBE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_LED_LINK_1G TXGBE_CFG_LED_CTL_LINK_1G_SEL +#define TXGBE_LED_LINK_100M TXGBE_CFG_LED_CTL_LINK_100M_SEL + +#define TXGBE_AMLITE_LED_LINK_ACTIVE TXGBE_AMLITE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_AMLITE_LED_LINK_10G TXGBE_AMLITE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_AMLITE_LED_LINK_25G TXGBE_AMLITE_CFG_LED_CTL_LINK_25G_SEL +#define TXGBE_AMLITE_LED_LINK_40G TXGBE_AMLITE_CFG_LED_CTL_LINK_40G_SEL +#define TXGBE_AMLITE_LED_LINK_50G TXGBE_AMLITE_CFG_LED_CTL_LINK_50G_SEL + +/* GPIO Registers */ +#define TXGBE_GPIO_DR 0x14800 +#define TXGBE_GPIO_DDR 0x14804 +#define TXGBE_GPIO_CTL 0x14808 +#define TXGBE_GPIO_INTEN 0x14830 +#define TXGBE_GPIO_INTMASK 0x14834 
+#define TXGBE_GPIO_INTTYPE_LEVEL 0x14838
+#define TXGBE_GPIO_INT_POLARITY 0x1483C
+#define TXGBE_GPIO_INTSTATUS 0x14844
+#define TXGBE_GPIO_DEBOUNCE 0x14848
+#define TXGBE_GPIO_EOI 0x1484C
+#define TXGBE_GPIO_EXT 0x14850
+
+/* GPIO bit */
+#define TXGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */
+#define TXGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */
+#define TXGBE_GPIO_DR_2 0x00000004U /* SDP2 Data Value */
+#define TXGBE_GPIO_DR_3 0x00000008U /* SDP3 Data Value */
+#define TXGBE_GPIO_DR_4 0x00000010U /* SDP4 Data Value */
+#define TXGBE_GPIO_DR_5 0x00000020U /* SDP5 Data Value */
+#define TXGBE_GPIO_DR_6 0x00000040U /* SDP6 Data Value */
+#define TXGBE_GPIO_DR_7 0x00000080U /* SDP7 Data Value */
+#define TXGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */
+#define TXGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */
+#define TXGBE_GPIO_DDR_2 0x00000004U /* SDP2 IO direction */
+#define TXGBE_GPIO_DDR_3 0x00000008U /* SDP3 IO direction */
+#define TXGBE_GPIO_DDR_4 0x00000010U /* SDP4 IO direction */
+#define TXGBE_GPIO_DDR_5 0x00000020U /* SDP5 IO direction */
+#define TXGBE_GPIO_DDR_6 0x00000040U /* SDP6 IO direction */
+#define TXGBE_GPIO_DDR_7 0x00000080U /* SDP7 IO direction */
+#define TXGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */
+#define TXGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */
+#define TXGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */
+#define TXGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */
+#define TXGBE_GPIO_INTEN_4 0x00000010U /* SDP4 interrupt enable */
+#define TXGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */
+#define TXGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */
+#define TXGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_4 0x00000010U /* SDP4 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */
+#define TXGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */
+#define TXGBE_GPIO_INT_POLARITY_3 0x00000008U
+#define TXGBE_GPIO_INT_POLARITY_4 0x00000010U
+#define TXGBE_GPIO_INT_DEBOUNCE_2 0x00000004U
+#define TXGBE_GPIO_INT_DEBOUNCE_3 0x00000008U
+#define TXGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_4 0x00000010U /* SDP4 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */
+#define TXGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */
+#define TXGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */
+#define TXGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */
+#define TXGBE_GPIO_EOI_4 0x00000010U /* SDP4 interrupt clear */
+#define TXGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */
+#define TXGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */
+#define TXGBE_SFP1_MOD_ABS_LS 0x00000004U /* GPIO_EXT SFP ABSENT */
+#define TXGBE_SFP1_RX_LOS_LS 0x00000008U /* GPIO_EXT RX LOSS */
+
+#define TXGBE_SFP1_MOD_PRST_LS 0x00000010U /* GPIO_EXT SFP PRESENT */
+
+/* TPH registers */
+#define TXGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */
+#define TXGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */
+#define TXGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header */
+#define TXGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access
*/ +/* TPH bit */ +#define TXGBE_CFG_TPH_TDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RHDR_EN 0x00008000U +#define TXGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define TXGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define TXGBE_CFG_TPH_RPL_EN 0x80000000U +#define TXGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define TXGBE_TDM_CTL 0x18000 +#define TXGBE_TDM_VF_TE(_i) (0x18004 + ((_i) * 4)) +#define TXGBE_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4)) +#define TXGBE_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define TXGBE_TDM_LLQ(_i) (0x18040 + ((_i) * 4)) /* 4 of these (0-3) */ +#define TXGBE_TDM_ETYPE_LB_L 0x18050 +#define TXGBE_TDM_ETYPE_LB_H 0x18054 +#define TXGBE_TDM_ETYPE_AS_L 0x18058 +#define TXGBE_TDM_ETYPE_AS_H 0x1805C +#define TXGBE_TDM_MAC_AS_L 0x18060 +#define TXGBE_TDM_MAC_AS_H 0x18064 +#define TXGBE_TDM_VLAN_AS_L 0x18070 +#define TXGBE_TDM_VLAN_AS_H 0x18074 +#define TXGBE_TDM_TCP_FLG_L 0x18078 +#define TXGBE_TDM_TCP_FLG_H 0x1807C +#define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ + +/* TDM CTL BIT */ +#define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define TXGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define TXGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define TXGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define TXGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ +#define TXGBE_TDM_VLAN_INS_TPID_SEL_SHIFT 24 /*Tag tpid sel*/ + +#define TXGBE_TDM_RP_CTL 0x18400 +#define TXGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define TXGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define TXGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define TXGBE_TDM_RP_IDX 0x1820C +#define TXGBE_TDM_RP_RATE 0x18404 +#define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +#define TXGBE_TDM_RL_QUEUE_IDX 0x18210 +#define TXGBE_TDM_RL_QUEUE_CFG 0x18214 + +#define TXGBE_TDM_RL_VM_IDX 0x18218 +#define TXGBE_TDM_RL_VM_CFG 0x1821C +#define TXGBE_TDM_RL_CFG 0x18400 +#define TXGBE_TDM_RL_EN 0x00000001U +#define TXGBE_TDM_FACTOR_INT 0x00000001U +#define TXGBE_TDM_FACTOR_FRA 0x00000001U +#define TXGBE_TDM_FACTOR_INT_SHIFT 16 +#define TXGBE_TDM_FACTOR_FRA_SHIFT 2 +#define TXGBE_TDM_FACTOR_INT_MASK 0xffff0000 +#define TXGBE_TDM_FACTOR_FRA_MASK 0xfffc + +#define TXGBE_TDM_RL_EN 0x00000001U + +/* qos */ +#define TXGBE_TDM_PBWARB_CTL 0x18200 +#define TXGBE_TDM_PBWARB_CFG(_i) (0x18220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_TDM_MMW 0x18208 +#define TXGBE_TDM_VM_CREDIT(_i) (0x18500 + ((_i) * 4)) +#define TXGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) +/* fcoe */ +#define TXGBE_TDM_FC_EOF 0x18384 +#define TXGBE_TDM_FC_SOF 0x18380 +/* etag */ +#define TXGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 64 of these 0 - 63 */ +/* statistic */ +#define TXGBE_TDM_DRP_CNT 0x18300 +#define TXGBE_TDM_SEC_DRP 0x18304 +#define TXGBE_TDM_PKT_CNT 0x18308 +#define TXGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define TXGBE_RDM_ARB_CTL 0x12000 +#define TXGBE_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) 
+#define TXGBE_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) +#define TXGBE_RDM_RSC_CTL 0x1200C +#define TXGBE_RDM_ARB_CFG(_i) (0x12040 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) +#define TXGBE_RDM_PF_HIDE(_i) (0x12090 + ((_i) * 4)) +/* VFRE bitmask */ +#define TXGBE_RDM_VF_RE_ENABLE_ALL 0xFFFFFFFFU + +#define TXGBE_RDM_DCACHE_CTL 0x120A8 +#define TXGBE_RDM_DCACHE_CTL_EN 0x1 +#define TXGBE_RDM_RSC_CTL_FREE_CNT_DIS 0x100 + +/* amlite: rdm_rsc_ctl_free_ctl */ +#define TXGBE_RDM_RSC_CTL_FREE_CTL 0x00000080U + +/* FCoE DMA Context Registers */ +#define TXGBE_RDM_FCPTRL 0x12410 +#define TXGBE_RDM_FCPTRH 0x12414 +#define TXGBE_RDM_FCBUF 0x12418 +#define TXGBE_RDM_FCBUF_VALID ((0x1)) /* DMA Context Valid */ +#define TXGBE_RDM_FCBUF_SIZE(_v) (((_v) & 0x3) << 3) /* User Buffer Size */ +#define TXGBE_RDM_FCBUF_COUNT(_v) (((_v) & 0xFF) << 8) /* Num of User Buf */ +#define TXGBE_RDM_FCBUF_OFFSET(_v) (((_v) & 0xFFFF) << 16) /* User Buf Offset*/ +#define TXGBE_RDM_FCRW 0x12420 +#define TXGBE_RDM_FCRW_FCSEL(_v) (((_v) & 0x1FF)) /* FC X_ID: 11 bits */ +#define TXGBE_RDM_FCRW_WE ((0x1) << 14) /* Write enable */ +#define TXGBE_RDM_FCRW_RE ((0x1) << 15) /* Read enable */ +#define TXGBE_RDM_FCRW_LASTSIZE(_v) (((_v) & 0xFFFF) << 16) + +/* statistic */ +#define TXGBE_RDM_DRP_PKT 0x12500 +#define TXGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define TXGBE_RDB_RFCV(_i) (0x19200 + ((_i) * 4)) /* 4 of these (0-3)*/ +#define TXGBE_RDB_RFCL(_i) (0x19220 + ((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCH(_i) (0x19260 + ((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCRT 0x192A0 +#define TXGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define TXGBE_RDB_PB_WRAP 0x19004 +#define TXGBE_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define TXGBE_RDB_PB_CTL 0x19000 +#define TXGBE_RDB_UP2TC 0x19008 +#define TXGBE_RDB_PB_SZ_SHIFT 10 +#define TXGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define TXGBE_RDB_LLI_THRE 0x19080 +#define TXGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define TXGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define TXGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define TXGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define TXGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define TXGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) +#define TXGBE_RDB_RSS_TC 0x194F0 +#define TXGBE_RDB_RA_CTL 0x194F4 +#define TXGBE_RDB_5T_SA(_i) (0x19600 + ((_i) * 4)) /* Src Addr Q Filter */ +#define TXGBE_RDB_5T_DA(_i) (0x19800 + ((_i) * 4)) /* Dst Addr Q Filter */ +#define TXGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define TXGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define TXGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define TXGBE_RDB_SYN_CLS 0x19130 +#define TXGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*128 of these (0-127)*/ +/* Flow Director registers */ +#define TXGBE_RDB_FDIR_CTL 0x19500 +#define TXGBE_RDB_FDIR_HKEY 0x19568 +#define TXGBE_RDB_FDIR_SKEY 0x1956C +#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C +#define TXGBE_RDB_FDIR_SA4_MSK 0x19540 +#define TXGBE_RDB_FDIR_TCP_MSK 0x19544 +#define TXGBE_RDB_FDIR_UDP_MSK 0x19548 +#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560 +#define TXGBE_RDB_FDIR_IP6_MSK 0x19574 +#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570 +#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4)) +/* Flow Director Stats registers */ +#define TXGBE_RDB_FDIR_FREE 0x19538 +#define 
TXGBE_RDB_FDIR_LEN 0x1954C +#define TXGBE_RDB_FDIR_USE_ST 0x19550 +#define TXGBE_RDB_FDIR_FAIL_ST 0x19554 +#define TXGBE_RDB_FDIR_MATCH 0x19558 +#define TXGBE_RDB_FDIR_MISS 0x1955C +/* Flow Director Programming registers */ +#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 3 of these (0-2)*/ +#define TXGBE_RDB_FDIR_SA 0x19518 +#define TXGBE_RDB_FDIR_DA 0x1951C +#define TXGBE_RDB_FDIR_PORT 0x19520 +#define TXGBE_RDB_FDIR_FLEX 0x19524 +#define TXGBE_RDB_FDIR_HASH 0x19528 +#define TXGBE_RDB_FDIR_CMD 0x1952C +/* VM RSS */ +#define TXGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define TXGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* FCoE Redirection */ +#define TXGBE_RDB_FCRE_TBL_SIZE (8) /* Max entries in FCRETA */ +#define TXGBE_RDB_FCRE_CTL 0x19140 +#define TXGBE_RDB_FCRE_CTL_ENA ((0x1)) /* FCoE Redir Table Enable */ +#define TXGBE_RDB_FCRE_TBL(_i) (0x19160 + ((_i) * 4)) +#define TXGBE_RDB_FCRE_TBL_RING(_v) (((_v) & 0x7F)) /* output queue number */ +/* statistic */ +#define TXGBE_RDB_MPCNT(_i) (0x19040 + ((_i) * 4)) /* 8 of 3FA0-3FBC*/ +#define TXGBE_RDB_PKT_CNT 0x19060 +#define TXGBE_RDB_DRP_CNT 0x19068 +#define TXGBE_RDB_LXONTXC 0x1921C +#define TXGBE_RDB_LXOFFTXC 0x19218 +#define TXGBE_RDB_PXON2OFFCNT(_i) (0x19280 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_RDB_PXONTXC(_i) (0x192E0 + ((_i) * 4)) /* 8 of 3F00-3F1C*/ +#define TXGBE_RDB_PXOFFTXC(_i) (0x192C0 + ((_i) * 4)) /* 8 of 3F20-3F3C*/ +#define TXGBE_RDB_PFCMACDAL 0x19210 +#define TXGBE_RDB_PFCMACDAH 0x19214 +#define TXGBE_RDB_TXSWERR 0x1906C +#define TXGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define TXGBE_RDB_PL_CFG_L4HDR 0x2 +#define TXGBE_RDB_PL_CFG_L3HDR 0x4 +#define TXGBE_RDB_PL_CFG_L2HDR 0x8 +#define TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define TXGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +#define TXGBE_RDB_PL_CFG_RSS_PL_MASK 0x7 +#define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT 29 +#define TXGBE_RDB_PL_CFG_RSS_EN 0x1000000 +#define TXGBE_RDB_PL_CFG_RSS_MASK 0xFF0000 +/* RQTC Bit Masks and Shifts */ +#define TXGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define TXGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define TXGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define TXGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define TXGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define TXGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define TXGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define TXGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define TXGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define TXGBE_MAX_PACKET_BUFFERS 8 +#define TXGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* FCRTL Bit Masks */ +#define TXGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define TXGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define TXGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ +#define TXGBE_RDB_RFCC_RFCE_PRIORITY 0x00000010U /* Tx priority FC enable */ + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */
+#define TXGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */
+#define TXGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */
+#define TXGBE_RDB_5T_CTL1_RING_MASK 0x0FE00000U /* Rx queue index mask */
+#define TXGBE_RDB_5T_CTL1_RING_SHIFT 21
+#define TXGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */
+#define TXGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */
+#define TXGBE_RDB_LLI_THRE_CMN_EN 0x00100000U /* cmn packet received */
+
+#define TXGBE_MAX_RDB_5T_CTL0_FILTERS 128
+#define TXGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U
+#define TXGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U
+#define TXGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U
+#define TXGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2
+#define TXGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U
+#define TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2
+#define TXGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU
+#define TXGBE_RDB_5T_CTL0_POOL_SHIFT 8
+#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x0000001FU
+#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 25
+#define TXGBE_RDB_5T_CTL0_SOURCE_ADDR_MASK 0x1E
+#define TXGBE_RDB_5T_CTL0_DEST_ADDR_MASK 0x1D
+#define TXGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B
+#define TXGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x17
+#define TXGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F
+#define TXGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U
+#define TXGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U
+
+#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */
+#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16
+#define TXGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */
+#define TXGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */
+
+/* Receive Config masks */
+#define TXGBE_RDB_PB_CTL_RXEN (0x80000000) /* Enable Receiver */
+#define TXGBE_RDB_PB_CTL_DISABLED 0x1
+
+#define TXGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */
+#define TXGBE_RDB_RA_CTL_MULTI_RSS 0x00000001U /* VF RSS Hash Rule Enable */
+#define TXGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U
+#define TXGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U
+
+enum txgbe_fdir_pballoc_type {
+	TXGBE_FDIR_PBALLOC_NONE = 0,
+	TXGBE_FDIR_PBALLOC_64K = 1,
+	TXGBE_FDIR_PBALLOC_128K = 2,
+	TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define TXGBE_RDB_FDIR_CTL_PBALLOC_64K 0x00000001U
+#define TXGBE_RDB_FDIR_CTL_PBALLOC_128K 0x00000002U
+#define TXGBE_RDB_FDIR_CTL_PBALLOC_256K 0x00000003U
+#define TXGBE_RDB_FDIR_CTL_INIT_DONE 0x00000008U
+#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH 0x00000010U
+#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS 0x00000020U
+#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS_ALWAYS 0x00000080U
+#define TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT 8
+#define TXGBE_RDB_FDIR_CTL_FILTERMODE_SHIFT 21
+#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT 24
+#define TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT 20
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_MASK 0xF0000000U
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT 28
+
+#define TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT 16
+#define TXGBE_RDB_FDIR_UDP_MSK_DPORTM_SHIFT 16
+#define TXGBE_RDB_FDIR_IP6_MSK_DIPM_SHIFT 16
+#define TXGBE_RDB_FDIR_OTHER_MSK_POOL 0x00000004U
+#define TXGBE_RDB_FDIR_OTHER_MSK_L4P 0x00000008U
+#define TXGBE_RDB_FDIR_OTHER_MSK_L3P 0x00000010U
+#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_TYPE 0x00000020U
+#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_OUTIP 0x00000040U
+#define TXGBE_RDB_FDIR_OTHER_MSK_TUN 0x00000080U
+
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC 0x00000000U
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_IP 0x00000001U
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_HDR 0x00000002U
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_PAYLOAD 0x00000003U
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK 0x00000003U
+#define TXGBE_RDB_FDIR_FLEX_CFG_MSK 0x00000004U
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST 0x000000F8U
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT 3
+#define TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT 8
+
+#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16
+#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16
+#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT 15
+#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT 16
+
+#define TXGBE_RDB_FDIR_CMD_CMD_MASK 0x00000003U
+#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW 0x00000001U
+#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW 0x00000002U
+#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT 0x00000003U
+#define TXGBE_RDB_FDIR_CMD_FILTER_VALID 0x00000004U
+#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE 0x00000008U
+#define TXGBE_RDB_FDIR_CMD_IPv6DMATCH 0x00000010U
+#define TXGBE_RDB_FDIR_CMD_L4TYPE_UDP 0x00000020U
+#define TXGBE_RDB_FDIR_CMD_L4TYPE_TCP 0x00000040U
+#define TXGBE_RDB_FDIR_CMD_L4TYPE_SCTP 0x00000060U
+#define TXGBE_RDB_FDIR_CMD_IPV6 0x00000080U
+#define TXGBE_RDB_FDIR_CMD_CLEARHT 0x00000100U
+#define TXGBE_RDB_FDIR_CMD_DROP 0x00000200U
+#define TXGBE_RDB_FDIR_CMD_INT 0x00000400U
+#define TXGBE_RDB_FDIR_CMD_LAST 0x00000800U
+#define TXGBE_RDB_FDIR_CMD_COLLISION 0x00001000U
+#define TXGBE_RDB_FDIR_CMD_QUEUE_EN 0x00008000U
+#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT 5
+#define TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT 16
+#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER_SHIFT 23
+#define TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT 24
+#define TXGBE_RDB_FDIR_INIT_DONE_POLL 10
+#define TXGBE_RDB_FDIR_CMD_CMD_POLL 10
+#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER 0x00800000U
+#define TXGBE_RDB_FDIR_DROP_QUEUE 127
+#define TXGBE_FDIR_INIT_DONE_POLL 10
+
+/******************************* PSR Registers *******************************/
+/* psr control */
+#define TXGBE_PSR_CTL 0x15000
+#define TXGBE_PSR_VLAN_CTL 0x15088
+#define TXGBE_PSR_VM_CTL 0x151B0
+#define TXGBE_PSR_PKT_CNT 0x151B8
+#define TXGBE_PSR_DBG_DRP_CNT 0x151C0
+/* Header split receive */
+#define TXGBE_PSR_CTL_SW_EN 0x00040000U
+#define TXGBE_PSR_CTL_RSC_DIS 0x00010000U
+#define TXGBE_PSR_CTL_RSC_ACK 0x00020000U
+#define TXGBE_PSR_CTL_PCSD 0x00002000U
+#define TXGBE_PSR_CTL_IPPCSE 0x00001000U
+#define TXGBE_PSR_CTL_BAM 0x00000400U
+#define TXGBE_PSR_CTL_UPE 0x00000200U
+#define TXGBE_PSR_CTL_MPE 0x00000100U
+#define TXGBE_PSR_CTL_MFE 0x00000080U
+#define TXGBE_PSR_CTL_MO 0x00000060U
+#define TXGBE_PSR_CTL_TPE 0x00000010U
+#define TXGBE_PSR_CTL_MO_SHIFT 5
+/* VT_CTL bitmasks */
+#define TXGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */
+#define TXGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */
+#define TXGBE_PSR_VM_CTL_POOL_SHIFT 7
+#define TXGBE_PSR_VM_CTL_POOL_MASK (0x3F << TXGBE_PSR_VM_CTL_POOL_SHIFT)
+/* VLAN Control Bit Masks */
+#define TXGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */
+#define TXGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */
+#define TXGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */
+#define TXGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */
+
+/* vm L2 control */
+#define TXGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
+/* VMOLR bitmasks */
+#define TXGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */
+#define TXGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */
+#define TXGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */
+#define TXGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */
+#define TXGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */
+#define TXGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */
+#define TXGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */
+#define TXGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/
+#define TXGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/
+#define TXGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/
+#define TXGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */
+
+/* etype switcher 1st stage */
+#define TXGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */
+/* ETYPE Queue Filter/Select Bit Masks */
+#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 2 /* now only support 2 custom filters */
+#define TXGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */
+#define TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */
+#define TXGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */
+#define TXGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */
+#define TXGBE_PSR_ETYPE_SWC_POOL_ENABLE BIT(26) /* bit 26 */
+#define TXGBE_PSR_ETYPE_SWC_POOL_SHIFT 20
+
+#define TXGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0
+#define TXGBE_PSR_ETYPE_SWC_FILTER_FCOE 2
+#define TXGBE_PSR_ETYPE_SWC_FILTER_1588 3
+#define TXGBE_PSR_ETYPE_SWC_FILTER_FIP 4
+#define TXGBE_PSR_ETYPE_SWC_FILTER_LLDP 5
+#define TXGBE_PSR_ETYPE_SWC_FILTER_LACP 6
+#define TXGBE_PSR_ETYPE_SWC_FILTER_FC 7
+
+/* mcast/ucast overflow tbl */
+#define TXGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define TXGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+
+/* vlan tbl */
+#define TXGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4))
+
+/* mac switcher */
+#define TXGBE_PSR_MAC_SWC_AD_L 0x16200
+#define TXGBE_PSR_MAC_SWC_AD_H 0x16204
+#define TXGBE_PSR_MAC_SWC_VM_L 0x16208
+#define TXGBE_PSR_MAC_SWC_VM_H 0x1620C
+#define TXGBE_PSR_MAC_SWC_IDX 0x16210
+/* RAH */
+#define TXGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF))
+#define TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30)
+#define TXGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U
+#define TXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+
+/* vlan switch */
+#define TXGBE_PSR_VLAN_SWC 0x16220
+#define TXGBE_PSR_VLAN_SWC_VM_L 0x16224
+#define TXGBE_PSR_VLAN_SWC_VM_H 0x16228
+#define TXGBE_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
+/* VLAN pool filtering masks */
+#define TXGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */
+#define TXGBE_PSR_VLAN_SWC_ENTRIES 64
+#define TXGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU
+#define TXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* cloud switch */
+#define TXGBE_PSR_CL_SWC_DST0 0x16240
+#define TXGBE_PSR_CL_SWC_DST1 0x16244
+#define TXGBE_PSR_CL_SWC_DST2 0x16248
+#define TXGBE_PSR_CL_SWC_DST3 0x1624c
+#define TXGBE_PSR_CL_SWC_KEY 0x16250
+#define TXGBE_PSR_CL_SWC_CTL 0x16254
+#define TXGBE_PSR_CL_SWC_VM_L 0x16258
+#define TXGBE_PSR_CL_SWC_VM_H 0x1625c
+#define TXGBE_PSR_CL_SWC_IDX 0x16260
+
+#define TXGBE_PSR_CL_SWC_CTL_VLD 0x80000000U
+#define TXGBE_PSR_CL_SWC_CTL_DST_MSK 0x00000002U
+#define TXGBE_PSR_CL_SWC_CTL_KEY_MSK 0x00000001U
+
+/* FCoE SOF/EOF */
+#define TXGBE_PSR_FC_EOF 0x15158
+#define TXGBE_PSR_FC_SOF 0x151F8
+/* FCoE Filter Context Registers */
+#define TXGBE_PSR_FC_FLT_CTXT 0x15108
+#define TXGBE_PSR_FC_FLT_CTXT_VALID ((0x1)) /* Filter Context Valid */
+#define TXGBE_PSR_FC_FLT_CTXT_FIRST ((0x1) << 1) /* Filter First */
+#define TXGBE_PSR_FC_FLT_CTXT_WR ((0x1) << 2) /* Write/Read Context */
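The PSR VLAN table above is a plain bitmap: judging by the 4-byte register stride, VLAN id N maps to bit (N % 32) of TXGBE_PSR_VLAN_TBL(N / 32). A minimal sketch of admitting one VLAN id, assuming the libwx-style rd32()/wr32() MMIO helpers (the 4096-bit table width is inferred, not stated here):

    /* Sketch: set the filter bit for one VLAN id in the PSR VLAN table.
     * The 128-register/4096-bit layout is an assumption from the stride.
     */
    static void txgbe_vlan_tbl_set(struct wx *wx, u16 vid)
    {
            u32 idx = vid / 32;     /* 32 ids per 32-bit table register */
            u32 bits = rd32(wx, TXGBE_PSR_VLAN_TBL(idx));

            bits |= BIT(vid % 32);  /* admit this id */
            wr32(wx, TXGBE_PSR_VLAN_TBL(idx), bits);
    }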
+#define TXGBE_PSR_FC_FLT_CTXT_SEQID(_v) (((_v) & 0xFF) << 8) /* Sequence ID */ +#define TXGBE_PSR_FC_FLT_CTXT_SEQCNT(_v) (((_v) & 0xFFFF) << 16) /* Seq Count */ + +#define TXGBE_PSR_FC_FLT_RW 0x15110 +#define TXGBE_PSR_FC_FLT_RW_FCSEL(_v) (((_v) & 0x1FF)) /* FC OX_ID: 11 bits */ +#define TXGBE_PSR_FC_FLT_RW_RVALDT ((0x1) << 13) /* Fast Re-Validation */ +#define TXGBE_PSR_FC_FLT_RW_WE ((0x1) << 14) /* Write Enable */ +#define TXGBE_PSR_FC_FLT_RW_RE ((0x1) << 15) /* Read Enable */ + +#define TXGBE_PSR_FC_PARAM 0x151D8 + +/* FCoE Receive Control */ +#define TXGBE_PSR_FC_CTL 0x15100 +#define TXGBE_PSR_FC_CTL_FCOELLI ((0x1)) /* Low latency interrupt */ +#define TXGBE_PSR_FC_CTL_SAVBAD ((0x1) << 1) /* Save Bad Frames */ +#define TXGBE_PSR_FC_CTL_FRSTRDH ((0x1) << 2) /* EN 1st Read Header */ +#define TXGBE_PSR_FC_CTL_LASTSEQH ((0x1) << 3) /* EN Last Header in Seq */ +#define TXGBE_PSR_FC_CTL_ALLH ((0x1) << 4) /* EN All Headers */ +#define TXGBE_PSR_FC_CTL_FRSTSEQH ((0x1) << 5) /* EN 1st Seq. Header */ +#define TXGBE_PSR_FC_CTL_ICRC ((0x1) << 6) /* Ignore Bad FC CRC */ +#define TXGBE_PSR_FC_CTL_FCCRCBO ((0x1) << 7) /* FC CRC Byte Ordering */ +#define TXGBE_PSR_FC_CTL_FCOEVER(_v) (((_v) & 0xF) << 8) /* FCoE Version */ + +/* Management */ +#define TXGBE_PSR_MNG_FIT_CTL 0x15820 +/* Management Bit Fields and Masks */ +#define TXGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Management Proxy Enable*/ +#define TXGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC traffic */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28 + +#define TXGBE_PSR_MNG_FLEX_SEL 0x1582C +#define TXGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define TXGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) +#define TXGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define TXGBE_PSR_MR_VLAN_H(_i) (0x15B14 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_H(_i) (0x15B34 + ((_i) * 8)) + +/* 1588 */ +#define TXGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define TXGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define TXGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define TXGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define TXGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define TXGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define TXGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define TXGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define TXGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define TXGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define TXGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define TXGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define TXGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define TXGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define TXGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define TXGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define TXGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define TXGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define TXGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define 
TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define TXGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define TXGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define TXGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define TXGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define TXGBE_PSR_WKUP_CTL 0x15B80 +#define TXGBE_PSR_WKUP_IPV 0x15B84 +#define TXGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define TXGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) +#define TXGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define TXGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define TXGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define TXGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset assertion */ +/* Mask for Ext. 
flex filters */ +#define TXGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define TXGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define TXGBE_TDB_TFCS 0x1CE00 +#define TXGBE_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_TDB_MNG_TC 0x1CD10 +#define TXGBE_TDB_PRB_CTL 0x17010 +#define TXGBE_TDB_PBRARB_CTL 0x1CD00 +#define TXGBE_TDB_UP2TC 0x1C800 +#define TXGBE_TDB_PBRARB_CFG(_i) (0x1CD20 + ((_i) * 4)) /* 8 of (0-7) */ + +#define TXGBE_TDB_PB_SZ_20KB 0x00005000U /* 20KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_40KB 0x0000A000U /* 40KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_MAX 0x00028000U /* 160KB Packet Buffer */ +#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define TXGBE_MAX_PB 8 + +/* statistic */ +#define TXGBE_TDB_OUT_PKT_CNT 0x1CF00 + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define TXGBE_TSC_CTL 0x1D000 +#define TXGBE_TSC_ST 0x1D004 +#define TXGBE_TSC_BUF_AF 0x1D008 +#define TXGBE_TSC_BUF_AE 0x1D00C +#define TXGBE_TSC_PRB_CTL 0x1D010 +#define TXGBE_TSC_MIN_IFG 0x1D020 +/* Security Bit Fields and Masks */ +#define TXGBE_TSC_CTL_SECTX_DIS 0x00000001U +#define TXGBE_TSC_CTL_TX_DIS 0x00000002U +#define TXGBE_TSC_CTL_STORE_FORWARD 0x00000004U +#define TXGBE_TSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_TSC_ST_SECTX_RDY 0x00000001U +#define TXGBE_TSC_ST_OFF_DIS 0x00000002U +#define TXGBE_TSC_ST_ECC_TXERR 0x00000004U +#define TXGBE_TSC_MACTX_AFIFO_RD_WTRMRK 0x000f0000U + +/* LinkSec (MacSec) Registers */ +#define TXGBE_TSC_LSEC_CAP 0x1D200 +#define TXGBE_TSC_LSEC_CTL 0x1D204 +#define TXGBE_TSC_LSEC_SCI_L 0x1D208 +#define TXGBE_TSC_LSEC_SCI_H 0x1D20C +#define TXGBE_TSC_LSEC_SA 0x1D210 +#define TXGBE_TSC_LSEC_PKTNUM0 0x1D214 +#define TXGBE_TSC_LSEC_PKTNUM1 0x1D218 +#define TXGBE_TSC_LSEC_KEY0(_n) 0x1D21C +#define TXGBE_TSC_LSEC_KEY1(_n) 0x1D22C +#define TXGBE_TSC_LSEC_UNTAG_PKT 0x1D23C +#define TXGBE_TSC_LSEC_ENC_PKT 0x1D240 +#define TXGBE_TSC_LSEC_PROT_PKT 0x1D244 +#define TXGBE_TSC_LSEC_ENC_OCTET 0x1D248 +#define TXGBE_TSC_LSEC_PROT_OCTET 0x1D24C + +/* IpSec Registers */ +#define TXGBE_TSC_IPS_IDX 0x1D100 +#define TXGBE_TSC_IPS_IDX_WT 0x80000000U +#define TXGBE_TSC_IPS_IDX_RD 0x40000000U +#define TXGBE_TSC_IPS_IDX_SD_IDX 0x0U /* */ +#define TXGBE_TSC_IPS_IDX_EN 0x00000001U +#define TXGBE_TSC_IPS_SALT 0x1D104 +#define TXGBE_TSC_IPS_KEY(i) (0x1D108 + ((i) * 4)) + +/* 1588 */ +#define TXGBE_TSC_1588_CTL 0x1D400 /* Tx Time Sync Control reg */ +#define TXGBE_TSC_1588_STMPL 0x1D404 /* Tx timestamp value Low */ +#define TXGBE_TSC_1588_STMPH 0x1D408 /* Tx timestamp value High */ +#define TXGBE_TSC_1588_SYSTIML 0x1D40C /* System time register Low */ +#define TXGBE_TSC_1588_SYSTIMH 0x1D410 /* System time register High */ +#define TXGBE_TSC_1588_INC 0x1D414 /* Increment attributes reg */ +#define TXGBE_TSC_1588_INC_IV(v) (((v) & 0xFFFFFF)) +#define TXGBE_TSC_1588_INC_IP(v) (((v) & 0xFF) << 24) +#define TXGBE_TSC_1588_INC_IVP(v, p) \ + (((v) & 0xFFFFFF) | TXGBE_TSC_1588_INC_IP(p)) + +#define TXGBE_TSC_1588_ADJL 0x1D418 /* Time Adjustment Offset reg Low */ +#define TXGBE_TSC_1588_ADJH 0x1D41C /* Time Adjustment 
Offset reg High*/ + +/* 1588 fields */ +#define TXGBE_TSC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define TXGBE_TSC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define TXGBE_TSEC_1588_AUX_CTL 0x1D428 +#define TXGBE_TSEC_1588_TRGT_L(i) (0x1D42C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_TRGT_H(i) (0x1D430 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_L(i) (0x1D43C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_FREQ_CLK_H(i) (0x1D440 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_L(i) (0x1D44C + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_AUX_STMP_H(i) (0x1D450 + ((i) * 8)) /* [0,1] */ +#define TXGBE_TSEC_1588_SDP(n) (0x1D45C + ((n) * 4)) /* [0,3] */ + +#define TXGBE_TSEC_1588_INT_ST 0x1D420 +#define TXGBE_TSEC_1588_INT_EN 0x1D424 + +#define TXGBE_TSEC_1588_INT_ST_TT0 0x10 +#define TXGBE_TSEC_1588_INT_ST_TT1 0x20 +#define TXGBE_TSEC_1588_INT_EN_TT0 0x10 +#define TXGBE_TSEC_1588_INT_EN_TT1 0x20 + +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT0 0x1 +#define TXGBE_TSEC_1588_AUX_CTL_PLSG 0x2 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TT1 0x4 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS0 0x100 +#define TXGBE_TSEC_1588_AUX_CTL_EN_TS1 0x400 + +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT0 0x1 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TT1 0x2 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL0 0x3 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_CL1 0x4 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS0 0x5 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_TS1 0x6 +#define TXGBE_TSEC_1588_SDP_FUN_SEL_MASK 0x7 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_LOW 0x10 +#define TXGBE_TSEC_1588_SDP_OUT_LEVEL_HIGH 0x0 + +/********************************* RSEC **************************************/ +/* general rsec */ +#define TXGBE_RSC_CTL 0x17000 +#define TXGBE_RSC_ST 0x17004 +/* general rsec fields */ +#define TXGBE_RSC_CTL_SECRX_DIS 0x00000001U +#define TXGBE_RSC_CTL_RX_DIS 0x00000002U +#define TXGBE_RSC_CTL_CRC_STRIP 0x00000004U +#define TXGBE_RSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_RSC_CTL_SAVE_MAC_ERR 0x00000040U +#define TXGBE_RSC_ST_RSEC_RDY 0x00000001U +#define TXGBE_RSC_ST_RSEC_OFLD_DIS 0x00000002U +#define TXGBE_RSC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define TXGBE_RSC_LSEC_CAP 0x17200 +#define TXGBE_RSC_LSEC_CTL 0x17204 +#define TXGBE_RSC_LSEC_SCI_L 0x17208 +#define TXGBE_RSC_LSEC_SCI_H 0x1720C +#define TXGBE_RSC_LSEC_SA0 0x17210 +#define TXGBE_RSC_LSEC_SA1 0x17214 +#define TXGBE_RSC_LSEC_PKNUM0 0x17218 +#define TXGBE_RSC_LSEC_PKNUM1 0x1721C +#define TXGBE_RSC_LSEC_KEY0(_n) 0x17220 +#define TXGBE_RSC_LSEC_KEY1(_n) 0x17230 +#define TXGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define TXGBE_RSC_LSEC_DEC_OCTET 0x17244 +#define TXGBE_RSC_LSEC_VLD_OCTET 0x17248 +#define TXGBE_RSC_LSEC_BAD_PKT 0x1724C +#define TXGBE_RSC_LSEC_NOSCI_PKT 0x17250 +#define TXGBE_RSC_LSEC_UNSCI_PKT 0x17254 +#define TXGBE_RSC_LSEC_UNCHK_PKT 0x17258 +#define TXGBE_RSC_LSEC_DLY_PKT 0x1725C +#define TXGBE_RSC_LSEC_LATE_PKT 0x17260 +#define TXGBE_RSC_LSEC_OK_PKT(_n) 0x17264 +#define TXGBE_RSC_LSEC_INV_PKT(_n) 0x17274 +#define TXGBE_RSC_LSEC_BADSA_PKT 0x1727C +#define TXGBE_RSC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define TXGBE_RSC_IPS_IDX 0x17100 +#define TXGBE_RSC_IPS_IDX_WT 0x80000000U +#define TXGBE_RSC_IPS_IDX_RD 0x40000000U +#define TXGBE_RSC_IPS_IDX_TB_IDX 0x0U /* */ +#define TXGBE_RSC_IPS_IDX_TB_IP 0x00000002U +#define TXGBE_RSC_IPS_IDX_TB_SPI 0x00000004U +#define TXGBE_RSC_IPS_IDX_TB_KEY 0x00000006U +#define TXGBE_RSC_IPS_IDX_EN 0x00000001U +#define TXGBE_RSC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define TXGBE_RSC_IPS_SPI 0x17114 
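Reprogramming the Rx security block is normally done with the data path quiesced: set the disable bit in TXGBE_RSC_CTL, then poll TXGBE_RSC_ST until the block reports ready. A hedged sketch of that handshake, assuming libwx-style rd32()/wr32() helpers and an arbitrary poll budget:

    /* Sketch: stop the Rx security data path before touching its tables. */
    static int txgbe_disable_sec_rx_path(struct wx *wx)
    {
            int wait = 4000;        /* assumed budget: 4000 * 10us = 40ms */
            u32 ctl = rd32(wx, TXGBE_RSC_CTL);

            wr32(wx, TXGBE_RSC_CTL, ctl | TXGBE_RSC_CTL_RX_DIS);
            do {
                    if (rd32(wx, TXGBE_RSC_ST) & TXGBE_RSC_ST_RSEC_RDY)
                            return 0;
                    udelay(10);
            } while (--wait);

            return -ETIMEDOUT;
    }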
+#define TXGBE_RSC_IPS_IP_IDX 0x17118 +#define TXGBE_RSC_IPS_KEY(i) (0x1711C + ((i) * 4)) +#define TXGBE_RSC_IPS_SALT 0x1712C +#define TXGBE_RSC_IPS_MODE 0x17130 +#define TXGBE_RSC_IPS_MODE_IPV6 0x00000010 +#define TXGBE_RSC_IPS_MODE_DEC 0x00000008 +#define TXGBE_RSC_IPS_MODE_ESP 0x00000004 +#define TXGBE_RSC_IPS_MODE_AH 0x00000002 +#define TXGBE_RSC_IPS_MODE_VALID 0x00000001 + +/************************************** ETH PHY ******************************/ +#define TXGBE_XPCS_IDA_ADDR 0x13000 +#define TXGBE_XPCS_IDA_DATA 0x13004 +#define TXGBE_ETHPHY_IDA_ADDR 0x13008 +#define TXGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define TXGBE_MNG_FW_SM 0x1E000 +#define TXGBE_MNG_SW_SM 0x1E004 +#define TXGBE_MNG_SWFW_SYNC 0x1E008 +#define TXGBE_MNG_MBOX 0x1E100 +#define TXGBE_MNG_MBOX_CTL 0x1E044 +#define TXGBE_MNG_OS2BMC_CNT 0x1E094 +#define TXGBE_MNG_BMC2OS_CNT 0x1E090 + +/* amlite: swfw mailbox changes */ +#define TXGBE_AML_MNG_MBOX_CTL_SW2FW 0x1E0A0 +#define TXGBE_AML_MNG_MBOX_SW2FW 0x1E200 +#define TXGBE_AML_MNG_MBOX_CTL_FW2SW 0x1E0A4 +#define TXGBE_AML_MNG_MBOX_FW2SW 0x1E300 + +#define TXGBE_AML_MNG_MBOX_NOTIFY 0x80000000U + +/* Firmware Semaphore Register */ +#define TXGBE_MNG_FW_SM_MODE_MASK 0xE +#define TXGBE_MNG_FW_SM_TS_ENABLED 0x1 +/* SW Semaphore Register bitmasks */ +#define TXGBE_MNG_SW_SM_SM 0x00000001U /* software Semaphore */ + +/* SW_FW_SYNC definitions */ +#define TXGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define TXGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define TXGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define TXGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define TXGBE_MNG_MBOX_CTL_SWACK 0x2 +#define TXGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define TXGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define TXGBE_MAC_TX_CFG 0x11000 +#define TXGBE_MAC_RX_CFG 0x11004 +#define TXGBE_MAC_PKT_FLT 0x11008 +#define TXGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define TXGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define TXGBE_MAC_WDG_TIMEOUT 0x1100C +#define TXGBE_MAC_RX_FLOW_CTRL 0x11090 +#define TXGBE_MAC_ADDRESS0_HIGH 0x11300 +#define TXGBE_MAC_ADDRESS0_LOW 0x11304 +#define TXGBE_MAC_MISC_CTL 0x11f00 + +#define TXGBE_MAC_TX_CFG_TE 0x00000001U +#define TXGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define TXGBE_MAC_TX_CFG_SPEED_10G 0x00000000U +#define TXGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define TXGBE_MAC_RX_CFG_RE 0x00000001U +#define TXGBE_MAC_RX_CFG_JE 0x00000100U +#define TXGBE_MAC_RX_CFG_LM 0x00000400U +#define TXGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define TXGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 +#define TXGBE_MAC_MISC_LINK_STS_MOD 0x1 + +#define TXGBE_LINK_BOTH_PCS_MAC 0x1 + +#define TXGBE_EPHY_STAT 0x13404 +#define TXGBE_EPHY_STAT_PPL_LOCK 0x3 + +/* amlite: new MAC_TX_CONFIG */ +#define TXGBE_MAC_TX_CFG_AML_SPEED_MASK 0x78000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_50G 0x20000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_40G 0x00000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_25G 0x10000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_10G 0x40000000U +#define TXGBE_MAC_TX_CFG_AML_SPEED_1G 0x70000000U + +#define TXGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ +#define TXGBE_MAC_RX_FLOW_CTRL_PFCE 0x00000100U /* pfc enable */ + +#define TXGBE_MSCA 0x11200 +#define TXGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define TXGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define TXGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define TXGBE_MSCC 0x11204 +#define TXGBE_MSCC_DATA(v) 
((0xFFFF & (v))) +#define TXGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum TXGBE_MSCA_CMD_value { + TXGBE_MSCA_CMD_RSV = 0, + TXGBE_MSCA_CMD_WRITE, + TXGBE_MSCA_CMD_POST_READ, + TXGBE_MSCA_CMD_READ, +}; + +#define TXGBE_MSCC_SADDR ((0x1U) << 18) +#define TXGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define TXGBE_MSCC_BUSY ((0x1U) << 22) + +#define TXGBE_MAC_MDIO_CLAUSE_22_PORT 0x11220 +#define TXGBE_MAC_MDIO_CLAUSE_ALL_PRTCL22 0xF + +/* EEE registers */ + +/* statistic */ +#define TXGBE_MAC_LXONRXC 0x11E0C +#define TXGBE_MAC_LXOFFRXC 0x11988 +#define TXGBE_MAC_PXONRXC(_i) (0x11E30 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_MAC_PXOFFRXC 0x119DC +#define TXGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define TXGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define TXGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define TXGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define TXGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define TXGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define TXGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define TXGBE_MMC_CONTROL 0x11800 +#define TXGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define TXGBE_MMC_CONTROL_UP 0x700 + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define TXGBE_BME_CTL 0x12020 +#define TXGBE_PX_MISC_IC 0x100 +#define TXGBE_PX_MISC_ICS 0x104 +#define TXGBE_PX_MISC_IEN 0x108 +#define TXGBE_PX_MISC_IVAR 0x4FC +#define TXGBE_PX_GPIE 0x118 +#define TXGBE_PX_ISB_ADDR_L 0x160 +#define TXGBE_PX_ISB_ADDR_H 0x164 +#define TXGBE_PX_TCP_TIMER 0x170 +#define TXGBE_PX_ITRSEL 0x180 +#define TXGBE_PX_IC(_i) (0x120 + (_i) * 4) +#define TXGBE_PX_ICS(_i) (0x130 + (_i) * 4) +#define TXGBE_PX_IMS(_i) (0x140 + (_i) * 4) +#define TXGBE_PX_IMC(_i) (0x150 + (_i) * 4) +#define TXGBE_PX_IVAR(_i) (0x500 + (_i) * 4) +#define TXGBE_PX_ITR(_i) (0x200 + (_i) * 4) +#define TXGBE_PX_TRANSACTION_PENDING 0x168 +#define TXGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define TXGBE_PX_MISC_IC_ETH_LKDN 0x00000100U /* eth link down */ +#define TXGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define TXGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define TXGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is stalled */ +#define TXGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key exchange */ +#define TXGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define TXGBE_PX_MISC_IC_FLOW_DIR 0x00008000U /* FDir Exception */ +#define TXGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define TXGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except eth link down */ +#define TXGBE_PX_MISC_IC_ETH_LK 0x00040000U /* link up */ +#define TXGBE_PX_MISC_IC_ETH_AN 0x00080000U /* link auto-nego done */ +#define TXGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define TXGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define TXGBE_PX_MISC_IC_TXDESC 0x00400000U /* tx desc error */ +#define TXGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define TXGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define TXGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define TXGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define TXGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +//#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define TXGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +#define 
TXGBE_PX_MISC_AML_ETH_LK_CHANGE 0x00000100U /* link change */ +#define TXGBE_PX_MISC_AML_ETH_PHY_EVENT 0x00040000U /* Eth phy event */ + +/* Extended Interrupt Cause Set */ +#define TXGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_ICS_STALL 0x00001000U +#define TXGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_ICS_I2C 0x00010000U +#define TXGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_ICS_SPI 0x00200000U +#define TXGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_ICS_GPIO 0x04000000U +#define TXGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +//#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_ICS_TIMER 0x80000000U + +/* Extended Interrupt Enable Set */ +#define TXGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_IEN_STALL 0x00001000U +#define TXGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_IEN_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_IEN_I2C 0x00010000U +#define TXGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_IEN_SPI 0x00200000U +#define TXGBE_PX_MISC_IEN_TXDESC 0x00400000U +#define TXGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_IEN_GPIO 0x04000000U +#define TXGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +//#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_IEN_TIMER 0x80000000U + +#define TXGBE_PX_MISC_IEN_MASK ( \ + TXGBE_PX_MISC_IEN_ETH_LKDN | \ + TXGBE_PX_MISC_IEN_DEV_RST | \ + TXGBE_PX_MISC_IEN_ETH_EVENT | \ + TXGBE_PX_MISC_IEN_ETH_LK | \ + TXGBE_PX_MISC_IEN_ETH_AN | \ + TXGBE_PX_MISC_IEN_INT_ERR | \ + TXGBE_PX_MISC_IEN_VF_MBOX | \ + TXGBE_PX_MISC_IEN_GPIO | \ + TXGBE_PX_MISC_IEN_STALL | \ + TXGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + TXGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define TXGBE_PX_GPIE_MODEL 0x00000001U +#define TXGBE_PX_GPIE_IMEN 0x00000002U +#define TXGBE_PX_GPIE_LL_INTERVAL 0x000000F0U +#define TXGBE_PX_GPIE_RSC_DELAY 0x00000700U + +/* Interrupt Vector Allocation Registers */ +#define TXGBE_PX_IVAR_REG_NUM 64 +#define TXGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define TXGBE_MAX_INT_RATE 500000 +#define TXGBE_MIN_INT_RATE 980 +#define TXGBE_MAX_EITR 0x00000FF8U +#define TXGBE_AMLITE_MAX_EITR 0x00000FFFU +#define TXGBE_MIN_EITR 8 +#define TXGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define TXGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define TXGBE_PX_ITR_LLI_MOD 0x00008000U +#define TXGBE_PX_ITR_CNT_WDIS 0x80000000U +#define TXGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define TXGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define TXGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define TXGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define TXGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) 
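TXGBE_PX_MISC_IEN_MASK above collects the miscellaneous causes the driver actually services (link up/down, device reset, MAC events, mailbox, GPIO, stall, PCIe request errors, timer), so arming the misc vector reduces to a single register write. A minimal sketch, assuming the libwx-style wr32() helper:

    /* Sketch: unmask the standard set of misc interrupt causes in one shot.
     * Pending causes can then be read back through TXGBE_PX_MISC_IC.
     */
    static void txgbe_enable_misc_irq(struct wx *wx)
    {
            wr32(wx, TXGBE_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
    }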
+#define TXGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) + +/* amlite: tx head wb */ +#define TXGBE_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40)) +#define TXGBE_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40)) + +/* Transmit Config masks */ +#define TXGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define TXGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define TXGBE_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. wr-bk flushing */ +#define TXGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define TXGBE_PX_TR_CFG_THRE_SHIFT 8 +#define TXGBE_PX_TR_CFG_HEAD_WB BIT(27) /* amlite head wb */ +#define TXGBE_PX_TR_CFG_HEAD_WB_64BYTE BIT(28) /* amlite head wb 64byte */ + +#define TXGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_RP((q_per_pool) * (vf_number) + (vf_q_index))) +#define TXGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_WP((q_per_pool) * (vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define TXGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define TXGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define TXGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define TXGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define TXGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) + +#define TXGBE_TDM_DESC_CHK(i) (0x0180B0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_NONFATAL(i) (0x0180C0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ +/* PX_RR_CFG bit definitions */ +#define TXGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 + +#define TXGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define TXGBE_PX_RR_CFG_VLAN 0x80000000U +#define TXGBE_PX_RR_CFG_RSC 0x20000000U +#define TXGBE_PX_RR_CFG_CNTAG 0x10000000U +#define TXGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define TXGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define TXGBE_PX_RR_CFG_STALL 0x02000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define TXGBE_PX_RR_CFG_RR_THER 0x00070000U +#define TXGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define TXGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define TXGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define TXGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define TXGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* amlite: desc merge */ +#define TXGBE_PX_RR_CFG_DESC_MERGE 0x00080000U + +/* statistic */ +#define TXGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) +#define TXGBE_VX_GPRC(_i) (0x01014 + (0x40 * (_i))) +#define TXGBE_VX_GPTC(_i) (0x03014 + (0x40 * (_i))) +#define TXGBE_VX_GORC_LSB(_i) (0x01018 + (0x40 * (_i))) +#define TXGBE_VX_GORC_MSB(_i) (0x0101C + (0x40 * (_i))) +#define TXGBE_VX_GOTC_LSB(_i) (0x03018 + (0x40 * (_i))) +#define TXGBE_VX_GOTC_MSB(_i) (0x0301C + (0x40 * (_i))) +#define TXGBE_VX_MPRC(_i) (0x01020 + (0x40 * (_i))) + +#define TXGBE_PX_GPRC 0x12504 +#define TXGBE_PX_GPTC 0x18308 + +#define TXGBE_PX_GORC_LSB 0x12508 +#define TXGBE_PX_GORC_MSB 0x1250C + +#define TXGBE_PX_GOTC_LSB 0x1830C +#define TXGBE_PX_GOTC_MSB 0x18310 + +/************************************* Stats registers ************************/ +#define TXGBE_FCCRC 0x15160 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define TXGBE_FCOERPDC 0x12514 /* FCoE Rx Packets Dropped Count */ +#define TXGBE_FCLAST 0x12518 /* FCoE Last Error Count */ +#define TXGBE_FCOEPRC 0x15164 /* Number of FCoE Packets Received */ +#define TXGBE_FCOEDWRC 0x15168 /* Number of FCoE DWords Received */ 
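The TR_CFG fields above suggest Tx ring bring-up is a single write: the enable bit plus a size field starting at bit 1. Since descriptor counts must be a multiple of 128 (see TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE further down), a plausible reading is that the size field carries the count in units of 128; that encoding, like the helper name, is an assumption here:

    /* Sketch: enable one Tx ring. The size-field encoding (descriptor
     * count / 128, per the descriptor-multiple requirement) is assumed.
     */
    static void txgbe_enable_tx_ring(struct wx *wx, u8 reg_idx, u32 count)
    {
            u32 txdctl = TXGBE_PX_TR_CFG_ENABLE;

            txdctl |= (count / 128) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT;
            wr32(wx, TXGBE_PX_TR_CFG(reg_idx), txdctl);
    }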
+#define TXGBE_FCOEPTC 0x18318 /* Number of FCoE Packets Transmitted */
+#define TXGBE_FCOEDWTC 0x1831C /* Number of FCoE DWords Transmitted */
+
+/*************************** Flash region definition *************************/
+/* EEC Register */
+#define TXGBE_EEC_SK 0x00000001U /* EEPROM Clock */
+#define TXGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */
+#define TXGBE_EEC_DI 0x00000004U /* EEPROM Data In */
+#define TXGBE_EEC_DO 0x00000008U /* EEPROM Data Out */
+#define TXGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */
+#define TXGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */
+#define TXGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */
+#define TXGBE_EEC_FWE_SHIFT 4
+#define TXGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */
+#define TXGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */
+#define TXGBE_EEC_PRES 0x00000100U /* EEPROM Present */
+#define TXGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */
+#define TXGBE_EEC_FLUP 0x00800000U /* Flash update command */
+#define TXGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */
+#define TXGBE_EEC_FLUDONE 0x04000000U /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define TXGBE_EEC_ADDR_SIZE 0x00000400U
+#define TXGBE_EEC_SIZE 0x00007800U /* EEPROM Size */
+#define TXGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD allows 14 bits for addr. */
+
+#define TXGBE_EEC_SIZE_SHIFT 11
+#define TXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define TXGBE_EEPROM_OPCODE_BITS 8
+
+/* FLA Register */
+#define TXGBE_FLA_LOCKED 0x00000040U
+
+/* Part Number String Length */
+#define TXGBE_PBANUM_LENGTH 32
+
+/* Checksum and EEPROM pointers */
+#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define TXGBE_EEPROM_CHECKSUM 0x2F
+#define TXGBE_EEPROM_SUM 0xBABA
+#define TXGBE_ATLAS0_CONFIG_PTR 0x04
+#define TXGBE_PHY_PTR 0x04
+#define TXGBE_ATLAS1_CONFIG_PTR 0x05
+#define TXGBE_OPTION_ROM_PTR 0x05
+#define TXGBE_PCIE_GENERAL_PTR 0x06
+#define TXGBE_PCIE_CONFIG0_PTR 0x07
+#define TXGBE_PCIE_CONFIG1_PTR 0x08
+#define TXGBE_CORE0_PTR 0x09
+#define TXGBE_CORE1_PTR 0x0A
+#define TXGBE_MAC0_PTR 0x0B
+#define TXGBE_MAC1_PTR 0x0C
+#define TXGBE_CSR0_CONFIG_PTR 0x0D
+#define TXGBE_CSR1_CONFIG_PTR 0x0E
+#define TXGBE_PCIE_ANALOG_PTR 0x02
+#define TXGBE_SHADOW_RAM_SIZE 0x4000
+#define TXGBE_TXGBE_PCIE_GENERAL_SIZE 0x24
+#define TXGBE_PCIE_CONFIG_SIZE 0x08
+#define TXGBE_EEPROM_LAST_WORD 0x800
+#define TXGBE_FW_PTR 0x0F
+#define TXGBE_PBANUM0_PTR 0x05
+#define TXGBE_PBANUM1_PTR 0x06
+#define TXGBE_ALT_MAC_ADDR_PTR 0x37
+#define TXGBE_FREE_SPACE_PTR 0x3E
+#define TXGBE_SW_REGION_PTR 0x1C
+#define TXGBE_SHOWROM_I2C_PTR 0xB00
+#define TXGBE_SHOWROM_I2C_END 0xF00
+
+#define TXGBE_SAN_MAC_ADDR_PTR 0x18
+#define TXGBE_DEVICE_CAPS 0x1C
+#define TXGBE_EEPROM_VERSION_L 0x1D
+#define TXGBE_EEPROM_VERSION_H 0x1E
+#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+
+#define TXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define TXGBE_MAX_MSIX_VECTORS_SAPPHIRE 0x40
+
+/* MSI-X capability fields masks */
+#define TXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define TXGBE_ISCSI_BOOT_CAPS 0x0033
+#define TXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define TXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define TXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define TXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define TXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define TXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define TXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define
TXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define TXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define TXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define TXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define TXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define TXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define TXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define TXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define TXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define TXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define TXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define TXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define TXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define TXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define TXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define TXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define TXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define TXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define TXGBE_EEPROM_CCD_BIT 2 + +#define TXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define TXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define TXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define TXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define TXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define TXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define TXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define TXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define TXGBE_FW_LESM_STATE_1 0x1 +#define TXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define TXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define TXGBE_FW_PATCH_VERSION_4 0x7 +#define TXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define TXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define TXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. 
SAN MAC block */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define TXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define TXGBE_PCI_DEVICE_STATUS 0xAA +#define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define TXGBE_PCI_LINK_STATUS 0xB2 +#define TXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define TXGBE_PCI_LINK_WIDTH 0x3F0 +#define TXGBE_PCI_LINK_WIDTH_1 0x10 +#define TXGBE_PCI_LINK_WIDTH_2 0x20 +#define TXGBE_PCI_LINK_WIDTH_4 0x40 +#define TXGBE_PCI_LINK_WIDTH_8 0x80 +#define TXGBE_PCI_LINK_SPEED 0xF +#define TXGBE_PCI_LINK_SPEED_2500 0x1 +#define TXGBE_PCI_LINK_SPEED_5000 0x2 +#define TXGBE_PCI_LINK_SPEED_8000 0x3 +#define TXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define TXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define TXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define TXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define TXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define TXGBE_PCIDEVCTRL2_50_100us 0x1 +#define TXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define TXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define TXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define TXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define TXGBE_PCIDEVCTRL2_1_2s 0xa +#define TXGBE_PCIDEVCTRL2_4_8s 0xd +#define TXGBE_PCIDEVCTRL2_17_34s 0xe + +/******************* Receive Descriptor bit definitions **********************/ +#define TXGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define TXGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define TXGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define TXGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define TXGBE_RXD_NEXTP_SHIFT 0x00000004U +#define TXGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define TXGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define TXGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define TXGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define TXGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define TXGBE_RXD_STAT_CLASS_ID_FLM 0x00000004U /* FDir Match */ +#define TXGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define TXGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define TXGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define TXGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define TXGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define TXGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define TXGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define TXGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define TXGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define TXGBE_RXD_STAT_VEXT 0x00000800U 
/* 1st VLAN found */ +#define TXGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency Int */ +#define TXGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define TXGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define TXGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define TXGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define TXGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define TXGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define TXGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define TXGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define TXGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define TXGBE_RXD_IPV6EX 0x00001000U /* IPv6EX */ +#define TXGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define TXGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define TXGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define TXGBE_RXD_ERR_FCERR 0x00700000U /* FCERR/FDIRERR */ +#define TXGBE_RXD_ERR_FDIR_LEN 0x00100000U /* FDIR Length error */ +#define TXGBE_RXD_ERR_FDIR_DROP 0x00200000U /* FDIR Drop error */ +#define TXGBE_RXD_ERR_FDIR_COLL 0x00400000U /* FDIR Collision error */ +#define TXGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define TXGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define TXGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define TXGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define TXGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define TXGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define TXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define TXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define TXGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define TXGBE_RXD_TPID_MASK 0x000001C0U +#define TXGBE_RXD_TPID_SHIFT 6 +#define TXGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define TXGBE_RXD_RSCCNT_MASK 0x001E0000U +#define TXGBE_RXD_RSCCNT_SHIFT 17 +#define TXGBE_RXD_HDRBUFLEN_SHIFT 5 +#define TXGBE_RXD_SPLITHEADER_EN 0x00001000U +#define TXGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define TXGBE_RXD_RSSTYPE_NONE 0x00000000U +#define TXGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define TXGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define TXGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define TXGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define TXGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define TXGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define TXGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define TXGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define TXGBE_PTYPE_TUN_IPV4 (0x80) +#define TXGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define TXGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define TXGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define TXGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define TXGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define TXGBE_PTYPE_PKT_MAC (0x10) +#define TXGBE_PTYPE_PKT_IP (0x20) +#define TXGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define TXGBE_PTYPE_TYP_MAC (0x01) +#define TXGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define TXGBE_PTYPE_TYP_FIP (0x03) +#define TXGBE_PTYPE_TYP_LLDP (0x04) +#define TXGBE_PTYPE_TYP_CNM (0x05) +#define TXGBE_PTYPE_TYP_EAPOL (0x06) +#define TXGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define TXGBE_PTYPE_PKT_IPV6 (0x08) +#define TXGBE_PTYPE_TYP_IPFRAG (0x01) +#define TXGBE_PTYPE_TYP_IP (0x02) +#define TXGBE_PTYPE_TYP_UDP (0x03) +#define TXGBE_PTYPE_TYP_TCP (0x04) +#define TXGBE_PTYPE_TYP_SCTP (0x05) +/* TYP 
for PKT=fcoe */ +#define TXGBE_PTYPE_PKT_VFT (0x08) +#define TXGBE_PTYPE_TYP_FCOE (0x00) +#define TXGBE_PTYPE_TYP_FCDATA (0x01) +#define TXGBE_PTYPE_TYP_FCRDY (0x02) +#define TXGBE_PTYPE_TYP_FCRSP (0x03) +#define TXGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum txgbe_l2_ptypes { + TXGBE_PTYPE_L2_ABORTED = (TXGBE_PTYPE_PKT_MAC), + TXGBE_PTYPE_L2_MAC = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_MAC), + TXGBE_PTYPE_L2_TS = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_TS), + TXGBE_PTYPE_L2_FIP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_FIP), + TXGBE_PTYPE_L2_LLDP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_LLDP), + TXGBE_PTYPE_L2_CNM = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_CNM), + TXGBE_PTYPE_L2_EAPOL = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_EAPOL), + TXGBE_PTYPE_L2_ARP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_ARP), + + TXGBE_PTYPE_L2_IPV4_FRAG = (TXGBE_PTYPE_PKT_IP | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV4 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV4_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV4_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV4_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_SCTP), + TXGBE_PTYPE_L2_IPV6_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV6 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV6_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV6_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV6_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_SCTP), + + TXGBE_PTYPE_L2_FCOE = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_TYP_FCOE), + TXGBE_PTYPE_L2_FCOE_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCOTHER), + TXGBE_PTYPE_L2_FCOE_VFT = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_PKT_VFT), + TXGBE_PTYPE_L2_FCOE_VFT_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_VFT_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_VFT_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCOTHER), + + TXGBE_PTYPE_L2_TUN4_MAC = (TXGBE_PTYPE_TUN_IPV4 | TXGBE_PTYPE_PKT_IGM), + TXGBE_PTYPE_L2_TUN6_MAC = (TXGBE_PTYPE_TUN_IPV6 | TXGBE_PTYPE_PKT_IGM), +}; + +#define TXGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define TXGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define TXGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +/* Security Processing bit Indication */ +#define TXGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define TXGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define TXGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define TXGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define TXGBE_RXD_ERR_FRAME_ERR_MASK TXGBE_RXD_ERR_RXE + +/*********************** Adv Transmit Descriptor Config Masks ****************/ 
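Before the transmit descriptor masks: the Rx PTYPE byte defined just above decodes by masking alone, with TUN in bits 7:6, PKT in bits 5:4 and TYP in bits 3:0. A small sketch of classifying a completed descriptor with those macros (union txgbe_rx_desc, which it takes, is defined a little further down):

    /* Sketch: true only for a plain (non-tunnelled) IPv4 TCP packet. */
    static bool txgbe_ptype_is_ipv4_tcp(union txgbe_rx_desc *rx_desc)
    {
            u8 ptype = TXGBE_RXD_PKTTYPE(rx_desc);

            if (TXGBE_PTYPE_TUN(ptype))             /* tunnelled frame */
                    return false;
            if (TXGBE_PTYPE_PKT(ptype) != TXGBE_PTYPE_PKT_IP)
                    return false;                   /* not an IP packet */
            if (ptype & TXGBE_PTYPE_PKT_IPV6)       /* IPv6, not IPv4 */
                    return false;
            return TXGBE_PTYPE_TYPL4(ptype) == TXGBE_PTYPE_TYP_TCP;
    }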
+#define TXGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define TXGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define TXGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define TXGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define TXGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define TXGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define TXGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define TXGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define TXGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define TXGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define TXGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define TXGBE_TXD_RS 0x08000000U /* Report Status */ +#define TXGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define TXGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define TXGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define TXGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define TXGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define TXGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define TXGBE_TXD_CC 0x00000080U /* Check Context */ +#define TXGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define TXGBE_TXD_IIPCS 0x00000400U +#define TXGBE_TXD_EIPCS 0x00000800U +#define TXGBE_TXD_L4CS 0x00000200U +#define TXGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define TXGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define TXGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define TXGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define TXGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define TXGBE_TXD_ENC_SHIFT 15 + +#define TXGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define TXGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define TXGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define TXGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define TXGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define TXGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define TXGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define TXGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define TXGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define TXGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define TXGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define TXGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define TXGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define TXGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define TXGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define TXGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define TXGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define TXGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define TXGBE_TXD_TUNNEL_UDP (0x0ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) +#define TXGBE_TXD_TUNNEL_GRE (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) + +/************ txgbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 128 */ +#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 128 +#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 128 +#define TXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define TXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define TXGBE_TX_DESC_SPECIAL_PRI_SHIFT 
TXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ +union txgbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union txgbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct txgbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. */ +union txgbe_atr_input { + /* Byte layout in order, all values with MSB first: + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union txgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +struct txgbe_ethertype_filter { + u16 rule_idx; + u64 action; + u16 ethertype; + u32 etqf; + u32 etqs; +}; + +/* Structure to store ethertype filters' info. 
 */
+struct txgbe_etype_filter_info {
+	int count;
+	u8 ethertype_mask; /* Bit mask for every used ethertype filter */
+	/* store used ethertype filters */
+	struct txgbe_ethertype_filter etype_filters[TXGBE_MAX_PSR_ETYPE_SWC_FILTERS];
+};
+
+/* Structure to store 5-tuple filters' info. */
+struct txgbe_5tuple_filter_info {
+	u32 fivetuple_mask[4]; /* Bit mask for max 128 filters */
+};
+
+/****************** Manageability Host Interface defines ********************/
+#define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
+#define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */
+#define TXGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */
+#define TXGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */
+#define TXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define TXGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */
+#define TXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 244
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_RESET_CMD 0xDF
+#define FW_RESET_LEN 0x2
+#define FW_SETUP_MAC_LINK_CMD 0xE0
+#define FW_SETUP_MAC_LINK_LEN 0x2
+#define FW_FLASH_UPGRADE_START_CMD 0xE3
+#define FW_FLASH_UPGRADE_START_LEN 0x1
+#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4
+#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5
+#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4
+#define FW_DW_OPEN_NOTIFY 0xE9
+#define FW_DW_CLOSE_NOTIFY 0xEA
+#define FW_AN_STA_CMD 0xF3
+#define FW_AN_STA_LEN 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
+
+#define TXGBE_CHECKSUM_CAP_ST_PASS 0x80658383
+#define TXGBE_CHECKSUM_CAP_ST_FAIL 0x70657376
+
+#define TXGBE_HIC_HDR_INDEX_MAX 255
+
+/* Host Interface Command Structures */
+struct txgbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	union {
+		u8 checksum;
+		u8 index;
+	} cksum_or_index;
+};
+
+struct txgbe_hic_hdr2_req {
+	u8 cmd;
+	u8 buf_lenh;
+	u8 buf_lenl;
+	union {
+		u8 checksum;
+		u8 index;
+	} cksum_or_index;
+};
+
+struct txgbe_hic_hdr2_rsp {
+	u8 cmd;
+	u8 buf_lenl;
+	u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+	union {
+		u8 checksum;
+		u8 index;
+	} cksum_or_index;
+};
+
+union txgbe_hic_hdr2 {
+	struct txgbe_hic_hdr2_req req;
+	struct txgbe_hic_hdr2_rsp rsp;
+};
+
+struct txgbe_hic_drv_info {
+	struct txgbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult.
of dword2 */ +}; + +/* These need to be dword aligned */ +struct txgbe_hic_read_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_write_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_disable_rxen { + struct txgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_reset { + struct txgbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct txgbe_hic_phy_cfg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum txgbe_module_id { + TXGBE_MODULE_EEPROM = 0, + TXGBE_MODULE_FIRMWARE, + TXGBE_MODULE_HARDWARE, + TXGBE_MODULE_PCIE +}; + +struct txgbe_hic_upg_start { + struct txgbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_upg_write { + struct txgbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum txgbe_upg_flag { + TXGBE_RESET_NONE = 0, + TXGBE_RESET_FIRMWARE, + TXGBE_RELOAD_EEPROM, + TXGBE_RESET_LAN +}; + +struct txgbe_hic_upg_verify { + struct txgbe_hic_hdr hdr; + u32 action_flag; +}; + +struct txgbe_hic_write_lldp { + struct txgbe_hic_hdr hdr; + u8 func; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_write_autoneg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + bool autoneg; + u16 pad; +}; + +struct txgbe_hic_set_pps { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 enable; + u16 pad2; + u64 nsec; + u64 cycles; +}; + +struct txgbe_led_active_set { + struct txgbe_hic_hdr hdr; + u32 active_flag; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define TXGBE_IS_MULTICAST(address) \ + ((bool)(((u8 *)(address))[0] & ((u8)0x01))) + +/* Check whether an address is broadcast. 
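The headers above carry a command byte, a buffer length, and a checksum slot. As a sketch of how such a command is typically assembled, the following builds a `FW_RESET_CMD` request; the checksum convention (seed with `FW_DEFAULT_CHECKSUM`, then replace it so the bytes of header plus payload sum to zero) is an assumption based on common CEM-style host interfaces, and `txgbe_calc_hic_checksum` / `txgbe_build_reset_cmd` are hypothetical helper names.

```c
/* Sketch: build a FW_RESET_CMD host interface command. The checksum
 * scheme is assumed, not taken from this patch.
 */
static u8 txgbe_calc_hic_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 i;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8)(0 - sum); /* all bytes of the command then sum to zero */
}

static void txgbe_build_reset_cmd(struct txgbe_hic_reset *cmd,
				  u16 lan_id, u16 reset_type)
{
	cmd->hdr.cmd = FW_RESET_CMD;
	cmd->hdr.buf_len = FW_RESET_LEN;
	cmd->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	cmd->hdr.cksum_or_index.checksum = FW_DEFAULT_CHECKSUM;
	cmd->lan_id = lan_id;
	cmd->reset_type = reset_type;
	cmd->hdr.cksum_or_index.checksum =
		txgbe_calc_hic_checksum((u8 *)cmd,
					FW_CEM_HDR_LEN + cmd->hdr.buf_len);
}
```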
*/ +#define TXGBE_IS_BROADCAST(address) \ + ((((u8 *)(address))[0] == ((u8)0xff)) && \ + (((u8 *)(address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define TXGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Management */ +/* DMA Coalescing configuration */ +struct txgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* Autonegotiation advertised speeds */ +typedef u32 txgbe_autoneg_advertised; +/* Link speed */ +#define TXGBE_LINK_SPEED_UNKNOWN 0 +#define TXGBE_LINK_SPEED_100_FULL 1 +#define TXGBE_LINK_SPEED_1GB_FULL 2 +#define TXGBE_LINK_SPEED_10GB_FULL 4 +#define TXGBE_LINK_SPEED_10_FULL 8 +#define TXGBE_LINK_SPEED_AUTONEG (TXGBE_LINK_SPEED_100_FULL | \ + TXGBE_LINK_SPEED_1GB_FULL | \ + TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_10_FULL) + +#define TXGBE_LINK_SPEED_25GB_FULL 0x10 +#define TXGBE_LINK_SPEED_40GB_FULL 0x20 +#define TXGBE_LINK_SPEED_50GB_FULL 0x40 + +#define TXGBE_LINK_SPEED_AMLITE_AUTONEG (TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_25GB_FULL) +/* Amlite eth mode */ +enum amlite_eth_mode { + ETH_RATE_10G = 0, + ETH_RATE_25G +}; + +/* Physical layer type */ +typedef u32 txgbe_physical_layer; +#define TXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define TXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define TXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define TXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define TXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define TXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define TXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define TXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define TXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + +/* Special PHY Init Routine */ +#define TXGBE_PHY_INIT_OFFSET_NL 0x002B +#define TXGBE_PHY_INIT_END_NL 0xFFFF +#define TXGBE_CONTROL_MASK_NL 0xF000 +#define TXGBE_DATA_MASK_NL 0x0FFF +#define TXGBE_CONTROL_SHIFT_NL 12 +#define TXGBE_DELAY_NL 0 +#define TXGBE_DATA_NL 1 +#define TXGBE_CONTROL_NL 0x000F +#define TXGBE_CONTROL_EOL_NL 0x0FFF +#define TXGBE_CONTROL_SOL_NL 0x0000 + +/* BitTimes (BT) conversion */ +#define TXGBE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define TXGBE_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define TXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define TXGBE_CABLE_DC 5556 /* Delay Copper */ +#define TXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define TXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define TXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define TXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define TXGBE_ID_X540 (TXGBE_MAC_DC + TXGBE_XAUI_DC + TXGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define TXGBE_PHY_D 12800 +#define TXGBE_MAC_D 4096 +#define TXGBE_XAUI_D (2 * 1024) + +#define TXGBE_ID (TXGBE_MAC_D + TXGBE_XAUI_D + TXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define TXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define TXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define TXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID_X540) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + +/* Calculate 
delay value in bit times */ +#define TXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define TXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * TXGBE_B2BT(_max_frame_tc) + \ + (36 * TXGBE_PCI_DELAY / 25) + 1) + +#define TXGBE_LOW_DV(_max_frame_tc) \ + (2 * TXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum txgbe_fcoe_boot_status { + txgbe_fcoe_bootstatus_disabled = 0, + txgbe_fcoe_bootstatus_enabled = 1, + txgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum txgbe_eeprom_type { + txgbe_eeprom_uninitialized = 0, + txgbe_eeprom_spi, + txgbe_flash, + txgbe_eeprom_none /* No NVM support */ +}; + +enum txgbe_phy_type { + txgbe_phy_unknown = 0, + txgbe_phy_none, + txgbe_phy_tn, + txgbe_phy_aq, + txgbe_phy_cu_unknown, + txgbe_phy_qt, + txgbe_phy_xaui, + txgbe_phy_nl, + txgbe_phy_sfp_passive_tyco, + txgbe_phy_sfp_passive_unknown, + txgbe_phy_sfp_active_unknown, + txgbe_phy_sfp_avago, + txgbe_phy_sfp_ftl, + txgbe_phy_sfp_ftl_active, + txgbe_phy_sfp_unknown, + txgbe_phy_sfp_intel, + txgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + txgbe_phy_generic +}; + +/* SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum txgbe_sfp_type { + txgbe_sfp_type_da_cu = 0, + txgbe_sfp_type_sr = 1, + txgbe_sfp_type_lr = 2, + txgbe_sfp_type_da_cu_core0 = 3, + txgbe_sfp_type_da_cu_core1 = 4, + txgbe_sfp_type_srlr_core0 = 5, + txgbe_sfp_type_srlr_core1 = 6, + txgbe_sfp_type_da_act_lmt_core0 = 7, + txgbe_sfp_type_da_act_lmt_core1 = 8, + txgbe_sfp_type_1g_cu_core0 = 9, + txgbe_sfp_type_1g_cu_core1 = 10, + txgbe_sfp_type_1g_sx_core0 = 11, + txgbe_sfp_type_1g_sx_core1 = 12, + txgbe_sfp_type_1g_lx_core0 = 13, + txgbe_sfp_type_1g_lx_core1 = 14, + txgbe_sfp_type_10g_cu_core0 = 15, /* add for qi'an'xin 10G fiber2copper sfp */ + txgbe_sfp_type_10g_cu_core1 = 16, + txgbe_sfp_type_25g_sr_core0 = 17, + txgbe_sfp_type_25g_sr_core1 = 18, + txgbe_sfp_type_25g_lr_core0 = 19, + txgbe_sfp_type_25g_lr_core1 = 20, + txgbe_sfp_type_25g_aoc_core0 = 21, + txgbe_sfp_type_25g_aoc_core1 = 22, + txgbe_qsfp_type_40g_cu_core0 = 23, + txgbe_qsfp_type_40g_cu_core1 = 24, + txgbe_qsfp_type_40g_sr_core0 = 25, + txgbe_qsfp_type_40g_sr_core1 = 26, + txgbe_qsfp_type_40g_lr_core0 = 27, + txgbe_qsfp_type_40g_lr_core1 = 28, + txgbe_qsfp_type_40g_active_core0 = 29, + txgbe_qsfp_type_40g_active_core1 = 30, + txgbe_sfp_type_not_present = 0xFFFE, + txgbe_sfp_type_unknown = 0xFFFF +}; + +enum txgbe_media_type { + txgbe_media_type_unknown = 0, + txgbe_media_type_fiber, + txgbe_media_type_fiber_qsfp, + txgbe_media_type_copper, + txgbe_media_type_backplane, + txgbe_media_type_virtual, +}; + +/* Flow Control Settings */ +enum txgbe_fc_mode { + txgbe_fc_none = 0, + txgbe_fc_rx_pause, + txgbe_fc_tx_pause, + txgbe_fc_full, + txgbe_fc_default +}; + +/* Smart Speed Settings */ +#define TXGBE_SMARTSPEED_MAX_RETRIES 3 +enum txgbe_smart_speed { + txgbe_smart_speed_auto = 0, + txgbe_smart_speed_on, + txgbe_smart_speed_off +}; + +/* PCI bus types */ +enum txgbe_bus_type { + 
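The `TXGBE_DV()` and `TXGBE_LOW_DV()` macros above produce delay values in bit times; the usual consumer converts them to kilobyte watermarks for priority flow control with `TXGBE_BT2KB()`. A minimal sketch of that conversion follows; the function name and the exact way the driver feeds these into its flow-control configuration are assumptions, since that code is not shown in this hunk.

```c
/* Sketch: derive flow-control watermarks (in KB) from the delay
 * macros above for a given max frame size. Hypothetical helper.
 */
static void txgbe_calc_fc_watermarks(u32 max_frame, u32 *high_kb, u32 *low_kb)
{
	/* high watermark: full delay value for this frame size */
	*high_kb = TXGBE_BT2KB(TXGBE_DV(max_frame, max_frame));
	/* low watermark: reduced threshold based on PCI bus delay */
	*low_kb = TXGBE_BT2KB(TXGBE_LOW_DV(max_frame));
}
```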
txgbe_bus_type_unknown = 0, + txgbe_bus_type_pci, + txgbe_bus_type_pcix, + txgbe_bus_type_pci_express, + txgbe_bus_type_internal, + txgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum txgbe_bus_speed { + txgbe_bus_speed_unknown = 0, + txgbe_bus_speed_33 = 33, + txgbe_bus_speed_66 = 66, + txgbe_bus_speed_100 = 100, + txgbe_bus_speed_120 = 120, + txgbe_bus_speed_133 = 133, + txgbe_bus_speed_2500 = 2500, + txgbe_bus_speed_5000 = 5000, + txgbe_bus_speed_8000 = 8000, + txgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum txgbe_bus_width { + txgbe_bus_width_unknown = 0, + txgbe_bus_width_pcie_x1 = 1, + txgbe_bus_width_pcie_x2 = 2, + txgbe_bus_width_pcie_x4 = 4, + txgbe_bus_width_pcie_x8 = 8, + txgbe_bus_width_32 = 32, + txgbe_bus_width_64 = 64, + txgbe_bus_width_reserved +}; + +struct txgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct txgbe_bus_info { + enum pci_bus_speed speed; + enum pcie_link_width width; + enum txgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct txgbe_fc_info { + u32 high_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum txgbe_fc_mode current_mode; /* FC mode in effect */ + enum txgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct txgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 rdpc; + u64 rddc; + u64 psrpc; + u64 psrdc; + u64 untag; + u64 tdmpc; + u64 tdmdc; + u64 tdbpc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct txgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct txgbe_eeprom_operations { + s32 (*init_params)(struct txgbe_hw *hw); + s32 (*read)(struct txgbe_hw *hw, u16 offset, u16 *data); + s32 (*read_buffer)(struct txgbe_hw *hw, u16 offset, u16 words, u16 *data); + s32 (*write)(struct txgbe_hw *hw, u16 offset, u16 data); + s32 (*write_buffer)(struct 
txgbe_hw *hw, u16 offset, u16 words, u16 *data); + s32 (*validate_checksum)(struct txgbe_hw *hw, u16 *checksum_val); + s32 (*update_checksum)(struct txgbe_hw *hw); + s32 (*calc_checksum)(struct txgbe_hw *hw); +}; + +struct txgbe_flash_operations { + s32 (*init_params)(struct txgbe_hw *hw); + s32 (*read_buffer)(struct txgbe_hw *hw, u32 offset, u32 dwords, u32 *data); + s32 (*write_buffer)(struct txgbe_hw *hw, u32 offset, u32 dwords, u32 *data); +}; + +struct txgbe_mac_operations { + s32 (*init_hw)(struct txgbe_hw *hw); + s32 (*reset_hw)(struct txgbe_hw *hw); + s32 (*start_hw)(struct txgbe_hw *hw); + s32 (*clear_hw_cntrs)(struct txgbe_hw *hw); + enum txgbe_media_type (*get_media_type)(struct txgbe_hw *hw); + s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr); + s32 (*get_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr); + s32 (*set_san_mac_addr)(struct txgbe_hw *hw, u8 *san_mac_addr); + s32 (*get_device_caps)(struct txgbe_hw *hw, u16 *device_caps); + s32 (*get_wwn_prefix)(struct txgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); + s32 (*stop_adapter)(struct txgbe_hw *hw); + s32 (*get_bus_info)(struct txgbe_hw *hw); + void (*set_lan_id)(struct txgbe_hw *hw); + s32 (*setup_sfp)(struct txgbe_hw *hw); + s32 (*enable_rx_dma)(struct txgbe_hw *hw, u32 regval); + s32 (*disable_sec_rx_path)(struct txgbe_hw *hw); + s32 (*enable_sec_rx_path)(struct txgbe_hw *hw); + s32 (*disable_sec_tx_path)(struct txgbe_hw *hw); + s32 (*enable_sec_tx_path)(struct txgbe_hw *hw); + s32 (*acquire_swfw_sync)(struct txgbe_hw *hw, u32 mask); + void (*release_swfw_sync)(struct txgbe_hw *hw, u32 mask); + + /* Link */ + void (*disable_tx_laser)(struct txgbe_hw *hw); + void (*enable_tx_laser)(struct txgbe_hw *hw); + void (*flap_tx_laser)(struct txgbe_hw *hw); + s32 (*setup_link)(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); + s32 (*setup_mac_link)(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); + s32 (*check_link)(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete); + s32 (*get_link_capabilities)(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg); + void (*set_rate_select_speed)(struct txgbe_hw *hw, u32 speed); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy); + + /* LED */ + s32 (*led_on)(struct txgbe_hw *hw, u32 index); + s32 (*led_off)(struct txgbe_hw *hw, u32 index); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, u32 enable_addr); + s32 (*clear_rar)(struct txgbe_hw *hw, u32 index); + s32 (*insert_mac_addr)(struct txgbe_hw *hw, u8 *addr, u32 vmdq); + s32 (*set_vmdq_san_mac)(struct txgbe_hw *hw, u32 vmdq); + s32 (*init_rx_addrs)(struct txgbe_hw *hw); + s32 (*update_uc_addr_list)(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr next); + s32 (*update_mc_addr_list)(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, txgbe_mc_addr_itr next, + bool clear); + s32 (*enable_mc)(struct txgbe_hw *hw); + s32 (*disable_mc)(struct txgbe_hw *hw); + s32 (*clear_vfta)(struct txgbe_hw *hw); + s32 (*set_vfta)(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on); + s32 (*set_vlvf)(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); + s32 (*init_uta_tables)(struct txgbe_hw *hw); + void (*set_mac_anti_spoofing)(struct txgbe_hw *hw, bool enable, int pf); + void (*set_vlan_anti_spoofing)(struct txgbe_hw *hw, bool enable, int vf); + + /* Flow Control */ + s32 (*fc_enable)(struct 
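The ops structures above are function-pointer tables that decouple callers from the concrete MAC/EEPROM implementation. A sketch of typical usage, calling entirely through the vectors (note that `struct txgbe_hw`, which carries `mac` and `eeprom`, is only completed further down this header; the EEPROM offset here is illustrative only):

```c
/* Sketch: check link state and read one EEPROM word purely through
 * the ops tables. Hypothetical caller, error handling trimmed.
 */
static s32 txgbe_sample_ops_usage(struct txgbe_hw *hw)
{
	bool link_up = false;
	u32 speed = 0;
	u16 word = 0;
	s32 err;

	err = hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (err)
		return err;

	return hw->eeprom.ops.read(hw, 0, &word);
}
```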
txgbe_hw *hw); + s32 (*setup_fc)(struct txgbe_hw *hw); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub); + s32 (*get_thermal_sensor_data)(struct txgbe_hw *hw); + s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw); + void (*get_rtrup2tc)(struct txgbe_hw *hw, u8 *map); + void (*disable_rx)(struct txgbe_hw *hw); + void (*enable_rx)(struct txgbe_hw *hw); + void (*set_ethertype_anti_spoofing)(struct txgbe_hw *hw, + bool enable, int vf); + s32 (*dmac_config)(struct txgbe_hw *hw); + s32 (*setup_eee)(struct txgbe_hw *hw, bool enable_eee); +}; + +struct txgbe_phy_operations { + s32 (*identify)(struct txgbe_hw *hw); + s32 (*identify_sfp)(struct txgbe_hw *hw); + s32 (*setup_sfp)(struct txgbe_hw *hw); + s32 (*init)(struct txgbe_hw *hw); + s32 (*reset)(struct txgbe_hw *hw); + s32 (*read_reg)(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + s32 (*write_reg)(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); + s32 (*read_reg_mdi)(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); + s32 (*write_reg_mdi)(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); + u32 (*setup_link)(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete); + u32 (*setup_link_speed)(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); + s32 (*get_firmware_version)(struct txgbe_hw *hw, + u16 *firmware_version); + s32 (*read_i2c_byte)(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); + s32 (*write_i2c_byte)(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); + s32 (*read_i2c_sff8472)(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); + s32 (*read_i2c_sff8636)(struct txgbe_hw *hw, u8 page, u8 byte_offset, + u8 *sff8636_data); + s32 (*read_i2c_eeprom)(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); + s32 (*read_i2c_sfp_phy)(struct txgbe_hw *hw, u16 byte_offset, + u16 *data); + s32 (*write_i2c_eeprom)(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); + s32 (*check_overtemp)(struct txgbe_hw *hw); +}; + +struct txgbe_eeprom_info { + struct txgbe_eeprom_operations ops; + enum txgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +enum txgbe_mac_type { + txgbe_mac_unknown = 0, + txgbe_mac_sp, + txgbe_mac_aml, + txgbe_mac_aml40 +}; + +struct txgbe_flash_info { + struct txgbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + +#define TXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct txgbe_mac_info { + enum txgbe_mac_type type; + + struct txgbe_mac_operations ops; + u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define TXGBE_MAX_MTA 128 +#define TXGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[TXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_shadow[TXGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool 
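The PHY ops table exposes raw I2C access to the module EEPROM. As an illustration, identifying whether an SFP/SFP+ module is present can be done by reading the SFF-8024 identifier byte at offset 0; the `0x03` value is standard SFF, not something defined in this header, and `txgbe_probe_sfp_id` is a hypothetical helper.

```c
/* Sketch: read the module identifier byte through the PHY ops table. */
static s32 txgbe_probe_sfp_id(struct txgbe_hw *hw, bool *is_sfp)
{
	u8 id = 0;
	s32 err;

	err = hw->phy.ops.read_i2c_eeprom(hw, 0, &id);
	if (err)
		return err;

	*is_sfp = (id == 0x03); /* SFF-8024: SFP/SFP+ transceiver */
	return 0;
}
```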
autotry_restart;
+	u8 flags;
+	struct txgbe_thermal_sensor_data thermal_sensor_data;
+	bool thermal_sensor_enabled;
+	struct txgbe_dmac_config dmac_config;
+	bool set_lben;
+	bool autoneg;
+};
+
+struct txgbe_phy_info {
+	struct txgbe_phy_operations ops;
+	enum txgbe_phy_type type;
+	u32 addr;
+	u32 id;
+	enum txgbe_sfp_type sfp_type;
+	u32 fiber_suppport_speed;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum txgbe_media_type media_type;
+	u32 phy_semaphore_mask;
+	u8 lan_id; /* to be deleted */
+	txgbe_autoneg_advertised autoneg_advertised;
+	enum txgbe_smart_speed smart_speed;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+	txgbe_physical_layer link_mode;
+};
+
+#include "txgbe_mbx.h"
+
+struct txgbe_mbx_operations {
+	void (*init_params)(struct txgbe_hw *hw);
+	s32 (*read)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf);
+	s32 (*write)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf);
+	s32 (*read_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+	s32 (*write_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+	s32 (*check_for_msg)(struct txgbe_hw *hw, u16 __always_unused mbx_id);
+	s32 (*check_for_ack)(struct txgbe_hw *hw, u16 __always_unused mbx_id);
+	s32 (*check_for_rst)(struct txgbe_hw *hw, u16 __always_unused mbx_id);
+};
+
+struct phytxeq {
+	u32 main; /* TX EQ main (bit[5:0]) */
+	u32 pre1; /* TX EQ pre1 (bit[5:0]) */
+	u32 pre2; /* TX EQ pre2 (bit[5:0]) */
+	u32 post; /* TX EQ post (bit[5:0]) */
+};
+
+struct txgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct txgbe_mbx_info {
+	struct txgbe_mbx_operations ops;
+	struct txgbe_mbx_stats stats;
+	u32 timeout;
+	u32 udelay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
+enum txgbe_reset_type {
+	TXGBE_LAN_RESET = 0,
+	TXGBE_SW_RESET,
+	TXGBE_GLOBAL_RESET
+};
+
+enum txgbe_link_status {
+	TXGBE_LINK_STATUS_NONE = 0,
+	TXGBE_LINK_STATUS_KX,
+	TXGBE_LINK_STATUS_KX4
+};
+
+struct txgbe_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+	struct txgbe_mac_info mac;
+	struct txgbe_addr_filter_info addr_ctrl;
+	struct txgbe_fc_info fc;
+	struct txgbe_phy_info phy;
+	struct txgbe_eeprom_info eeprom;
+	struct txgbe_flash_info flash;
+	struct txgbe_bus_info bus;
+	struct txgbe_mbx_info mbx;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	bool adapter_stopped;
+	int api_version;
+	enum txgbe_reset_type reset_type;
+	bool force_full_reset;
+	bool allow_unsupported_sfp;
+	bool wol_enabled;
+	bool fdir_enabled;
+	struct mtd_dev phy_dev;
+	enum txgbe_link_status link_status;
+	u16 tpid[8];
+	u16 oem_ssid;
+	u16 oem_svid;
+	bool f2c_mod_status; /* fiber to copper modules internal phy link status */
+	bool dac_sfp; /* force dac sfp to kr mode */
+	bool bypass_ctle; /* DAC cable length */
+	u32 q_tx_regs[512];
+};
+
+/* Error Codes */
+#define TXGBE_ERR 100
+#define TXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+/* (-TXGBE_ERR, TXGBE_ERR): reserved for non-txgbe defined error code */
+#define TXGBE_ERR_NOSUPP -(TXGBE_ERR + 0)
+#define TXGBE_ERR_EEPROM -(TXGBE_ERR + 1)
+#define TXGBE_ERR_EEPROM_CHECKSUM -(TXGBE_ERR + 2)
+#define TXGBE_ERR_PHY -(TXGBE_ERR + 3)
+#define TXGBE_ERR_CONFIG -(TXGBE_ERR + 4)
+#define TXGBE_ERR_PARAM -(TXGBE_ERR + 5)
+#define TXGBE_ERR_MAC_TYPE -(TXGBE_ERR + 6)
+#define TXGBE_ERR_UNKNOWN_PHY -(TXGBE_ERR + 7)
+#define TXGBE_ERR_LINK_SETUP -(TXGBE_ERR + 8)
+#define TXGBE_ERR_ADAPTER_STOPPED -(TXGBE_ERR + 9)
+#define TXGBE_ERR_INVALID_MAC_ADDR -(TXGBE_ERR + 10)
+#define TXGBE_ERR_DEVICE_NOT_SUPPORTED
-(TXGBE_ERR + 11) +#define TXGBE_ERR_MASTER_REQUESTS_PENDING -(TXGBE_ERR + 12) +#define TXGBE_ERR_INVALID_LINK_SETTINGS -(TXGBE_ERR + 13) +#define TXGBE_ERR_AUTONEG_NOT_COMPLETE -(TXGBE_ERR + 14) +#define TXGBE_ERR_RESET_FAILED -(TXGBE_ERR + 15) +#define TXGBE_ERR_SWFW_SYNC -(TXGBE_ERR + 16) +#define TXGBE_ERR_PHY_ADDR_INVALID -(TXGBE_ERR + 17) +#define TXGBE_ERR_I2C -(TXGBE_ERR + 18) +#define TXGBE_ERR_SFP_NOT_SUPPORTED -(TXGBE_ERR + 19) +#define TXGBE_ERR_SFP_NOT_PRESENT -(TXGBE_ERR + 20) +#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(TXGBE_ERR + 21) +#define TXGBE_ERR_NO_SAN_ADDR_PTR -(TXGBE_ERR + 22) +#define TXGBE_ERR_FDIR_REINIT_FAILED -(TXGBE_ERR + 23) +#define TXGBE_ERR_EEPROM_VERSION -(TXGBE_ERR + 24) +#define TXGBE_ERR_NO_SPACE -(TXGBE_ERR + 25) +#define TXGBE_ERR_OVERTEMP -(TXGBE_ERR + 26) +#define TXGBE_ERR_UNDERTEMP -(TXGBE_ERR + 27) +#define TXGBE_ERR_FC_NOT_NEGOTIATED -(TXGBE_ERR + 28) +#define TXGBE_ERR_FC_NOT_SUPPORTED -(TXGBE_ERR + 29) +#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE -(TXGBE_ERR + 30) +#define TXGBE_ERR_PBA_SECTION -(TXGBE_ERR + 31) +#define TXGBE_ERR_INVALID_ARGUMENT -(TXGBE_ERR + 32) +#define TXGBE_ERR_HOST_INTERFACE_COMMAND -(TXGBE_ERR + 33) +#define TXGBE_ERR_OUT_OF_MEM -(TXGBE_ERR + 34) +#define TXGBE_ERR_FEATURE_NOT_SUPPORTED -(TXGBE_ERR + 36) +#define TXGBE_ERR_EEPROM_PROTECTED_REGION -(TXGBE_ERR + 37) +#define TXGBE_ERR_FDIR_CMD_INCOMPLETE -(TXGBE_ERR + 38) +#define TXGBE_ERR_FLASH_LOADING_FAILED -(TXGBE_ERR + 39) +#define TXGBE_ERR_XPCS_POWER_UP_FAILED -(TXGBE_ERR + 40) +#define TXGBE_ERR_FW_RESP_INVALID -(TXGBE_ERR + 41) +#define TXGBE_ERR_PHY_INIT_NOT_DONE -(TXGBE_ERR + 42) +#define TXGBE_ERR_TIMEOUT -(TXGBE_ERR + 43) +#define TXGBE_ERR_TOKEN_RETRY -(TXGBE_ERR + 44) +#define TXGBE_ERR_REGISTER -(TXGBE_ERR + 45) +#define TXGBE_ERR_MBX -(TXGBE_ERR + 46) +#define TXGBE_ERR_MNG_ACCESS_FAILED -(TXGBE_ERR + 47) + +/** + * register operations + **/ +/* read register */ +#define TXGBE_DEAD_READ_RETRIES 10 +#define TXGBE_DEAD_READ_REG 0xdeadbeefU +#define TXGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL +#define TXGBE_FAILED_READ_REG 0xffffffffU +#define TXGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +#define TXGBE_LLDP_REG 0xf1000 +#define TXGBE_LLDP_ON 0x0000000f + +static inline bool TXGBE_REMOVED(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline u32 +txgbe_rd32(u8 __iomem *base) +{ + return readl(base); +} + +static inline u32 +rd32(struct txgbe_hw *hw, u32 reg) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base + reg); + + return val; +} + +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) + +static inline u32 +rd32m(struct txgbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base + reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +txgbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct txgbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + txgbe_wr32(base + reg, val); +} + +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + 
return; + + val = txgbe_rd32(base + reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + txgbe_wr32(base + reg, val); +} + +/* poll register */ +#define TXGBE_MDIO_TIMEOUT 1000 +#define TXGBE_I2C_TIMEOUT 1000 +#define TXGBE_SPI_TIMEOUT 1000 +static inline s32 +po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + + if ((value & mask) == (field & mask)) + break; + + if (loop-- <= 0) + break; + udelay(usecs); + } while (true); + + return (count - loop <= count ? 0 : TXGBE_ERR_TIMEOUT); +} + +#define TXGBE_WRITE_FLUSH(H) rd32(H, TXGBE_MIS_PWR) + +#endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c new file mode 100644 index 000000000000..83021c4788e2 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe.h" +#include +#include +#include +#include "txgbe_xsk.h" + +static void txgbe_disable_txr_hw(struct txgbe_adapter *adapter, + struct txgbe_ring *tx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = tx_ring->reg_idx; + u32 txdctl; + + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH); + + /* delay mechanism from txgbe_disable_tx */ + usleep_range(10000, 20000); + + txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx)); + + if (!(txdctl & TXGBE_PX_TR_CFG_ENABLE)) + return; + + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void txgbe_disable_rxr_hw(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = rx_ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + rxdctl &= ~TXGBE_PX_RR_CFG_RR_EN; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); + TXGBE_WRITE_FLUSH(hw); + + usleep_range(10000, 20000); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + if (!(rxdctl & TXGBE_PX_RR_CFG_RR_EN)) + return; + + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void txgbe_disable_txr(struct txgbe_adapter *adapter, + struct txgbe_ring *tx_ring) +{ + set_bit(__TXGBE_TX_DISABLED, &tx_ring->state); + txgbe_disable_txr_hw(adapter, tx_ring); +} + +static void txgbe_reset_txr_stats(struct txgbe_ring *tx_ring) +{ + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void txgbe_reset_rxr_stats(struct txgbe_ring *rx_ring) +{ + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); +} + +/** + * txgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function disables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. 
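A short sketch of how the accessors above compose in practice: `wr32m()` performs a read-modify-write of one field, and `po32m()` polls until a masked value matches or the time budget runs out. The register offset and bit values below are placeholders, not real TXGBE registers.

```c
/* Sketch: read/modify/write a control bit, then poll for completion. */
static s32 txgbe_example_reg_sequence(struct txgbe_hw *hw)
{
	/* set bit 0 of a hypothetical control register at 0x10000 */
	wr32m(hw, 0x10000, 0x1, 0x1);

	/* wait for bit 1 to read back as set: 100 polls, ~1000us total */
	return po32m(hw, 0x10000, 0x2, 0x2, 1000, 100);
}
```

A return value of 0 means the bit was observed; otherwise `po32m()` reports `TXGBE_ERR_TIMEOUT`.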
+ **/ +void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring) +{ + struct txgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + txgbe_disable_txr(adapter, tx_ring); + if (xdp_ring) + txgbe_disable_txr(adapter, xdp_ring); + txgbe_disable_rxr_hw(adapter, rx_ring); + + if (xdp_ring) + synchronize_rcu(); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + txgbe_clean_tx_ring(tx_ring); + if (xdp_ring) + txgbe_clean_tx_ring(xdp_ring); + txgbe_clean_rx_ring(rx_ring); + + txgbe_reset_txr_stats(tx_ring); + if (xdp_ring) + txgbe_reset_txr_stats(xdp_ring); + txgbe_reset_rxr_stats(rx_ring); +} + +/** + * txgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function enables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring) +{ + struct txgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); + + txgbe_configure_tx_ring(adapter, tx_ring); + if (xdp_ring) + txgbe_configure_tx_ring(adapter, xdp_ring); + txgbe_configure_rx_ring(adapter, rx_ring); + + clear_bit(__TXGBE_TX_DISABLED, &tx_ring->state); + if (xdp_ring) + clear_bit(__TXGBE_TX_DISABLED, &xdp_ring->state); +} + +struct xsk_buff_pool *txgbe_xsk_umem(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + bool xdp_on = READ_ONCE(adapter->xdp_prog); + int qid = ring->queue_index; + + if (!adapter->xsk_pools || !adapter->xsk_pools[qid] || + qid >= adapter->num_xsk_pools || !xdp_on || + !test_bit(qid, adapter->af_xdp_zc_qps)) + return NULL; + return adapter->xsk_pools[qid]; +} + +static int txgbe_alloc_xsk_umems(struct txgbe_adapter *adapter) +{ + if (adapter->xsk_pools) + return 0; + + adapter->num_xsk_pools_used = 0; + adapter->num_xsk_pools = adapter->num_rx_queues; + adapter->xsk_pools = kcalloc(adapter->num_xsk_pools, + sizeof(*adapter->xsk_pools), + GFP_KERNEL); + if (!adapter->xsk_pools) { + adapter->num_xsk_pools = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * txgbe_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached + * @adapter: adapter + * + * Returns true if any of the Rx rings has an AF_XDP UMEM attached + **/ +bool txgbe_xsk_any_rx_ring_enabled(struct txgbe_adapter *adapter) +{ + int i; + + if (!adapter->xsk_pools) + return false; + + for (i = 0; i < adapter->num_xsk_pools; i++) { + if (adapter->xsk_pools[i]) + return true; + } + + return false; +} + +static int txgbe_add_xsk_umem(struct txgbe_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + int err; + + err = txgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_pools[qid] = pool; + adapter->num_xsk_pools_used++; + + return 0; +} + +static void txgbe_remove_xsk_umem(struct txgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_pools[qid] = NULL; + adapter->num_xsk_pools_used--; + + if (adapter->num_xsk_pools == 0) { + kfree(adapter->xsk_pools); + adapter->xsk_pools = NULL; + adapter->num_xsk_pools = 0; + } +} + +static int txgbe_xsk_umem_enable(struct txgbe_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) +{ + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + 
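The disable/enable pair above is designed to bracket any per-queue reconfiguration while the netdev stays up; because Rx, Tx, and XDP Tx share one NAPI context, a single `napi_disable()` quiesces all three. A caller reshaping a ring would follow this pattern (the middle step is a placeholder for whatever actually changes):

```c
/* Sketch: quiesce one queue triplet, reconfigure, resume. */
static void txgbe_reconfigure_ring(struct txgbe_adapter *adapter, int ring)
{
	txgbe_txrx_ring_disable(adapter, ring); /* stop Tx/Rx/XDP Tx */

	/* ... change ring parameters, attach/detach a pool, etc. ... */

	txgbe_txrx_ring_enable(adapter, ring);  /* resume traffic */
}
```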
if (adapter->xsk_pools) { + if (qid >= adapter->num_xsk_pools) + return -EINVAL; + if (adapter->xsk_pools[qid]) + return -EBUSY; + } + + err = xsk_pool_dma_map(pool, &adapter->pdev->dev, TXGBE_RX_DMA_ATTR); + if (err) + return err; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + txgbe_txrx_ring_disable(adapter, qid); + + /*to avoid xsk fd get issue in some kernel version*/ + msleep(400); + + set_bit(qid, adapter->af_xdp_zc_qps); + err = txgbe_add_xsk_umem(adapter, pool, qid); + if (err) + return err; + + if (if_running) { + txgbe_txrx_ring_enable(adapter, qid); + + /* Kick start the NAPI context so that receiving will start */ + err = txgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int txgbe_xsk_umem_disable(struct txgbe_adapter *adapter, u16 qid) +{ + bool if_running; + + if (!adapter->xsk_pools || qid >= adapter->num_xsk_pools || + !adapter->xsk_pools[qid]) + return -EINVAL; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + txgbe_txrx_ring_disable(adapter, qid); + + clear_bit(qid, adapter->af_xdp_zc_qps); + + xsk_pool_dma_unmap(adapter->xsk_pools[qid], TXGBE_RX_DMA_ATTR); + txgbe_remove_xsk_umem(adapter, qid); + + if (if_running) + txgbe_txrx_ring_enable(adapter, qid); + + return 0; +} + +int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xsk_buff_pool *pool, + u16 qid) +{ + return pool ? txgbe_xsk_umem_enable(adapter, pool, qid) : + txgbe_xsk_umem_disable(adapter, qid); +} + +static int txgbe_run_xdp_zc(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = TXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct txgbe_ring *ring; + struct xdp_frame *xdpf; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = TXGBE_XDP_CONSUMED; + break; + } + ring = adapter->xdp_ring[smp_processor_id() % MAX_XDP_QUEUES]; + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = txgbe_xmit_xdp_ring(ring, xdpf); + + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? TXGBE_XDP_REDIR : TXGBE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = TXGBE_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +bool txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 count) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + dma_addr_t dma; + bool ok = true; + + /* nothing to do */ + if (!count) + return true; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
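`txgbe_xsk_umem_setup()` is the kind of handler a driver dispatches from its `ndo_bpf` callback for `XDP_SETUP_XSK_POOL`. That wiring is not part of this hunk, so the sketch below is an assumption about how the plumbing would look; `txgbe_xdp_sketch` is a hypothetical function name.

```c
/* Sketch: dispatch AF_XDP pool setup from ndo_bpf (assumed wiring). */
static int txgbe_xdp_sketch(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct txgbe_adapter *adapter = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_XSK_POOL:
		return txgbe_xsk_umem_setup(adapter, xdp->xsk.pool,
					    xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
```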
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(dma); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + count--; + } while (count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } + + return ok; +} + +static struct sk_buff *txgbe_construct_skb_zc(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi, + struct xdp_buff *xdp) +{ + struct xdp_buff *xdp_buffer = bi->xdp; + unsigned int metasize = xdp_buffer->data - xdp_buffer->data_meta; + unsigned int datasize = xdp_buffer->data_end - xdp_buffer->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp_buffer->data_end - xdp_buffer->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp_buffer->data - xdp_buffer->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp_buffer->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + xsk_buff_free(xdp_buffer); + bi->xdp = NULL; + + return skb; +} + +static void txgbe_inc_ntc(struct txgbe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); +} + +int txgbe_clean_rx_irq_zc(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct txgbe_adapter *adapter = q_vector->adapter; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + + while (likely(total_rx_packets < budget)) { + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + failure = failure || + !txgbe_alloc_rx_buffers_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + if (unlikely(!txgbe_test_staterr(rx_desc, + TXGBE_RXD_STAT_EOP))) { + struct txgbe_rx_buffer *next_bi; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + + txgbe_inc_ntc(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + next_bi->discard = true; + continue; + } + + if (unlikely(bi->discard)) { + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + bi->discard = false; + txgbe_inc_ntc(rx_ring); + continue; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); + xdp_res = txgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); + + if (xdp_res) { + if (xdp_res & (TXGBE_XDP_TX | TXGBE_XDP_REDIR)) + xdp_xmit |= xdp_res; + else + xsk_buff_free(bi->xdp); + + bi->xdp = NULL; + total_rx_packets++; + total_rx_bytes += size; + + 
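The allocation loop above, like the cleanup paths later, leans on a `txgbe_desc_unused()` helper whose definition is elsewhere in this series. The conventional formula, shown here as a sketch, keeps one slot deliberately empty so that head and tail never alias:

```c
/* Sketch of the customary desc-unused computation for a ring whose
 * indices wrap at ring->count; the driver's own helper may differ.
 */
static u16 txgbe_desc_unused_sketch(struct txgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
```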
cleaned_count++; + txgbe_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + skb = txgbe_construct_skb_zc(rx_ring, bi, bi->xdp); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + txgbe_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + txgbe_process_skb_fields(rx_ring, rx_desc, skb); + txgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + } + + if (xdp_xmit & TXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & TXGBE_XDP_TX) { + int index = smp_processor_id() % adapter->num_xdp_queues; + struct txgbe_ring *ring = adapter->xdp_ring[index]; + + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + + /* update tail pointer */ + wmb(); + writel(ring->next_to_use, ring->tail); + if (static_branch_unlikely(&txgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return failure ? budget : (int)total_rx_packets; +} + +void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + struct txgbe_rx_buffer *bi; + u16 i; + + for (i = 0; i < rx_ring->count; i++) { + bi = &rx_ring->rx_buffer_info[i]; + + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +static bool txgbe_xmit_zc(struct txgbe_ring *xdp_ring, unsigned int budget) +{ + unsigned int sent_frames = 0, total_bytes = 0; + union txgbe_tx_desc *tx_desc = NULL; + u16 ntu = xdp_ring->next_to_use; + struct txgbe_tx_buffer *tx_bi; + bool work_done = true; + struct xdp_desc desc; + dma_addr_t dma; + u32 cmd_type; + + while (budget-- > 0) { + if (unlikely(!txgbe_desc_unused(xdp_ring))) { + work_done = false; + break; + } + + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + + if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) + break; + + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, + desc.len); + + tx_bi = &xdp_ring->tx_buffer_info[ntu]; + tx_bi->bytecount = desc.len; + tx_bi->gso_segs = 1; + tx_bi->xdpf = NULL; + + tx_desc = TXGBE_TX_DESC(xdp_ring, ntu); + tx_desc->read.olinfo_status = 0; + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = txgbe_tx_cmd_type(tx_bi->tx_flags); + cmd_type |= desc.len | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(desc.len << TXGBE_TXD_PAYLEN_SHIFT); + + /* memory fence read desc */ + smp_wmb(); + tx_bi->next_to_watch = tx_desc; + tx_bi->next_eop = ntu; + + xdp_ring->next_rs_idx = ntu; + ntu++; + if (ntu == xdp_ring->count) + ntu = 0; + xdp_ring->next_to_use = ntu; + + sent_frames++; + total_bytes += tx_bi->bytecount; + } + if (tx_desc) { + cmd_type |= TXGBE_TXD_RS; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + /* update tail pointer */ + wmb(); + writel(xdp_ring->next_to_use, xdp_ring->tail); + xsk_tx_release(xdp_ring->xsk_pool); + + u64_stats_update_begin(&xdp_ring->syncp); + xdp_ring->stats.bytes += total_bytes; + xdp_ring->stats.packets += sent_frames; + u64_stats_update_end(&xdp_ring->syncp); + xdp_ring->q_vector->tx.total_bytes += total_bytes; + xdp_ring->q_vector->tx.total_packets += sent_frames; + } + + return (budget > 0) && work_done; +} + +static void 
txgbe_clean_xdp_tx_buffer(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + tx_ring->xdp_tx_active--; + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); + tx_bi->va = NULL; +} + +bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring) +{ + u32 next_rs_idx = tx_ring->next_rs_idx; + union txgbe_tx_desc *next_rs_desc; + u32 ntc = tx_ring->next_to_clean; + struct txgbe_tx_buffer *tx_bi; + u16 frames_ready = 0; + u32 xsk_frames = 0; + u16 i; + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_hw *hw = &adapter->hw; + u32 head = 0; + u32 temp = tx_ring->next_to_clean; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + head = *tx_ring->headwb_mem; + + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + /* we have caught up to head, no work left to do */ + if (temp == head) { + goto out_xmit; + } else if (head > temp && !(next_rs_idx >= temp && (next_rs_idx < head))) { + goto out_xmit; + } else if (!(next_rs_idx >= temp || (next_rs_idx < head))) { + goto out_xmit; + } else { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } else { + next_rs_desc = TXGBE_TX_DESC(tx_ring, next_rs_idx); + if (next_rs_desc->wb.status & + cpu_to_le32(TXGBE_TXD_STAT_DD)) { + if (next_rs_idx >= ntc) + frames_ready = next_rs_idx - ntc; + else + frames_ready = next_rs_idx + tx_ring->count - ntc; + } + } + + if (!frames_ready) + goto out_xmit; + + if (likely(!tx_ring->xdp_tx_active)) { + xsk_frames = frames_ready; + } else { + for (i = 0; i < frames_ready; i++) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ++ntc; + if (ntc >= tx_ring->count) + ntc = 0; + } + } + + tx_ring->next_to_clean += frames_ready; + if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) + tx_ring->next_to_clean -= tx_ring->count; + + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + +out_xmit: + return txgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); +} + +int txgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 __maybe_unused flags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_ring *ring; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return -ENETDOWN; + if (!READ_ONCE(adapter->xdp_prog)) + return -ENXIO; + + if (qid >= adapter->num_xdp_queues) + return -ENXIO; + + if (!adapter->xsk_pools || !adapter->xsk_pools[qid]) + return -ENXIO; + + ring = adapter->xdp_ring[qid]; + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + if (likely(napi_schedule_prep(&ring->q_vector->napi))) + __napi_schedule(&ring->q_vector->napi); + } + + return 0; +} + +void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + unsigned long size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct txgbe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + txgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring 
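For context on how `txgbe_xsk_wakeup()` below gets invoked: with the AF_XDP need-wakeup feature, a zero-copy sender in user space kicks the socket with an empty `sendto()`, which lands in the driver's wakeup handler and schedules NAPI. This snippet follows standard AF_XDP usage and is not txgbe-specific.

```c
/* Sketch (user space): kick an AF_XDP socket so the driver's NAPI
 * runs and drains the Tx ring. No payload is transferred.
 */
#include <sys/socket.h>

static void kick_tx(int xsk_fd)
{
	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}
```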
 */
+	memset(tx_ring->desc, 0, tx_ring->size);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h
new file mode 100644
index 000000000000..21ccb06e1a52
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_xsk.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_TXRX_COMMON_H_
+#define _TXGBE_TXRX_COMMON_H_
+
+#include "txgbe.h"
+
+#ifndef TXGBE_TXD_CMD
+#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \
+		       TXGBE_TXD_RS)
+#endif
+
+#define TXGBE_XDP_PASS 0
+#define TXGBE_XDP_CONSUMED BIT(0)
+#define TXGBE_XDP_TX BIT(1)
+#define TXGBE_XDP_REDIR BIT(2)
+
+int txgbe_xmit_xdp_ring(struct txgbe_ring *ring, struct xdp_frame *xdpf);
+
+void txgbe_txrx_ring_disable(struct txgbe_adapter *adapter, int ring);
+void txgbe_txrx_ring_enable(struct txgbe_adapter *adapter, int ring);
+
+struct xsk_buff_pool *txgbe_xsk_umem(struct txgbe_adapter *adapter,
+				     struct txgbe_ring *ring);
+int txgbe_xsk_umem_setup(struct txgbe_adapter *adapter, struct xsk_buff_pool *umem,
+			 u16 qid);
+
+bool txgbe_alloc_rx_buffers_zc(struct txgbe_ring *rx_ring, u16 cleaned_count);
+
+int txgbe_clean_rx_irq_zc(struct txgbe_q_vector *q_vector,
+			  struct txgbe_ring *rx_ring,
+			  const int budget);
+void txgbe_xsk_clean_rx_ring(struct txgbe_ring *rx_ring);
+bool txgbe_clean_xdp_tx_irq(struct txgbe_q_vector *q_vector,
+			    struct txgbe_ring *tx_ring);
+
+int txgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 __maybe_unused flags);
+void txgbe_xsk_clean_tx_ring(struct txgbe_ring *tx_ring);
+bool txgbe_xsk_any_rx_ring_enabled(struct txgbe_adapter *adapter);
+
+bool txgbe_cleanup_headers(struct txgbe_ring __maybe_unused *rx_ring,
+			   union txgbe_rx_desc *rx_desc,
+			   struct sk_buff *skb);
+void txgbe_process_skb_fields(struct txgbe_ring *rx_ring,
+			      union txgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb);
+void txgbe_rx_skb(struct txgbe_q_vector *q_vector,
+		  struct txgbe_ring *rx_ring,
+		  union txgbe_rx_desc *rx_desc,
+		  struct sk_buff *skb);
+
+#endif /* _TXGBE_TXRX_COMMON_H_ */
--
Gitee

From 2ef70eb56d26f3a893075cbe3b5466bbda0222d3 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen
Date: Sun, 28 Sep 2025 15:56:53 +0800
Subject: [PATCH 03/16] anolis: net: txgbe: fix display of supported Wake-on
 modes in ethtool

ANBZ: #26488

Change the WoL status reported by ethtool so that it reflects what the
hardware actually supports: only advertise WAKE_MAGIC, and only on
adapters whose subsystem ID indicates WoL support.
Signed-off-by: Duanqiang Wen
---
 .../net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 9a87fa1b6af8..04e0c26b05bf 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -2948,14 +2948,15 @@ static void txgbe_get_wol(struct net_device *netdev,
 	struct txgbe_adapter *adapter = netdev_priv(netdev);
 	struct txgbe_hw *hw = &adapter->hw;
 
-	wol->supported = WAKE_UCAST | WAKE_MCAST |
-			 WAKE_BCAST | WAKE_MAGIC;
+	wol->supported = 0;
 	wol->wolopts = 0;
 
-	if (!device_can_wakeup(pci_dev_to_dev(adapter->pdev)))
+	if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP)
 		return;
 
-	if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP)
+	wol->supported = WAKE_MAGIC;
+
+	if (!device_can_wakeup(pci_dev_to_dev(adapter->pdev)))
 		return;
 
 	if (adapter->wol & TXGBE_PSR_WKUP_CTL_EX)
@@ -2981,12 +2982,6 @@ static int txgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 	adapter->wol = 0;
 
-	if (wol->wolopts & WAKE_UCAST)
-		adapter->wol |= TXGBE_PSR_WKUP_CTL_EX;
-	if (wol->wolopts & WAKE_MCAST)
-		adapter->wol |= TXGBE_PSR_WKUP_CTL_MC;
-	if (wol->wolopts & WAKE_BCAST)
-		adapter->wol |= TXGBE_PSR_WKUP_CTL_BC;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= TXGBE_PSR_WKUP_CTL_MAG;
 
--
Gitee

From 6772d959cd1c367e7833ba20862fe2dc8951baf7 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen
Date: Sun, 28 Sep 2025 16:26:09 +0800
Subject: [PATCH 04/16] anolis: net: txgbe: fix DAC cable link down

ANBZ: #26488

Fix an issue where the link could fail to come up when the DAC cable
is plugged and unplugged while the port is configured in force mode;
the Tx FFE parameters and bypass_ctle need to be updated accordingly.
After FEC polling, switch the FEC mode to RS if it is supported, and
add a PHY interrupt so that the local end can synchronize its
configuration when the DAC cable is plugged or unplugged at the
remote end.
Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_aml.c | 3 ++ .../net/ethernet/wangxun/txgbe/txgbe_e56.c | 54 +++++++++++++------ .../net/ethernet/wangxun/txgbe/txgbe_e56.h | 1 + .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 3 ++ 4 files changed, 46 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c index 76dd9223e90e..414f9eb79b75 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -122,8 +122,11 @@ static s32 txgbe_setup_mac_link_aml(struct txgbe_hw *hw, goto out; if (ret_status == TXGBE_ERR_TIMEOUT) { + adapter->link_valid = false; adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; goto out; + } else { + adapter->link_valid = true; } if (speed == TXGBE_LINK_SPEED_25GB_FULL) { diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c index e9f65652273a..885212601105 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -139,6 +139,34 @@ int txgbe_e56_get_temp(struct txgbe_hw *hw, int *temp) return 0; } +static void txgbe_e56_ovrd_symdata(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + int i; + + for (i = 0; i < 4; i++) { + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_SYMDATA_I, 0x1); + txgbe_wr32_ephy(hw, addr, rdata); + } +} + +static void txgbe_e56_clear_symdata(struct txgbe_hw *hw) +{ + u32 addr; + u32 rdata = 0; + int i; + + for (i = 0; i < 4; i++) { + addr = E56PHY_TXS_PIN_OVRDEN_0_ADDR + (E56PHY_TXS_OFFSET * i); + rdata = rd32_ephy(hw, addr); + txgbe_field_set(&rdata, E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_SYMDATA_I, 0x0); + txgbe_wr32_ephy(hw, addr, rdata); + } +} + u32 txgbe_e56_cfg_40g(struct txgbe_hw *hw) { u32 addr; @@ -2853,11 +2881,8 @@ static int txgbe_e56_rxs_calib_adapt_seq(struct txgbe_hw *hw, u32 speed) u32 rdata = 0x0; u32 bypass_ctle = true; - if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || - hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) + if (hw->dac_sfp) bypass_ctle = false; - else - bypass_ctle = true; if (hw->mac.type == txgbe_mac_aml) { msleep(350); @@ -2945,15 +2970,8 @@ static int txgbe_e56_rxs_calib_adapt_seq(struct txgbe_hw *hw, u32 speed) rdata = rd32_ephy(hw, addr); usleep_range(500, 1000); EPHY_RREG(E56G__PMD_CTRL_FSM_RX_STAT_0); - if (timer++ > PHYINIT_TIMEOUT) { - //Do SEQ::RX_DISABLE - rdata = 0; - addr = E56PHY_PMD_CFG_0_ADDR; - rdata = rd32_ephy(hw, addr); - txgbe_field_set(&rdata, E56PHY_PMD_CFG_0_RX_EN_CFG, 0x0); - txgbe_wr32_ephy(hw, addr, rdata); + if (timer++ > PHYINIT_TIMEOUT) return TXGBE_ERR_TIMEOUT; - } } //RXS ADC adaptation sequence @@ -3656,6 +3674,8 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) goto out; } + txgbe_e56_ovrd_symdata(hw); + value = txgbe_rd32_epcs(hw, SR_AN_CTRL); txgbe_field_set(&value, 12, 12, 0); txgbe_wr32_epcs(hw, SR_AN_CTRL, value); @@ -3909,6 +3929,7 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) txgbe_wr32_ephy(hw, PMD_CFG0, value); } + txgbe_e56_clear_symdata(hw); hw->mac.ops.enable_tx_laser(hw); status = txgbe_e56_config_rx(hw, speed); @@ -4024,17 +4045,20 @@ int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) u32 speed; do { - if (!(adapter->fec_link_mode & BIT(j))) { + if (!(adapter->fec_link_mode & BIT(j % 3))) { j += 1; continue; } - adapter->cur_fec_link = 
adapter->fec_link_mode & BIT(j); + adapter->cur_fec_link = adapter->fec_link_mode & BIT(j % 3); mutex_lock(&adapter->e56_lock); txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); mutex_unlock(&adapter->e56_lock); + if (j == 3) + break; + for (i = 0; i < 4; i++) { msleep(250); txgbe_e56_check_phy_link(hw, &speed, link_up); @@ -4043,7 +4067,7 @@ int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) } j += 1; - } while (j < 3); + } while (j < 4); return 0; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h index 381565570eb0..f1b03b66e232 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.h @@ -179,6 +179,7 @@ union txgbe_e56_cms_ana_ovrdval0 { #define E56PHY_TXS_WKUP_CNTDCC_WKUP_CNT_X32 FORMAT_NOPARENTHERSES(15, 8) #define E56PHY_TXS_PIN_OVRDEN_0_ADDR (E56PHY_TXS_BASE_ADDR + 0x0C) +#define E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_SYMDATA_I FORMAT_NOPARENTHERSES(19, 19) #define E56PHY_TXS_PIN_OVRDEN_0_OVRD_EN_TX0_EFUSE_BITS_I \ FORMAT_NOPARENTHERSES(28, 28) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 3fbcb1b56617..3846e29176ee 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -568,6 +568,9 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) if (status != 0) goto err_read_i2c_eeprom; + hw->bypass_ctle = true; + hw->dac_sfp = false; + /* ID Module * ========= * 0 SFP_DA_CU -- Gitee From 73bef3cae799f1ce567bf17f76a466e9abdbd758 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Sun, 28 Sep 2025 17:34:39 +0800 Subject: [PATCH 05/16] anolis: net: txgbe: fix pktgen cannot stop ANBZ: #26488 When using pktgen to send packet with count argument, the pktgen cmd cannot quit successfully and the CPU utilization would increase to 100%. The bug is introduced by delayed-releasing tx skbs/buffers mechanism. In some working scenarios, trigger irq per 2 seconds is a bit low, increase the trigger frequency to per 250ms. 
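A runnable model of the reclaim rule this patch adds: a completed tx buffer is released once the count of unmapped descriptors exceeds the reserve, or once it has been completed for at least 150 ms. The done_time field and the 150 ms threshold come from the diff below; the buffer struct and clock source are illustrative stand-ins for the driver's ktime_get():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define AGE_LIMIT_MS    150     /* threshold used in the patch */

struct tx_buffer {
        uint64_t done_time;     /* 0 = not yet completed */
};

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool should_free(const struct tx_buffer *b, int unmapped, int reserved)
{
        uint64_t ms = now_ms();

        /* mirrors: unmapped_descs > desc_reserved ||
         *          (done_time && ms - done_time >= 150) */
        return unmapped > reserved ||
               (b->done_time && ms - b->done_time >= AGE_LIMIT_MS);
}

int main(void)
{
        struct tx_buffer b = { .done_time = now_ms() - 200 };

        printf("stale buffer freed: %s\n",
               should_free(&b, 0, 64) ? "yes" : "no");
        return 0;
}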
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 2 + .../net/ethernet/wangxun/txgbe/txgbe_main.c | 46 ++++++++++++++++--- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 68ae39ff2f2c..e6bd324b47d1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -199,6 +199,7 @@ struct txgbe_tx_buffer { union txgbe_tx_desc *next_to_watch; u32 next_eop; unsigned long time_stamp; + u64 done_time; union { struct sk_buff *skb; /* XDP uses address ptr on irq_clean */ @@ -856,6 +857,7 @@ struct txgbe_adapter { struct mutex e56_lock; struct timer_list service_timer; + struct timer_list irq_timer; struct work_struct service_task; struct work_struct sfp_sta_task; struct work_struct temp_task; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index de872502c8b5..dee4459f9250 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "txgbe.h" #include "txgbe_dcb.h" @@ -674,12 +675,17 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, u32 size; unsigned int ntf; struct txgbe_tx_buffer *free_tx_buffer; - u32 unmapped_descs = 0; + s32 unmapped_descs = 0; bool first_dma; + ktime_t now; + u64 ms; if (test_bit(__TXGBE_DOWN, &adapter->state)) return true; + now = ktime_get(); + ms = ktime_to_ms(now); + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = TXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -712,6 +718,7 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; + tx_buffer->done_time = ms; /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; @@ -734,6 +741,8 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, tx_buffer = tx_ring->tx_buffer_info; tx_desc = TXGBE_TX_DESC(tx_ring, 0); } + + tx_buffer->done_time = ms; } /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; @@ -744,7 +753,6 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, tx_buffer = tx_ring->tx_buffer_info; tx_desc = TXGBE_TX_DESC(tx_ring, 0); } - /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); @@ -759,7 +767,8 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, free_tx_buffer = &tx_ring->tx_buffer_info[ntf]; ntf -= tx_ring->count; unmapped_descs = txgbe_desc_buf_unmapped(tx_ring, i, tx_ring->next_to_free); - while (unmapped_descs > adapter->desc_reserved) { + while ((unmapped_descs > adapter->desc_reserved) || + ((ms - free_tx_buffer->done_time >= 150) && (free_tx_buffer->done_time != 0))) { if (ring_is_xdp(tx_ring)) { if (free_tx_buffer->xdpf) { xdp_return_frame(free_tx_buffer->xdpf); @@ -788,6 +797,7 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, dma_unmap_len_set(free_tx_buffer, len, 0); free_tx_buffer->va = NULL; + free_tx_buffer->done_time = 0; first_dma = false; } else { /* unmap any remaining paged data */ @@ -799,6 +809,7 @@ static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, dma_unmap_len_set(free_tx_buffer, len, 0); free_tx_buffer->va = NULL; } + free_tx_buffer->done_time = 0; } free_tx_buffer++; @@ -5237,6 +5248,7 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) adapter->link_check_timeout = jiffies; 
hw->f2c_mod_status = false; mod_timer(&adapter->service_timer, jiffies); + mod_timer(&adapter->irq_timer, jiffies); /* PCIE recovery: record lan status */ if (hw->bus.lan_id == 0) { @@ -5598,6 +5610,7 @@ static void txgbe_disable_device(struct txgbe_adapter *adapter) adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); + del_timer_sync(&adapter->irq_timer); adapter->flags2 &= ~TXGBE_FLAG2_SERVICE_RUNNING; hw->f2c_mod_status = false; @@ -6912,8 +6925,7 @@ static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) } } -void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, - u64 qmask) +static void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, u64 qmask) { u32 mask; @@ -6938,7 +6950,6 @@ void txgbe_irq_rearm_queues(struct txgbe_adapter *adapter, static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) { int i; - u64 eics = 0; /* If we're down or resetting, just bail */ if (test_bit(__TXGBE_DOWN, &adapter->state) || @@ -6953,6 +6964,18 @@ static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) for (i = 0; i < adapter->num_xdp_queues; i++) set_check_for_tx_hang(adapter->xdp_ring[i]); } +} + +static void txgbe_trigger_irq_subtask(struct txgbe_adapter *adapter) +{ + int i; + u64 eics = 0; + + /* If we're down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { /* get one bit for every active tx/rx interrupt vector */ @@ -7771,6 +7794,16 @@ static void txgbe_amlit_temp_subtask(struct txgbe_adapter *adapter) mutex_unlock(&adapter->e56_lock); } +static void txgbe_irq_timer(struct timer_list *t) +{ + struct txgbe_adapter *adapter = from_timer(adapter, t, irq_timer); + unsigned long next_event_offset = HZ / 4; + + mod_timer(&adapter->irq_timer, next_event_offset + jiffies); + + txgbe_trigger_irq_subtask(adapter); +} + static void txgbe_reset_subtask(struct txgbe_adapter *adapter) { u32 reset_flag = 0; @@ -10532,6 +10565,7 @@ static int txgbe_probe(struct pci_dev *pdev, sizeof(struct txgbe_5tuple_filter_info)); timer_setup(&adapter->service_timer, txgbe_service_timer, 0); + timer_setup(&adapter->irq_timer, txgbe_irq_timer, 0); if (TXGBE_REMOVED(hw->hw_addr)) { err = -EIO; -- Gitee From 19408687c1c0f59d96e55f827f93d876ef3eadda Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Sun, 28 Sep 2025 17:59:22 +0800 Subject: [PATCH 06/16] anolis: net: txgbe: fix missing PFC mask configuration on AML ANBZ: #26488 when set 'lldptool -T -i dev PFC enable=1,2,4', pfc priority0 still cause flow control. So add write pfc_mask(0x1f6c[0:7]) on amlite to isolate disabled priorities. 
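The register stores the set of priorities on which PFC is disabled, so the driver writes the complement of the enable bitmap, masked to the low byte. A runnable model of that arithmetic (register I/O elided):

#include <stdint.h>
#include <stdio.h>

#define PFC_DIS_MASK_VAL 0xff   /* low byte of 0x1f6c, one bit per priority */

static uint32_t pfc_disable_bits(uint8_t pfc_en)
{
        /* e.g. lldptool enable=1,2,4 -> pfc_en = 0x16 -> disable = 0xe9 */
        return (uint32_t)(~pfc_en) & PFC_DIS_MASK_VAL;
}

int main(void)
{
        uint8_t en = (1 << 1) | (1 << 2) | (1 << 4);

        printf("pfc_en=0x%02x -> disable mask=0x%02x\n",
               en, (unsigned)pfc_disable_bits(en));
        return 0;
}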
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c | 3 +++ drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c index aff3ff47505a..16000cbeb17c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_dcb.c @@ -355,6 +355,9 @@ s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map) for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) wr32(hw, TXGBE_RDB_RFCV(i), reg); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + wr32m(hw, TXGBE_PFC_DIS_MASK, TXGBE_PFC_DIS_MASK_VAL, ~pfc_en); + /* Configure flow control refresh threshold value */ wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 451c5936c662..194dd77cde6b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -956,6 +956,8 @@ struct txgbe_thermal_sensor_data { #define TXGBE_RDB_PFCMACDAH 0x19214 #define TXGBE_RDB_TXSWERR 0x1906C #define TXGBE_RDB_TXSWERR_TB_FREE 0x3FF +#define TXGBE_PFC_DIS_MASK 0x11F6c +#define TXGBE_PFC_DIS_MASK_VAL 0xff /* rdb_pl_cfg reg mask */ #define TXGBE_RDB_PL_CFG_L4HDR 0x2 #define TXGBE_RDB_PL_CFG_L3HDR 0x4 -- Gitee From 2aec141361d3b0ac9d038f2484b5f7b1f92c9ed4 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 09:57:53 +0800 Subject: [PATCH 07/16] anolis: net: txgbe: fix VF multicast address removal not taking effect ANBZ: #26488 The VF continued receiving multicast traffic for removed addresses, regardless of whether it was bound to DPDK or to the Linux kernel driver. The PF driver's mailbox handling for the VF update-multicast command only re-added addresses from the message buffer, failing to remove stale entries. Unlike the PF's own multicast updates (which flush/reload the entire table), VF commands left old addresses intact. To solve this bug, we modify the PF multicast update flow: flush the entire table when processing VF mailbox commands and then repopulate it by iterating through all VFs' multicast addresses. Known limitations of VF multicast: 1) Shared mcast table between PF and all VFs: when any VF configures a multicast address, if other VFs have multicast promiscuous enabled, they will also receive multicast packets destined for that address. 2) Ineffective address deletion: when attempting to delete a multicast address, if either the PF or another VF has configured this same address, the deletion operation will not actually take effect. 3) Hash collision possibility: the multicast address table uses only 12 bits of the multicast MAC address for bitmap mapping, while multicast addresses contain 23 significant bits. This mismatch may cause different addresses to map to the same table entry.
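For reference, a runnable model of the MTA mapping behind limitation 3: 12 bits of the hash select one bit in a 128-entry by 32-bit table (reg = hash[11:5], bit = hash[4:0], as in the code removed below), so hashes differing only above bit 11 collide; shadow[] stands in for mta_shadow:

#include <stdint.h>
#include <stdio.h>

static uint32_t shadow[128];            /* mirrors hw->mac.mta_shadow */

static void mta_set(uint16_t hash)
{
        uint32_t reg = (hash >> 5) & 0x7F;  /* word in the table */
        uint32_t bit = hash & 0x1F;         /* bit in that word  */

        shadow[reg] |= 1u << bit;       /* then wr32(TXGBE_PSR_MC_TBL(reg)) */
}

int main(void)
{
        mta_set(0x0ABC);
        mta_set(0x1ABC);        /* differs only above bit 11: same slot */
        printf("reg 0x%02x = 0x%08x\n",
               (0x0ABC >> 5) & 0x7F,
               (unsigned)shadow[(0x0ABC >> 5) & 0x7F]);
        return 0;
}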
Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 14 ++++++++------ .../net/ethernet/wangxun/txgbe/txgbe_sriov.c | 18 ++++++------------ 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index dee4459f9250..6438b63ceae2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -4063,16 +4063,21 @@ int txgbe_write_mc_addr_list(struct net_device *netdev) struct netdev_hw_addr *ha; u8 *addr_list = NULL; int addr_count = 0; + bool clear = false; if (!hw->mac.ops.update_mc_addr_list) return -ENOMEM; if (!netif_running(netdev)) return 0; - +#ifdef CONFIG_PCI_IOV + txgbe_restore_vf_multicasts(adapter); +#else + clear = true; +#endif if (netdev_mc_empty(netdev)) { hw->mac.ops.update_mc_addr_list(hw, NULL, 0, - txgbe_addr_list_itr, true); + txgbe_addr_list_itr, clear); } else { ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list); @@ -4081,12 +4086,9 @@ int txgbe_write_mc_addr_list(struct net_device *netdev) addr_count = netdev_mc_count(netdev); hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, - txgbe_addr_list_itr, true); + txgbe_addr_list_itr, clear); } -#ifdef CONFIG_PCI_IOV - txgbe_restore_vf_multicasts(adapter); -#endif return addr_count; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c index bd18650053af..d4012e8a4659 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -337,9 +337,6 @@ static int txgbe_set_vf_multicasts(struct txgbe_adapter *adapter, struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; struct txgbe_hw *hw = &adapter->hw; int i; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); /* only so many hash values supported */ @@ -357,18 +354,12 @@ static int txgbe_set_vf_multicasts(struct txgbe_adapter *adapter, for (i = 0; i < entries; i++) vfinfo->vf_mc_hashes[i] = hash_list[i]; - for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { - vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; - vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; - /* errata 5: maintain a copy of the register table conf */ - mta_reg = hw->mac.mta_shadow[vector_reg]; - mta_reg |= (1 << vector_bit); - hw->mac.mta_shadow[vector_reg] = mta_reg; - wr32(hw, TXGBE_PSR_MC_TBL(vector_reg), mta_reg); - } vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + /* Sync up the PF and VF in the same MTA table */ + txgbe_write_mc_addr_list(adapter->netdev); + return 0; } @@ -380,6 +371,9 @@ void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter) u32 vector_bit; u32 vector_reg; + /* Clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + for (i = 0; i < adapter->num_vfs; i++) { u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(i)); -- Gitee From 55740775885b2c4caf64a5e8d64dac8bc2c91f3c Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 10:09:45 +0800 Subject: [PATCH 08/16] anolis: net: txgbe: fix bond port rate is unknown when linkup ANBZ: #26488 For 25G, when using the dual-speed optical module to link to 10G, the driver first reported the link up and then re-attempted to configure the phy to 25G, resulting in the bond port being unable to obtain the speed at this time. 
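A minimal sketch of the reporting split the fix introduces, with illustrative state names rather than the driver's: while reconfiguration is pending, only link-down transitions are propagated (so bonding never samples a half-configured "up" with unknown speed), and link-up is reported once configuration has settled:

#include <stdbool.h>
#include <stdio.h>

struct port {
        bool phys_up;
        bool configuring;       /* plays the role of NEED_LINK_CONFIG */
        bool reported_up;
};

static void linkdown_subtask(struct port *p)
{
        if (!p->phys_up && p->reported_up) {
                p->reported_up = false;
                puts("carrier off");
        }
}

static void watchdog_subtask(struct port *p)
{
        linkdown_subtask(p);
        if (p->phys_up && !p->configuring && !p->reported_up) {
                p->reported_up = true;
                puts("carrier on, speed valid");
        }
}

int main(void)
{
        struct port p = { .phys_up = true, .configuring = true };

        watchdog_subtask(&p);   /* still configuring: stays down */
        p.configuring = false;
        watchdog_subtask(&p);   /* now reports up with a valid speed */
        return 0;
}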
Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 6438b63ceae2..b166202ae86d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -7394,6 +7394,36 @@ static void txgbe_spoof_check(struct txgbe_adapter *adapter) #endif /* CONFIG_PCI_IOV */ +/** + * txgbe_linkdown_subtask - check link state and handle link down + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_linkdown_subtask(struct txgbe_adapter *adapter) +{ + u32 __maybe_unused value = 0; + struct txgbe_hw *hw = &adapter->hw; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (!(adapter->flags2 & TXGBE_FLAG2_LINK_DOWN)) + txgbe_watchdog_update_link(adapter); + + if (!adapter->link_up) + txgbe_watchdog_link_is_down(adapter); + +#ifdef CONFIG_PCI_IOV + txgbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + + txgbe_update_stats(adapter); + + txgbe_watchdog_flush_tx(adapter); +} + /** * txgbe_watchdog_subtask - check and bring link up * @adapter - pointer to the device adapter structure **/ @@ -8094,7 +8124,7 @@ static void txgbe_service_task(struct work_struct *work) hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core0 || hw->phy.sfp_type == txgbe_qsfp_type_40g_cu_core1 || txgbe_is_backplane(hw))) - txgbe_watchdog_subtask(adapter); + txgbe_linkdown_subtask(adapter); txgbe_sfp_link_config_subtask(adapter); txgbe_sfp_reset_eth_phy_subtask(adapter); txgbe_check_overtemp_subtask(adapter); -- Gitee From 718cc0fe322710d43650d158986e140815ea4884 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 10:42:31 +0800 Subject: [PATCH 09/16] anolis: net: txgbe: fix can't read rx_pb_n_pxoff ANBZ: #26488 After turning on DCB and setting PFC, rx_pb_n_pxoff (n=1~7) stays 0 even after PFC packets are received, because amlite uses 0x01FA0 + [0..7 * 0x4] and 0x01FC0 + [0..7 * 0x4] to record rx_pb_n_pxoff and rx_pb_n_pxon.
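The address math for those per-priority counters, using the offsets this patch adds to txgbe_type.h: one register per user priority at a 4-byte stride, in place of the older select-then-read scheme through TXGBE_MMC_CONTROL (runnable, prints the register map only):

#include <stdio.h>

#define AML_MAC_PXOFFRXC(i)     (0x11FA0 + ((i) * 4))  /* per-UP XOFF rx */
#define AML_MAC_PXONRXC(i)      (0x11FC0 + ((i) * 4))  /* per-UP XON rx  */

int main(void)
{
        for (int up = 0; up < 8; up++)
                printf("UP%d: pxoff @ 0x%05x, pxon @ 0x%05x\n",
                       up, AML_MAC_PXOFFRXC(up), AML_MAC_PXONRXC(up));
        return 0;
}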
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 2 - .../net/ethernet/wangxun/txgbe/txgbe_main.c | 43 ++++++++++--------- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 4 +- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 5fc14df3e80c..9f1e3305f51b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -186,8 +186,6 @@ s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw) rd32(hw, TXGBE_MAC_LXOFFRXC); for (i = 0; i < 8; i++) { - rd32(hw, TXGBE_RDB_PXONTXC(i)); - rd32(hw, TXGBE_RDB_PXOFFTXC(i)); rd32(hw, TXGBE_MAC_PXONRXC(i)); wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i << 16); rd32(hw, TXGBE_MAC_PXOFFRXC); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index b166202ae86d..1aa937e8669e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -459,12 +459,20 @@ static void txgbe_update_xoff_received(struct txgbe_adapter *adapter) for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { u32 pxoffrxc; - wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i << 16); - pxoffrxc = rd32(hw, TXGBE_MAC_PXOFFRXC); - hwstats->pxoffrxc[i] += pxoffrxc; - /* Get the TC for given UP */ - tc = netdev_get_prio_tc_map(adapter->netdev, i); - xoff[tc] += pxoffrxc; + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + pxoffrxc = rd32(hw, TXGBE_AML_MAC_PXOFFRXC(i)); + hwstats->pxoffrxc[i] = pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] = pxoffrxc; + } else { + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i << 16); + pxoffrxc = rd32(hw, TXGBE_MAC_PXOFFRXC); + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } } /* disarm tx queues that have received xoff frames */ @@ -2680,7 +2688,6 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) u32 eicr; u32 ecc; u32 value = 0; - u16 vid; eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); @@ -2707,18 +2714,12 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) { ERROR_REPORT1(TXGBE_ERROR_POLLING, "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); - pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); - if (vid == TXGBE_FAILED_READ_CFG_WORD) { - ERROR_REPORT1(TXGBE_ERROR_POLLING, "PCIe link is lost.\n"); - /*when pci lose link, not check over heat*/ - if (hw->bus.lan_id == 0) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - txgbe_service_event_schedule(adapter); - } else { - wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); - } + + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); } else { - adapter->flags2 |= TXGBE_FLAG2_DMA_RESET_REQUESTED; + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); } } @@ -6799,7 +6800,10 @@ void txgbe_update_stats(struct txgbe_adapter *adapter) hwstats->pxontxc[i] += rd32(hw, TXGBE_RDB_PXONTXC(i)); hwstats->pxofftxc[i] += rd32(hw, TXGBE_RDB_PXOFFTXC(i)); - hwstats->pxonrxc[i] += rd32(hw, TXGBE_MAC_PXONRXC(i)); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) + hwstats->pxonrxc[i] = rd32(hw, TXGBE_AML_MAC_PXONRXC(i)); + else + hwstats->pxonrxc[i] += rd32(hw, 
TXGBE_MAC_PXONRXC(i)); } hwstats->gprc += rd32(hw, TXGBE_PX_GPRC); @@ -7401,7 +7405,6 @@ static void txgbe_linkdown_subtask(struct txgbe_adapter *adapter) { u32 __maybe_unused value = 0; - struct txgbe_hw *hw = &adapter->hw; /* if interface is down do nothing */ if (test_bit(__TXGBE_DOWN, &adapter->state) || diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 194dd77cde6b..d3139c9d1d16 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -1636,6 +1636,8 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_MAC_LXOFFRXC 0x11988 #define TXGBE_MAC_PXONRXC(_i) (0x11E30 + ((_i) * 4)) /* 8 of these */ #define TXGBE_MAC_PXOFFRXC 0x119DC +#define TXGBE_AML_MAC_PXONRXC(_i) (0x11FC0 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_AML_MAC_PXOFFRXC(_i) (0x11FA0 + ((_i) * 4)) /* 8 of these */ #define TXGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 #define TXGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 #define TXGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 @@ -1647,7 +1649,7 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 #define TXGBE_MMC_CONTROL 0x11800 #define TXGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ -#define TXGBE_MMC_CONTROL_UP 0x700 +#define TXGBE_MMC_CONTROL_UP 0x70000 /********************************* BAR registers ***************************/ /* Interrupt Registers */ -- Gitee From 5d6bb2f94869f08840b90377bc1b2a408b341fdb Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 14:11:41 +0800 Subject: [PATCH 10/16] anolis: net: txgbe: fix i2c issues ANBZ: #26488 In txgbe, 'ethtool -m' on a port with a DAC module returned -EINVAL, because the i2c info was not recorded when the SFP was identified. And if reinit_locked is repeated, the i2c-info recording flow may hit i2c read errors while txgbe_down() runs, so skip the recording when the port is down. Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 81 ++++++++++--------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 3846e29176ee..0328f55177d3 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -815,7 +815,7 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) if (cable_tech & (TXGBE_SFF_DA_PASSIVE_CABLE | TXGBE_SFF_DA_ACTIVE_CABLE)) { status = 0; - goto out; + goto sp_record; } /* Verify supported 1G SFP modules */ @@ -831,50 +831,53 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) goto out; } } - /*record eeprom info*/ - status = hw->phy.ops.read_i2c_eeprom(hw, - TXGBE_SFF_SFF_8472_COMP, - &sff8472_rev); - if (status != 0) - goto err_read_i2c_eeprom; +sp_record: + if (hw->mac.type == txgbe_mac_sp && + !test_bit(__TXGBE_DOWN, &adapter->state)) { + /*record eeprom info*/ + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + goto err_read_i2c_eeprom; - /* addressing mode is not supported */ - status = hw->phy.ops.read_i2c_eeprom(hw, - TXGBE_SFF_SFF_8472_SWAP, - &addr_mode); - if (status != 0) - goto err_read_i2c_eeprom; + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + goto err_read_i2c_eeprom; - if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { - e_err(drv, "Address change required to access page 0xA2,"); - e_err(drv, "but not supported. 
Please report the module type to the driver maintainers.\n"); - page_swap = true; - } + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2,"); + e_err(drv, "but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } - if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || - !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { - /* We have a SFP, but it does not support SFF-8472 */ - adapter->eeprom_type = ETH_MODULE_SFF_8079; - adapter->eeprom_len = ETH_MODULE_SFF_8079_LEN; - } else { - /* We have a SFP which supports a revision of SFF-8472. */ - adapter->eeprom_type = ETH_MODULE_SFF_8472; - adapter->eeprom_len = ETH_MODULE_SFF_8472_LEN; - } - for (i = 0; i < adapter->eeprom_len; i++) { - if (i < ETH_MODULE_SFF_8079_LEN) - status = hw->phy.ops.read_i2c_eeprom(hw, i, - &databyte); - else - status = hw->phy.ops.read_i2c_sff8472(hw, i, - &databyte); + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { + /* We have a SFP, but it does not support SFF-8472 */ + adapter->eeprom_type = ETH_MODULE_SFF_8079; + adapter->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + adapter->eeprom_type = ETH_MODULE_SFF_8472; + adapter->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + for (i = 0; i < adapter->eeprom_len; i++) { + if (i < ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, + &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, + &databyte); - if (status != 0) - goto err_read_i2c_eeprom; + if (status != 0) + goto err_read_i2c_eeprom; - adapter->i2c_eeprom[i] = databyte; + adapter->i2c_eeprom[i] = databyte; + } } - out: hw->mac.ops.release_swfw_sync(hw, swfw_mask); -- Gitee From 55939b6467d64a4ef7d2a1e6ac2cd4096a9481e1 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 15:01:59 +0800 Subject: [PATCH 11/16] anolis: net: txgbe: change default tx queue depth ANBZ: #26488 After 'ethtool -G ethx tx 128', the tx ring times out, because amlite reserves 192 descriptor entries, and a ring depth of 128 is less than the reserved number.
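A runnable model of the ring-size validation after this change: the request is clamped to [min, max] and rounded up to the descriptor multiple, with the larger AML minimum keeping the ring above the 192 reserved descriptors. The multiple of 8 is an assumed value for TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE, used for illustration only:

#include <stdint.h>
#include <stdio.h>

#define MIN_TXD         256
#define MIN_TXD_AML     512
#define MAX_TXD         8192
#define DESC_MULTIPLE   8       /* assumed descriptor multiple */

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

static uint32_t tx_ring_size(uint32_t requested, int is_aml)
{
        uint32_t min = is_aml ? MIN_TXD_AML : MIN_TXD;
        uint32_t n = clamp_u32(requested, min, MAX_TXD);

        /* round up to the multiple, like the kernel's ALIGN() */
        return (n + DESC_MULTIPLE - 1) & ~(uint32_t)(DESC_MULTIPLE - 1);
}

int main(void)
{
        printf("request 128 on AML -> %u\n", (unsigned)tx_ring_size(128, 1));
        printf("request 128 on SP  -> %u\n", (unsigned)tx_ring_size(128, 0));
        return 0;
}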
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 3 ++- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index e6bd324b47d1..2dca2b2e6a3d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -47,7 +47,8 @@ DECLARE_STATIC_KEY_FALSE(txgbe_xdp_locking_key); #define TXGBE_DEFAULT_TXD 1024 #define TXGBE_DEFAULT_TX_WORK 256 #define TXGBE_MAX_TXD 8192 -#define TXGBE_MIN_TXD 128 +#define TXGBE_MIN_TXD 256 +#define TXGBE_MIN_TXD_AML 512 #define TXGBE_MAX_TX_WORK 65535 #if (PAGE_SIZE < 8192) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 04e0c26b05bf..a0908d00fe6a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -1576,15 +1576,22 @@ static int txgbe_set_ringparam(struct net_device *netdev, { struct txgbe_ring *tx_ring = NULL, *rx_ring = NULL; struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; u32 new_rx_count, new_tx_count; int i, j, err = 0; if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - TXGBE_MIN_TXD, TXGBE_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + if (hw->mac.type == txgbe_mac_aml || hw->mac.type == txgbe_mac_aml40) { + new_tx_count = clamp_t(u32, ring->tx_pending, + TXGBE_MIN_TXD_AML, TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + new_tx_count = clamp_t(u32, ring->tx_pending, + TXGBE_MIN_TXD, TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + } new_rx_count = clamp_t(u32, ring->rx_pending, TXGBE_MIN_RXD, TXGBE_MAX_RXD); -- Gitee From 631eef235daa70643ba31ab28418f126f1d348f0 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 15:14:26 +0800 Subject: [PATCH 12/16] anolis: net: txgbe: fix 25G link issues ANBZ: #26488 We expect the port to link with RS FEC, but the final FEC switch did not wait for the link to come up, which meant the port could not link back at 25G during the dual-speed switch. So, when switching to RS FEC, wait a while for the link. Also, after 'ethtool -s ethx speed maxspeed duplex full autoneg off' and a down/up of the port, 'ethtool -s enp1s0f0 speed maxspeed duplex full autoneg off' cannot set the port speed with a multispeed SFP, because when the port comes up, phy.autoneg_advertised is reinitialized to the default full speed set but autoneg is not reinitialized.
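The first half of the fix is, in spirit, a bounded wait after selecting a FEC mode instead of giving up immediately. A standalone sketch of that loop, where check_link() is an illustrative stand-in for txgbe_e56_check_phy_link() and the 4 x 250 ms cadence mirrors the msleep(250) polling elsewhere in this series:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool check_link(int attempt)
{
        return attempt >= 2;    /* pretend the link settles on the 3rd poll */
}

static bool wait_for_link(int polls, useconds_t interval_us)
{
        for (int i = 0; i < polls; i++) {
                if (check_link(i))
                        return true;
                usleep(interval_us);    /* give the PHY time to settle */
        }
        return false;
}

int main(void)
{
        printf("link %s\n", wait_for_link(4, 250000) ? "up" : "down");
        return 0;
}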
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c | 2 -- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c index 885212601105..b564719be9d6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -4056,8 +4056,6 @@ int txgbe_e56_fec_mode_polling(struct txgbe_hw *hw, bool *link_up) txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); mutex_unlock(&adapter->e56_lock); - if (j == 3) - break; for (i = 0; i < 4; i++) { msleep(250); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 1aa937e8669e..7c9567b45e30 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -7632,6 +7632,8 @@ static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) else if (speed & TXGBE_LINK_SPEED_10GB_FULL) speed = TXGBE_LINK_SPEED_10GB_FULL; } + + adapter->autoneg = autoneg; } } -- Gitee From db0b8ede2418bb72bb8aaa8de1fd88747c785695 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 15:21:34 +0800 Subject: [PATCH 13/16] anolis: net: txgbe: set pf promisc and vlan cannot receive packets ANBZ: #26488 e.g. 1.echo 1>/sys/class/net/enp25s0f0/device/sriov numvfs; 2.ip link set dev enp25s0f0 vf 0 trust on; 3.ifconfig enp25s0f0 promisc; 4.ip link add link enp25s0f0 name enp25s0f0.101 type vlan id 101; then can't receive vlan 100 packet. because vfta has been wrongly setting when add vlan. So fix it and delete unused vft_shadow. Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 7 +------ drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 9 ++++++++- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 1 - 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 9f1e3305f51b..88e5cb35d247 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -2042,8 +2042,7 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, regindex = (vlan >> 5) & 0x7F; bitindex = vlan & 0x1F; targetbit = (1 << bitindex); - /* errata 5 */ - vfta = hw->mac.vft_shadow[regindex]; + vfta = rd32(hw, TXGBE_PSR_VLAN_TBL(regindex)); if (vlan_on) { if (!(vfta & targetbit)) { vfta |= targetbit; @@ -2066,8 +2065,6 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, if (vfta_changed) wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta); - /* errata 5 */ - hw->mac.vft_shadow[regindex] = vfta; return 0; } @@ -2191,8 +2188,6 @@ s32 txgbe_clear_vfta(struct txgbe_hw *hw) for (offset = 0; offset < hw->mac.vft_size; offset++) { wr32(hw, TXGBE_PSR_VLAN_TBL(offset), 0); - /* errata 5 */ - hw->mac.vft_shadow[offset] = 0; } for (offset = 0; offset < TXGBE_PSR_VLAN_SWC_ENTRIES; offset++) { diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 7c9567b45e30..f33551bcdafd 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -4376,7 +4376,14 @@ static void txgbe_scrub_vfta(struct txgbe_adapter *adapter) /* extract values from vft_shadow and write back to VFTA */ for (i = 0; i < hw->mac.vft_size; i++) { - vfta = hw->mac.vft_shadow[i]; +#ifdef CONFIG_64BIT + 
if (i % 2) + vfta = (u32)((adapter->active_vlans[i / 2] >> 32) & U32_MAX); + else + vfta = (u32)((adapter->active_vlans[i / 2]) & U32_MAX); +#else + vfta = (u32)((adapter->active_vlans[i]) & U32_MAX); +#endif wr32(hw, TXGBE_PSR_VLAN_TBL(i), vfta); } } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index d3139c9d1d16..3ba1eccdacce 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -3250,7 +3250,6 @@ struct txgbe_mac_info { u32 mta_shadow[TXGBE_MAX_MTA]; s32 mc_filter_type; u32 mcft_size; - u32 vft_shadow[TXGBE_MAX_VFTA_ENTRIES]; u32 vft_size; u32 num_rar_entries; u32 rar_highwater; -- Gitee From a35fcdd6666f418f5da18ab84cab5fca0b98aee6 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 15:45:35 +0800 Subject: [PATCH 14/16] anolis: net: txgbe: fix fec mode issues ANBZ: #26488 fix can't link 10g when fec mismatch peer in multispeed, amblit chip in multispeed, if set local fec mismatch peer. may link no, because phy setting flow error and cannot link when change fec to auto from rs with baser peer. Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_aml.c | 21 ++++- .../net/ethernet/wangxun/txgbe/txgbe_e56.c | 17 ++-- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 82 ++++++++++++++++--- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 2 +- 4 files changed, 99 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c index 414f9eb79b75..ffac77891a5e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -129,7 +129,8 @@ static s32 txgbe_setup_mac_link_aml(struct txgbe_hw *hw, adapter->link_valid = true; } - if (speed == TXGBE_LINK_SPEED_25GB_FULL) { + if (speed == TXGBE_LINK_SPEED_25GB_FULL && + adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) { txgbe_e56_fec_mode_polling(hw, &link_up); } else { for (i = 0; i < 4; i++) { @@ -340,8 +341,22 @@ static s32 txgbe_setup_mac_link_multispeed_fiber_aml(struct txgbe_hw *hw, txgbe_e56_check_phy_link(hw, &link_speed, &link_up); if (link_up) { - adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; - goto out; + int i = 0; + + for (; i < 10; i++) { + int rdata = rd32(hw, 0x14404); + + if (rdata & 0x1) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + mutex_lock(&adapter->e56_lock); + txgbe_wr32_ephy(hw, + E56PHY_INTR_1_ADDR, + E56PHY_INTR_1_IDLE_EXIT1); + mutex_unlock(&adapter->e56_lock); + goto out; + } + msleep(100); + } } } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c index b564719be9d6..070ebd7c4763 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_e56.c @@ -3650,8 +3650,11 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) ~TXGBE_MAC_TX_CFG_TE); wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); + hw->mac.ops.disable_sec_tx_path(hw); } + hw->mac.ops.disable_tx_laser(hw); + if (hw->bus.lan_id == 0) reset = TXGBE_MIS_RST_LAN0_EPHY_RST; else @@ -3670,7 +3673,7 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) value = txgbe_rd32_epcs(hw, VR_PCS_DIG_CTRL1); if ((value & 0x8000)) { status = TXGBE_ERR_PHY_INIT_NOT_DONE; - ; + hw->mac.ops.enable_tx_laser(hw); goto out; } @@ -3930,6 +3933,13 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) } txgbe_e56_clear_symdata(hw); + + if 
(adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO && + speed == TXGBE_LINK_SPEED_25GB_FULL) { + adapter->cur_fec_link = adapter->fec_link_mode; + txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); + } + hw->mac.ops.enable_tx_laser(hw); status = txgbe_e56_config_rx(hw, speed); @@ -3946,11 +3956,6 @@ int txgbe_set_link_to_amlite(struct txgbe_hw *hw, u32 speed) txgbe_wr32_ephy(hw, E56PHY_INTR_1_ENABLE_ADDR, E56PHY_INTR_1_IDLE_EXIT1); - if (adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO) { - adapter->cur_fec_link = adapter->fec_link_mode; - txgbe_e56_set_fec_mode(hw, adapter->cur_fec_link); - } - if (status) goto out; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index a0908d00fe6a..82bbaf7a0126 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -905,20 +905,22 @@ static int txgbe_get_fec_param(struct net_device *netdev, } hw->mac.ops.check_link(hw, &speed, &link_up, false); fecparam->fec = 0; - if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + if (supported_link == TXGBE_LINK_SPEED_10GB_FULL) { fecparam->fec |= ETHTOOL_FEC_OFF; + } else { + if (adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) + fecparam->fec |= ETHTOOL_FEC_BASER; + else if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) + fecparam->fec |= ETHTOOL_FEC_RS; + else + fecparam->fec |= ETHTOOL_FEC_OFF; + } + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { fecparam->active_fec = ETHTOOL_FEC_OFF; goto done; } - if (adapter->fec_link_mode == TXGBE_PHY_FEC_AUTO) - fecparam->fec |= ETHTOOL_FEC_AUTO; - else if (adapter->fec_link_mode & TXGBE_PHY_FEC_BASER) - fecparam->fec |= ETHTOOL_FEC_BASER; - else if (adapter->fec_link_mode & TXGBE_PHY_FEC_RS) - fecparam->fec |= ETHTOOL_FEC_RS; - else - fecparam->fec |= ETHTOOL_FEC_OFF; - if (!link_up) { fecparam->active_fec = ETHTOOL_FEC_OFF; goto done; @@ -979,9 +981,63 @@ static int txgbe_set_fec_param(struct net_device *netdev, goto done; } if (cur_fec_mode != adapter->fec_link_mode) { - /* reset link */ - adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; - txgbe_service_event_schedule(adapter); + int status, i; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + bool link_up = false; + + if (hw->phy.multispeed_fiber && + (hw->phy.autoneg_advertised == + (TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_25GB_FULL)) && + adapter->fec_link_mode != TXGBE_PHY_FEC_AUTO) { + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + status = hw->mac.ops.setup_mac_link(hw, + TXGBE_LINK_SPEED_25GB_FULL, 0); + if (status != 0) + return status; + for (i = 0; i < 30; i++) { + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) + goto out; + msleep(200); + } + msec_delay(100); + status = hw->mac.ops.setup_mac_link(hw, + TXGBE_LINK_SPEED_10GB_FULL, 0); + if (status != 0) + return status; + for (i = 0; i < 35; i++) { + u32 link_speed; + bool link_up = 0; + + txgbe_e56_check_phy_link(hw, &link_speed, &link_up); + if (link_up) { + if (rd32(hw, 0x14404) & 0x1) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + mutex_lock(&adapter->e56_lock); + txgbe_wr32_ephy(hw, + E56PHY_INTR_1_ADDR, + E56PHY_INTR_1_IDLE_EXIT1); + mutex_unlock(&adapter->e56_lock); + goto out; + } + } + msleep(200); + } + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); +out: + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } else if 
(hw->phy.multispeed_fiber) { + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + hw->mac.ops.setup_link(hw, hw->phy.autoneg_advertised, true); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } else { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } } done: return err; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 88e5cb35d247..315291979113 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -2542,7 +2542,7 @@ s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, buf[0] = rd32(hw, TXGBE_MNG_MBOX); if ((buf[0] & 0xff0000) >> 16 == 0x80) { - ERROR_REPORT1(TXGBE_ERROR_CAUTION, "It's unknown cmd.\n"); + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, "It's unknown cmd.\n"); status = TXGBE_ERR_MNG_ACCESS_FAILED; goto rel_out; } -- Gitee From 51012ada9c1d225e675be69dc8b174be155fbe6f Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 16:10:51 +0800 Subject: [PATCH 15/16] anolis: net: txgbe: fix race condition when recover or close/suspend ANBZ: #26488 when do pcie recover and close or xdp_setup at same time, txgbe_close() may be called in txgbe_io_error_detected again. which ends up in a double free WARN and/or a BUG in free_msi_irqs(). To handle this situation we extend the rtnl_lock() to protect the call to netif_device_detach() and check for netif_device_present() to avoid clearing the interrupts second time in txgbe_close(); when do recover, tx_ring->desc be free, txgbe_tx_queue_clear_error_task in service_task touch tx_ring->desc will occur the calltrace, so add judgement in txgbe_tx_queue_clear_error_task. Signed-off-by: Duanqiang Wen --- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 46 ++++++++++++------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index f33551bcdafd..71b73b9e8466 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -6413,6 +6413,20 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter) txgbe_free_all_tx_resources(adapter); } +static void txgbe_down_suspend(struct txgbe_adapter *adapter) +{ +#ifdef HAVE_PTP_1588_CLOCK + txgbe_ptp_suspend(adapter); +#endif + + txgbe_down(adapter); + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); +} + /** * txgbe_close - Disables a network interface * @netdev: network interface device structure @@ -6437,12 +6451,8 @@ int txgbe_close(struct net_device *netdev) txgbe_ptp_stop(adapter); - txgbe_down(adapter); - txgbe_free_irq(adapter); - - txgbe_free_isb_resources(adapter); - txgbe_free_all_rx_resources(adapter); - txgbe_free_all_tx_resources(adapter); + if (netif_device_present(netdev)) + txgbe_down_suspend(adapter); txgbe_fdir_filter_exit(adapter); memset(&adapter->ft_filter_info, 0, @@ -6492,14 +6502,11 @@ static int txgbe_resume(struct device *dev) if (!err && netif_running(netdev)) err = txgbe_open(netdev); + if (!err) + netif_device_attach(netdev); rtnl_unlock(); - if (err) - return err; - - netif_device_attach(netdev); - - return 0; + return err; } /** @@ -6565,16 +6572,16 @@ static int __txgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) int retval = 0; #endif + rtnl_lock(); netif_device_detach(netdev); 
txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); - rtnl_lock(); if (netif_running(netdev)) txgbe_close_suspend(adapter); - rtnl_unlock(); txgbe_clear_interrupt_scheme(adapter); + rtnl_unlock(); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) @@ -8050,6 +8057,9 @@ static void txgbe_tx_queue_clear_error_task(struct txgbe_adapter *adapter) struct txgbe_tx_buffer *tx_buffer; u32 size; + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + for (i = 0; i < 4; i++) desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); @@ -8063,6 +8073,8 @@ static void txgbe_tx_queue_clear_error_task(struct txgbe_adapter *adapter) rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx))); for (j = 0; j < tx_ring->count; j++) { tx_desc = TXGBE_TX_DESC(tx_ring, j); + if (!tx_desc) + return; if (tx_desc->read.olinfo_status != 0x1) e_warn(tx_err, "queue[%d][%d]:0x%llx, 0x%x, 0x%x\n", i, j, tx_desc->read.buffer_addr, tx_desc->read.cmd_type_len, @@ -11070,14 +11082,14 @@ static pci_ers_result_t txgbe_io_error_detected(struct pci_dev *pdev, rtnl_lock(); netif_device_detach(netdev); + if (netif_running(netdev)) + txgbe_down_suspend(adapter); + if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } - if (netif_running(netdev)) - txgbe_close(netdev); - if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); -- Gitee From 42f2725ac568793478c32695cf760bea026e3021 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Mon, 29 Sep 2025 16:54:01 +0800 Subject: [PATCH 16/16] anolis: net: txgbe: fix invoke hot reset multi times unexpectedly ANBZ: #26488 For example, when lan0 invokes the hot reset function while lan0 is being downed but the pending bit is still set, a hot reset is triggered again; add an io_err flag to avoid this. Also, if lan1 detects the error and raises the hot reset flag, and lan0 then invokes the hot reset function, it dumps lan0's debug info, but we need lan1's debug info.
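A runnable model of the io_err guard: while error recovery is in flight, a second hot-reset request (for example, one raised by the pending bit seen during ifdown) is suppressed. Names other than io_err are illustrative; in the driver the flag lives in struct txgbe_adapter:

#include <stdbool.h>
#include <stdio.h>

struct adapter {
        bool io_err;            /* set in io_error_detected()  */
        unsigned int resets;    /* recoveries actually requested */
};

static void request_recovery(struct adapter *a)
{
        if (a->io_err)          /* recovery already in flight: skip */
                return;
        a->resets++;
}

int main(void)
{
        struct adapter a = { 0 };

        request_recovery(&a);   /* first fault: proceeds           */
        a.io_err = true;        /* io_error_detected() has entered */
        request_recovery(&a);   /* pending bit: suppressed         */
        printf("recoveries requested: %u\n", a.resets);
        a.io_err = false;       /* io_resume() clears the flag     */
        return 0;
}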
Signed-off-by: Duanqiang Wen --- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 10 +-- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 73 +++++++++++++------ 2 files changed, 54 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 2dca2b2e6a3d..29fce194a8ab 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -680,6 +680,7 @@ struct txgbe_therm_proc_data { **/ #define TXGBE_FLAG2_RSC_CAPABLE BIT(0) #define TXGBE_FLAG2_RSC_ENABLED BIT(1) +#define TXGBE_FLAG2_DMA_RESET_REQUESTED BIT(2) #define TXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(3) #define TXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(4) #define TXGBE_FLAG2_SEARCH_FOR_SFP BIT(5) @@ -688,8 +689,9 @@ struct txgbe_therm_proc_data { #define TXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(8) #define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(9) #define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(10) -#define TXGBE_FLAG2_RSS_ENABLED BIT(12) #define TXGBE_FLAG2_PTP_PPS_ENABLED BIT(11) +#define TXGBE_FLAG2_RSS_ENABLED BIT(12) +#define TXGBE_FLAG2_RING_DUMP BIT(13) #define TXGBE_FLAG2_EEE_CAPABLE BIT(14) #define TXGBE_FLAG2_EEE_ENABLED BIT(15) #define TXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(16) @@ -707,11 +709,6 @@ struct txgbe_therm_proc_data { #define TXGBE_FLAG2_ECC_ERR_RESET BIT(29) #define TXGBE_FLAG2_RX_LEGACY BIT(30) #define TXGBE_FLAG2_PCIE_NEED_RECOVER BIT(31) -#define TXGBE_FLAG2_PCIE_NEED_Q_RESET BIT(30) -#define TXGBE_FLAG2_SERVICE_RUNNING BIT(13) - -/* amlite: dma reset */ -#define TXGBE_FLAG2_DMA_RESET_REQUESTED BIT(2) #define TXGBE_FLAG3_PHY_EVENT BIT(0) #define TXGBE_FLAG3_TEMP_SENSOR_INPROGRESS BIT(1) @@ -966,6 +963,7 @@ struct txgbe_adapter { u16 num_xsk_pools; bool cmplt_to_dis; + bool io_err; u8 i2c_eeprom[512]; u32 eeprom_len; u32 eeprom_type; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 71b73b9e8466..a382e7e83217 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -49,7 +49,7 @@ char txgbe_driver_name[32] = TXGBE_NAME; static const char txgbe_driver_string[] = "WangXun RP1000/RP2000/FF50XX PCI Express Network Driver"; -#define DRV_VERSION __stringify(2.1.1oe) +#define DRV_VERSION __stringify(2.1.1.3oe) const char txgbe_driver_version[32] = DRV_VERSION; static const char txgbe_copyright[] = @@ -166,10 +166,10 @@ static void txgbe_dump_all_ring_desc(struct txgbe_adapter *adapter) if (!netif_msg_tx_err(adapter)) return; - e_warn(tx_err, "Dump desc base addr\n"); + e_warn(tx_err, "Dump desc base addr.\n"); for (i = 0; i < adapter->num_tx_queues; i++) - e_warn(tx_err, "q_%d:0x%x%x\n", i, rd32(hw, TXGBE_PX_TR_BAH(i)), + e_warn(tx_err, "txq_%d:0x%x%x\n", i, rd32(hw, TXGBE_PX_TR_BAH(i)), rd32(hw, TXGBE_PX_TR_BAL(i))); for (i = 0; i < adapter->num_tx_queues; i++) { @@ -182,6 +182,12 @@ static void txgbe_dump_all_ring_desc(struct txgbe_adapter *adapter) tx_desc->read.cmd_type_len, tx_desc->read.olinfo_status); } + + e_warn(drv, "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, tx_ring->next_to_use, tx_ring->next_to_clean); + e_warn(drv, "tx ring %d hw rp is 0x%x, wp is 0x%x\n", + i, rd32(hw, TXGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, TXGBE_PX_TR_WP(tx_ring->reg_idx))); } } @@ -548,12 +554,7 @@ static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter) { - /* schedule immediate reset if we believe we hung */ - - if 
(adapter->hw.bus.lan_id == 0) - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - else - wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + adapter->flags2 |= TXGBE_FLAG2_RING_DUMP; txgbe_service_event_schedule(adapter); } @@ -2675,7 +2676,6 @@ static void txgbe_tx_ring_recovery(struct txgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { if (desc_error[i / 32] & (1 << i % 32)) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_Q_RESET; e_err(drv, "TDM non-fatal error, queue[%d]", i); } } @@ -2715,12 +2715,7 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) ERROR_REPORT1(TXGBE_ERROR_POLLING, "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); - if (hw->bus.lan_id == 0) { - adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; - txgbe_service_event_schedule(adapter); - } else { - wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); - } + txgbe_tx_timeout_dorecovery(adapter); } if (eicr & TXGBE_PX_MISC_IC_INT_ERR) { @@ -5292,8 +5287,6 @@ void txgbe_reinit_locked(struct txgbe_adapter *adapter) /* put off any impending NetWatchDogTimeout */ netif_trans_update(adapter->netdev); - adapter->flags2 |= TXGBE_FLAG2_SERVICE_RUNNING; - while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); txgbe_down(adapter); @@ -5394,7 +5387,8 @@ void txgbe_reset(struct txgbe_adapter *adapter) break; case TXGBE_ERR_MASTER_REQUESTS_PENDING: e_dev_err("master disable timed out\n"); - txgbe_tx_timeout_dorecovery(adapter); + if (!adapter->io_err) + txgbe_tx_timeout_dorecovery(adapter); break; case TXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ @@ -5621,7 +5615,6 @@ static void txgbe_disable_device(struct txgbe_adapter *adapter) del_timer_sync(&adapter->service_timer); del_timer_sync(&adapter->irq_timer); - adapter->flags2 &= ~TXGBE_FLAG2_SERVICE_RUNNING; hw->f2c_mod_status = false; cancel_work_sync(&adapter->sfp_sta_task); @@ -8022,15 +8015,44 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) rtnl_unlock(); } -static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +static void txgbe_check_ring_dump_subtask(struct txgbe_adapter *adapter) { - bool status; + struct txgbe_hw *hw = &adapter->hw; + u32 val; - if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + if (!(adapter->flags2 & TXGBE_FLAG2_RING_DUMP)) return; txgbe_print_tx_hang_status(adapter); txgbe_dump_all_ring_desc(adapter); + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + adapter->flags2 &= ~TXGBE_FLAG2_RING_DUMP; + + /* record which func to provoke PCIE recovery */ + if (rd32(&adapter->hw, TXGBE_MIS_PF_SM) == 1) { + val = rd32m(&adapter->hw, + TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP | TXGBE_MIS_PRB_CTL_LAN1_UP); + if (val & TXGBE_MIS_PRB_CTL_LAN0_UP) { + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "check_ring_dump_subtask: set recover on Lan0\n"); + } + } else if (val & TXGBE_MIS_PRB_CTL_LAN1_UP) { + if (hw->bus.lan_id == 1) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "check_ring_dump_subtask: set recover on Lan1\n"); + } + } + } +} + +static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +{ + bool status; + + if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + return; wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); @@ -8138,6 +8160,7 @@ static void txgbe_service_task(struct work_struct *work) return; } + txgbe_check_ring_dump_subtask(adapter); txgbe_check_pcie_subtask(adapter); txgbe_reset_subtask(adapter); 
txgbe_phy_event_subtask(adapter); @@ -11082,6 +11105,8 @@ static pci_ers_result_t txgbe_io_error_detected(struct pci_dev *pdev, rtnl_lock(); netif_device_detach(netdev); + adapter->io_err = true; + if (netif_running(netdev)) txgbe_down_suspend(adapter); @@ -11167,6 +11192,8 @@ static void txgbe_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) txgbe_open(netdev); + adapter->io_err = false; + netif_device_attach(netdev); rtnl_unlock(); } -- Gitee
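As a closing illustration of the teardown guard from patch 15: once error handling has detached the device under rtnl, a concurrent close sees it as absent and skips freeing resources a second time, which is what prevents the double free in free_msi_irqs(). A standalone model with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct netdev {
        bool present;   /* plays the role of netif_device_present() */
        bool freed;
};

static void teardown(struct netdev *nd, const char *who)
{
        if (nd->freed) {
                printf("%s: BUG, resources freed twice\n", who);
                return;
        }
        nd->freed = true;
        printf("%s: resources freed\n", who);
}

static void do_close(struct netdev *nd)
{
        if (nd->present)        /* the check patch 15 adds to close() */
                teardown(nd, "close");
}

static void io_error_detected(struct netdev *nd)
{
        nd->present = false;    /* netif_device_detach() under rtnl */
        teardown(nd, "recovery");
}

int main(void)
{
        struct netdev nd = { .present = true };

        io_error_detected(&nd); /* recovery tears down first     */
        do_close(&nd);          /* concurrent close safely skips */
        return 0;
}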