diff --git a/Documentation/networking/ngbe.rst b/Documentation/networking/ngbe.rst new file mode 100644 index 0000000000000000000000000000000000000000..bcf03d85e4105c09e1b644a8bb3f9b7fd1462ec4 --- /dev/null +++ b/Documentation/networking/ngbe.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================================================= +Linux Base Driver for WangXun(R) Gigabit PCI Express Adapters +============================================================= + +WangXun Gigabit Linux driver. +Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + +Support +======= +If you have problems with the software or hardware, please contact our +customer support team via email at nic-support@net-swift.com or check our website +at https://www.net-swift.com diff --git a/MAINTAINERS b/MAINTAINERS index cf41193a238a308115c93a73e04aa522a4e06639..1b137f5e7ff6f75836384d548c2a8d42385ec1b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18937,6 +18937,13 @@ S: Maintained F: Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst F: drivers/net/ethernet/wangxun/ +WANGXUN ETHERNET DRIVER +M: Duanqiang Wen +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst +F: drivers/net/ethernet/wangxun/ + WATCHDOG DEVICE DRIVERS M: Wim Van Sebroeck M: Guenter Roeck diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 997db1fe0af6d80b3963feffea1a30e97f265fe9..c573b8e108a9f04e5d2bbbc8e115f985012d50f3 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -2671,6 +2671,7 @@ CONFIG_FM10K=m CONFIG_IGC=m CONFIG_NET_VENDOR_WANGXUN=y CONFIG_TXGBE=m +CONFIG_NGBE=m # CONFIG_JME is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 
45d4a2859124c71bf2dd3690db27b49f99e1cef3..14c36170a9ff756122b42708cd31e1fd278edadc 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -2690,6 +2690,7 @@ CONFIG_FM10K=m CONFIG_IGC=m CONFIG_NET_VENDOR_WANGXUN=y CONFIG_TXGBE=m +CONFIG_NGBE=m # CONFIG_JME is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index e2df114feff49c92c21d83c992fa2b9c3dc09789..4994c8c09b696c219b72734e5988634ec4f00ed9 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -2727,6 +2727,7 @@ CONFIG_FM10K=m CONFIG_IGC=m CONFIG_NET_VENDOR_WANGXUN=y CONFIG_TXGBE=m +CONFIG_NGBE=m # CONFIG_JME is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 69f2b8ae8f807e11d488ab01d7077ac993ac23ba..e1879c5ae26e60e645d69de8456e9a091bb41128 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -2725,6 +2725,7 @@ CONFIG_FM10K=m CONFIG_IGC=m CONFIG_NET_VENDOR_WANGXUN=y CONFIG_TXGBE=m +CONFIG_NGBE=m # CONFIG_JME is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 8edf04a6c637d1398c4e0eb7ed31eeea793eb716..3590d7ad59f43a368dc54dea3523e25017b14227 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -30,4 +30,16 @@ config TXGBE To compile this driver as a module, choose M here. The module will be called txgbe. +config NGBE + tristate "Netswift PCI-Express Gigabit Ethernet support" + depends on PCI + imply PTP_1588_CLOCK + help + This driver supports Netswift gigabit ethernet adapters. + For more information on how to identify your adapter, go + to + + To compile this driver as a module, choose M here. 
The module + will be called ngbe. + endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile index c34db1bead25b4339ae3ea8f01dda6bcaea31f08..ac3fb06b233ca4ff9e5164f84fdfc13bcd80ff9c 100644 --- a/drivers/net/ethernet/wangxun/Makefile +++ b/drivers/net/ethernet/wangxun/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_TXGBE) += txgbe/ +obj-$(CONFIG_NGBE) += ngbe/ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..86f33a4c82353f4310a72ade5a17512febd13e3a --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Wangxun(R) GbE PCI Express ethernet driver +# + +obj-$(CONFIG_NGBE) += ngbe.o + +ngbe-objs := ngbe_main.o \ + ngbe_hw.o \ + ngbe_phy.o \ + ngbe_lib.o \ + ngbe_ptp.o \ + ngbe_ethtool.o + +ngbe-$(CONFIG_DEBUG_FS) += ngbe_debugfs.o +ngbe-$(CONFIG_SYSFS) += ngbe_sysfs.o diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h new file mode 100644 index 0000000000000000000000000000000000000000..69812227709128b2a504fa47f7fd0625240a2e21 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h @@ -0,0 +1,878 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _NGBE_H_ +#define _NGBE_H_ + +#include +#include +#include +#include +#include +#include "ngbe_type.h" +#include +#include +#include +#include + +#define NGBE_MAX_FDIR_INDICES 7 +#define NGBE_MAX_RSS_INDICES 8 +#define NGBE_MAX_MSIX_Q_VECTORS_EMERALD 9 +#define NGBE_MAX_MSIX_VECTORS_EMERALD 9 + +#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) +#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) + +#define NGBE_INTR_ALL 0x1FF + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 + +#define NGBE_RSS_KEY_SIZE 40 +#define NGBE_MAX_RETA_ENTRIES 128 + +/* ngbe_adapter.flag */ +#define NGBE_FLAG_MSI_CAPABLE BIT(0) +#define NGBE_FLAG_MSI_ENABLED BIT(1) +#define NGBE_FLAG_MSIX_CAPABLE BIT(2) +#define NGBE_FLAG_MSIX_ENABLED BIT(3) +#define NGBE_FLAG_LLI_PUSH BIT(4) + +#define NGBE_FLAG_IPSEC_ENABLED BIT(5) + +#define NGBE_FLAG_TPH_ENABLED BIT(6) +#define NGBE_FLAG_TPH_CAPABLE BIT(7) +#define NGBE_FLAG_TPH_ENABLED_DATA BIT(8) + +#define NGBE_FLAG_MQ_CAPABLE BIT(9) +#define NGBE_FLAG_DCB_ENABLED BIT(10) +#define NGBE_FLAG_VMDQ_ENABLED BIT(11) +#define NGBE_FLAG_FAN_FAIL_CAPABLE BIT(12) +#define NGBE_FLAG_NEED_LINK_UPDATE BIT(13) +#define NGBE_FLAG_NEED_ANC_CHECK BIT(14) +#define NGBE_FLAG_FDIR_HASH_CAPABLE BIT(15) +#define NGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(16) +#define NGBE_FLAG_SRIOV_CAPABLE BIT(19) +#define NGBE_FLAG_SRIOV_ENABLED BIT(20) +#define NGBE_FLAG_SRIOV_REPLICATION_ENABLE BIT(21) +#define NGBE_FLAG_SRIOV_L2SWITCH_ENABLE BIT(22) +#define NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE BIT(23) +#define NGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(24) +#define NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(25) +#define NGBE_FLAG_VXLAN_OFFLOAD_ENABLE BIT(26) +#define NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(27) +#define NGBE_FLAG_NEED_ETH_PHY_RESET BIT(28) +#define NGBE_FLAG_RX_HS_ENABLED BIT(30) +#define NGBE_FLAG_LINKSEC_ENABLED 
BIT(31) + +/** + * ngbe_adapter.flag2 + **/ +#define NGBE_FLAG2_RSC_CAPABLE BIT(0) +#define NGBE_FLAG2_RSC_ENABLED BIT(1) +#define NGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(3) +#define NGBE_FLAG2_TEMP_SENSOR_EVENT BIT(4) +#define NGBE_FLAG2_SEARCH_FOR_SFP BIT(5) +#define NGBE_FLAG2_SFP_NEEDS_RESET BIT(6) +#define NGBE_FLAG2_PF_RESET_REQUESTED BIT(7) +#define NGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(8) +#define NGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(9) +#define NGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(10) +#define NGBE_FLAG2_PTP_PPS_ENABLED BIT(11) +#define NGBE_FLAG2_RSS_ENABLED BIT(12) +#define NGBE_FLAG2_EEE_CAPABLE BIT(14) +#define NGBE_FLAG2_EEE_ENABLED BIT(15) +#define NGBE_FLAG2_VXLAN_REREG_NEEDED BIT(16) +#define NGBE_FLAG2_DEV_RESET_REQUESTED BIT(18) +#define NGBE_FLAG2_RESET_INTR_RECEIVED BIT(19) +#define NGBE_FLAG2_GLOBAL_RESET_REQUESTED BIT(20) +#define NGBE_FLAG2_MNG_REG_ACCESS_DISABLED BIT(22) +#define NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP BIT(23) +#define NGBE_FLAG2_PCIE_NEED_RECOVER BIT(31) + +/* preset defaults */ +#define NGBE_FLAGS_SP_INIT (NGBE_FLAG_MSI_CAPABLE | \ + NGBE_FLAG_MSIX_CAPABLE |\ + NGBE_FLAG_MQ_CAPABLE | \ + NGBE_FLAG_SRIOV_CAPABLE) + +#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ + +#define NGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define NGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define NGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define NGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) +#define NGBE_EEPROM_GRANT_ATTEMPTS 100 +#define NGBE_HTONL(_i) htonl(_i) +#define NGBE_NTOHL(_i) ntohl(_i) +#define NGBE_NTOHS(_i) ntohs(_i) +#define NGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define NGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +#define NGBE_MAC_STATE_DEFAULT 0x1 +#define NGBE_MAC_STATE_MODIFIED 0x2 +#define NGBE_MAC_STATE_IN_USE 0x4 + +#define NGBE_MAX_RX_DESC_POLL 10 + +#define NGBE_RXBUFFER_2K 2048 +#define NGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ +#define NGBE_ETH_FRAMING 20 +#define NGBE_RXBUFFER_256 256 /* Used for skb receive header */ + +#define 
NGBE_RX_HDR_SIZE NGBE_RXBUFFER_256 +#define NGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define NGBE_MAX_VF_FUNCTIONS 8 +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 + +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define NGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define NGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. + */ +#define MAX_MSIX_Q_VECTORS NGBE_MAX_MSIX_Q_VECTORS_EMERALD +#define MAX_MSIX_COUNT NGBE_MAX_MSIX_VECTORS_EMERALD + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define NGBE_INTR_MISC_VMDQ(A) BIT(((A)->num_q_vectors + (A)->ring_feature[RING_F_VMDQ].offset)) +#define NGBE_MAX_MACVLANS 8 + +/* VLAN info */ +#define NGBE_TX_FLAGS_VLAN_SHIFT 16 +#define NGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define NGBE_MAX_PF_MACVLANS 15 + +/* Ether Types */ +#define NGBE_ETH_P_LLDP 0x88CC +#define NGBE_ETH_P_CNM 0x22E7 + +#define NGBE_DEFAULT_FCPAUSE 0xFFFF + +/* iterator for handling rings in ring container */ +#define ngbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define NGBE_RING_SIZE(R) ((R)->count < NGBE_MAX_TXD ? 
(R)->count / 128 : 0) + +#define ring_uses_build_skb(ring) \ + test_bit(__NGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#define set_ring_hs_enabled(ring) \ + set_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define ring_is_hs_enabled(ring) \ + test_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) + +#define NGBE_RX_DESC(R, i) \ + (&(((union ngbe_rx_desc *)((R)->desc))[i])) +#define NGBE_TX_DESC(R, i) \ + (&(((union ngbe_tx_desc *)((R)->desc))[i])) +#define NGBE_TX_CTXTDESC(R, i) \ + (&(((struct ngbe_tx_context_desc *)((R)->desc))[i])) + +#define NGBE_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) +#define NGBE_7K_ITR 595 + +#define NGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define NGBE_FAILED_READ_CFG_WORD 0xffffU + +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define NGBE_INTR_Q(i) (1ULL << (i)) + +/* macro to make the table lines short */ +#define NGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ NGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ NGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ NGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ NGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ NGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ NGBE_DEC_PTYPE_LAYER_##layer } + +#define NGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define 
NGBE_70K_ITR 57 +#define NGBE_20K_ITR 200 +#define NGBE_4K_ITR 1024 +#define NGBE_7K_ITR 595 + +enum ngbe_state_t { + __NGBE_TESTING, + __NGBE_RESETTING, + __NGBE_DOWN, + __NGBE_HANGING, + __NGBE_DISABLED, + __NGBE_REMOVING, + __NGBE_SERVICE_SCHED, + __NGBE_SERVICE_INITED, + __NGBE_IN_SFP_INIT, + __NGBE_PTP_RUNNING, + __NGBE_PTP_TX_IN_PROGRESS, +}; + +enum ngbe_isb_idx { + NGBE_ISB_HEADER, + NGBE_ISB_MISC, + NGBE_ISB_VEC0, + NGBE_ISB_VEC1, + NGBE_ISB_MAX +}; + +enum ngbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +enum ngbe_tx_flags { + /* cmd_type flags */ + NGBE_TX_FLAGS_HW_VLAN = 0x01, + NGBE_TX_FLAGS_TSO = 0x02, + NGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + NGBE_TX_FLAGS_CC = 0x08, + NGBE_TX_FLAGS_IPV4 = 0x10, + NGBE_TX_FLAGS_CSUM = 0x20, + NGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + NGBE_TX_FLAGS_LINKSEC = 0x200, + NGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + NGBE_TX_FLAGS_SW_VLAN = 0x40, + NGBE_TX_FLAGS_FCOE = 0x80, +}; + +struct ngbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +struct ngbe_queue_stats { + u64 packets; + u64 bytes; +}; + +enum ngbe_ring_state_t { + __NGBE_RX_3K_BUFFER, + __NGBE_RX_BUILD_SKB_ENABLED, + __NGBE_TX_XPS_INIT_DONE, + __NGBE_TX_DETECT_HANG, + __NGBE_HANG_CHECK_ARMED, + __NGBE_RX_HS_ENABLED, +}; + +/* ngbe_dec_ptype.mac: outer mac */ +enum ngbe_dec_ptype_mac { + NGBE_DEC_PTYPE_MAC_IP = 0, + NGBE_DEC_PTYPE_MAC_L2 = 2, + NGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* ngbe_dec_ptype.[e]ip: outer&encaped ip */ +#define NGBE_DEC_PTYPE_IP_FRAG (0x4) + +enum ngbe_dec_ptype_ip { + NGBE_DEC_PTYPE_IP_NONE = 0, + NGBE_DEC_PTYPE_IP_IPV4 = 1, + NGBE_DEC_PTYPE_IP_IPV6 = 2, + NGBE_DEC_PTYPE_IP_FGV4 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV4), + NGBE_DEC_PTYPE_IP_FGV6 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV6), +}; + +/* ngbe_dec_ptype.layer: payload layer */ +enum 
ngbe_dec_ptype_layer { + NGBE_DEC_PTYPE_LAYER_NONE = 0, + NGBE_DEC_PTYPE_LAYER_PAY2 = 1, + NGBE_DEC_PTYPE_LAYER_PAY3 = 2, + NGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct ngbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; + +/* ngbe_dec_ptype.proto: payload proto */ +enum ngbe_dec_ptype_prot { + NGBE_DEC_PTYPE_PROT_NONE = 0, + NGBE_DEC_PTYPE_PROT_UDP = 1, + NGBE_DEC_PTYPE_PROT_TCP = 2, + NGBE_DEC_PTYPE_PROT_SCTP = 3, + NGBE_DEC_PTYPE_PROT_ICMP = 4, + NGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* ngbe_dec_ptype.etype: encaped type */ +enum ngbe_dec_ptype_etype { + NGBE_DEC_PTYPE_ETYPE_NONE = 0, + NGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + NGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + NGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + NGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +struct ngbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct ngbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct ngbe_tx_buffer { + union ngbe_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ngbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +}; + +struct ngbe_ring_container { + struct ngbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 
work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct ngbe_ring; + +struct ngbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ngbe_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +struct ngbe_ring { + struct ngbe_ring *next; /* pointer to next ring in q_vector */ + struct ngbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct ngbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct ngbe_tx_buffer *tx_buffer_info; + struct ngbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + + /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u8 reg_idx; + + u16 next_to_use; + u16 next_to_clean; + + unsigned long last_rx_timestamp; + u16 rx_buf_len; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ngbe_queue_stats stats; + struct u64_stats_sync syncp; + + union { + struct ngbe_tx_queue_stats tx_stats; + struct ngbe_rx_queue_stats rx_stats; + }; +} ____cacheline_aligned_in_smp; + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ +struct ngbe_q_vector { + struct ngbe_adapter *adapter; + int cpu; /* CPU for DCA */ + + struct rcu_head rcu; /* to avoid race with update stats on free */ + /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ngbe_ring_container rx, tx; + + struct napi_struct napi; + struct net_device poll_dev; + cpumask_t affinity_mask; + + int numa_node; + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + + /* for dynamic allocation of rings associated with this q_vector */ + struct ngbe_ring ring[0] ____cacheline_aligned_in_smp; +}; + +struct ngbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#ifdef CONFIG_HWMON +#define NGBE_HWMON_TYPE_TEMP 0 +#define NGBE_HWMON_TYPE_ALARMTHRESH 1 +#define NGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* CONFIG_HWMON */ + +/* board specific private data structure */ +struct ngbe_adapter { + u8 __iomem *io_addr; /* Mainly for iounmap use */ + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + int max_q_vectors; /* upper limit of q_vectors for device */ + int num_q_vectors; /* current number of q_vectors for device */ + struct ngbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + struct ngbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + u32 atr_sample_rate; + + enum ngbe_fc_mode last_lfc_mode; + + u32 flags; + u32 flags2; + unsigned long state; + + unsigned int tx_ring_count; + unsigned int rx_ring_count; + 
u64 tx_busy; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_work_limit; + u16 tx_itr_setting; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_work_limit; + u16 rx_itr_setting; + + /* structs defined in ngbe_hw.h */ + struct ngbe_hw hw; + u16 msg_enable; + struct ngbe_hw_stats stats; + + struct ngbe_mac_addr *mac_table; + u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; + u32 rss_key[NGBE_RSS_KEY_SIZE / sizeof(u32)]; + + struct timer_list service_timer; + struct work_struct service_task; + + /* TX */ + struct ngbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + u32 tx_timeout_count; + u64 restart_queue; + + /* RX */ + struct ngbe_ring *rx_ring[MAX_RX_QUEUES]; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + + u64 test_icr; + struct ngbe_ring test_tx_ring; + struct ngbe_ring test_rx_ring; + + u32 tx_timeout_recovery_level; + + u32 interrupt_event; + + u32 link_speed; + bool link_up; + unsigned long link_check_timeout; + u64 lsc_int; + + unsigned int num_vfs; + u8 default_up; + + unsigned int queues_per_pool; + u32 wol; + u32 led_reg; + char eeprom_id[32]; + u16 eeprom_cap; + bool netdev_registered; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[NGBE_ISB_MAX]; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + + u32 hang_cnt; + u32 gphy_efuse[2]; + + /* ptp block */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; /* Used to protect timestamp registers. 
*/ + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + +#ifdef CONFIG_DEBUG_FS + struct dentry *ngbe_dbg_adapter; +#endif +#ifdef CONFIG_HWMON + struct hwmon_buff ngbe_hwmon_buff; +#endif /* CONFIG_HWMON */ +}; + +struct ngbe_cb { + dma_addr_t dma; + u16 vid; /* VLAN tag */ + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; + +#define NGBE_CB(skb) ((struct ngbe_cb *)(skb)->cb) + +struct ngbe_msg { + u16 msg_enable; +}; + +extern char ngbe_driver_name[]; + +static inline struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw) +{ + return ((struct ngbe_adapter *)hw->back)->netdev; +} + +static inline struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = + container_of(hw, struct ngbe_adapter, hw); + return (struct ngbe_msg *)&adapter->msg_enable; +} + +static inline void ngbe_intr_enable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_IMC, mask); +} + +static inline void ngbe_intr_disable(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_IMS, mask); +} + +static inline void ngbe_intr_trigger(struct ngbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, NGBE_PX_ICS, mask); +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +/* FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int ngbe_rx_bufsz(struct ngbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(NGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + return NGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int ngbe_rx_pg_order(struct ngbe_ring __maybe_unused *ring) +{ + return 0; +} + +#define ngbe_rx_pg_size(_ring) (PAGE_SIZE << ngbe_rx_pg_order(_ring)) + +/* ngbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 ngbe_desc_unused(struct ngbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +/* ngbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ngbe_test_staterr(union ngbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static inline u8 ngbe_max_rss_indices(struct ngbe_adapter *adapter) +{ + return NGBE_MAX_RSS_INDICES; +} + +static inline u32 ngbe_misc_isb(struct ngbe_adapter *adapter, + enum ngbe_isb_idx idx) +{ + u32 cur_tag = 0; + u32 cur_diff = 0; + + cur_tag = adapter->isb_mem[NGBE_ISB_HEADER]; + cur_diff = cur_tag - adapter->isb_tag[idx]; + + adapter->isb_tag[idx] = cur_tag; + + return cpu_to_le32(adapter->isb_mem[idx]); +} + +enum { + NGBE_ERROR_SOFTWARE, + NGBE_ERROR_POLLING, + NGBE_ERROR_INVALID_STATE, + NGBE_ERROR_UNSUPPORTED, + NGBE_ERROR_ARGUMENT, + NGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(hw, level, format, arg...) 
do { \ + switch (level) { \ + case NGBE_ERROR_SOFTWARE: \ + fallthrough; \ + case NGBE_ERROR_CAUTION: \ + fallthrough; \ + case NGBE_ERROR_POLLING: \ + netif_warn(ngbe_hw_to_msg(hw), drv, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case NGBE_ERROR_INVALID_STATE: \ + fallthrough; \ + case NGBE_ERROR_UNSUPPORTED: \ + fallthrough; \ + case NGBE_ERROR_ARGUMENT: \ + netif_err(ngbe_hw_to_msg(hw), hw, ngbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ngbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) 
\ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +/* needed by ngbe_ethtool.c */ +extern char ngbe_driver_name[]; +extern const char ngbe_driver_version[]; + +void ngbe_unmap_and_free_tx_res(struct ngbe_ring *ring, struct ngbe_tx_buffer *tx_buffer); +void ngbe_disable_device(struct ngbe_adapter *adapter); +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter); +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter); +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter); +int ngbe_poll(struct napi_struct *napi, int budget); +void ngbe_alloc_rx_buffers(struct ngbe_ring *rx_ring, u16 cleaned_count); +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool); +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter); +void ngbe_set_rx_mode(struct net_device *netdev); +void ngbe_reinit_locked(struct ngbe_adapter *adapter); +void ngbe_reset(struct ngbe_adapter *adapter); +u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet); +void ngbe_down(struct ngbe_adapter *adapter); +int ngbe_setup_tx_resources(struct ngbe_ring *tx_ring); +void ngbe_free_tx_resources(struct ngbe_ring *tx_ring); +int ngbe_setup_rx_resources(struct ngbe_ring *rx_ring); +void ngbe_free_rx_resources(struct ngbe_ring *rx_ring); +void ngbe_up(struct ngbe_adapter *adapter); +void ngbe_update_stats(struct ngbe_adapter *adapter); +void ngbe_irq_disable(struct ngbe_adapter *adapter); +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, struct ngbe_ring *ring); +void ngbe_configure_tx_ring(struct ngbe_adapter *adapter, struct ngbe_ring *ring); +void ngbe_configure_rx_ring(struct ngbe_adapter *adapter, struct ngbe_ring *ring); +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *ring, + struct ngbe_tx_buffer *tx_buffer); +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *skb, + struct ngbe_adapter 
__maybe_unused *adapter, + struct ngbe_ring *tx_ring); +int ngbe_close(struct net_device *netdev); +int ngbe_open(struct net_device *netdev); +int ngbe_wol_supported(struct ngbe_adapter *adapter); +void ngbe_write_eitr(struct ngbe_q_vector *q_vector); +void ngbe_store_reta(struct ngbe_adapter *adapter); +int ngbe_setup_tc(struct net_device *dev, u8 tc); +void ngbe_do_reset(struct net_device *netdev); +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter); + +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter); +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter); +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +void ngbe_ptp_init(struct ngbe_adapter *adapter); +void ngbe_ptp_reset(struct ngbe_adapter *adapter); +void ngbe_ptp_stop(struct ngbe_adapter *adapter); +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb); +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter); +void ngbe_ptp_suspend(struct ngbe_adapter *adapter); +void ngbe_set_ethtool_ops(struct net_device *netdev); + +#endif /* _NGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..d466c2ad5cb17a96f514520c9b38cfbccdc97035 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_debugfs.c @@ -0,0 +1,682 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include + +#include "ngbe.h" + +static struct dentry *ngbe_dbg_root; +static int ngbe_data_mode; + +#define NGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF) +#define NGBE_DATA_ARGS(dm) ((dm) & 0xFFFF) +enum ngbe_data_func { + NGBE_FUNC_NONE = (0 << 16), + NGBE_FUNC_DUMP_BAR = (1 << 16), + NGBE_FUNC_DUMP_RDESC = (2 << 16), + NGBE_FUNC_DUMP_TDESC = (3 << 16), + NGBE_FUNC_FLASH_READ = (4 << 16), + NGBE_FUNC_FLASH_WRITE = (5 << 16), +}; + +struct ngbe_reg_info { + u32 offset; + u32 length; + char *name; +}; + +static struct ngbe_reg_info ngbe_reg_info_tbl[] = { + /* General Registers */ + {NGBE_CFG_PORT_CTL, 1, "CTRL"}, + {NGBE_CFG_PORT_ST, 1, "STATUS"}, + + /* RX Registers */ + {NGBE_PX_RR_CFG(0), 1, "SRRCTL"}, + {NGBE_PX_RR_RP(0), 1, "RDH"}, + {NGBE_PX_RR_WP(0), 1, "RDT"}, + {NGBE_PX_RR_CFG(0), 1, "RXDCTL"}, + {NGBE_PX_RR_BAL(0), 1, "RDBAL"}, + {NGBE_PX_RR_BAH(0), 1, "RDBAH"}, + + /* TX Registers */ + {NGBE_PX_TR_BAL(0), 1, "TDBAL"}, + {NGBE_PX_TR_BAH(0), 1, "TDBAH"}, + {NGBE_PX_TR_RP(0), 1, "TDH"}, + {NGBE_PX_TR_WP(0), 1, "TDT"}, + {NGBE_PX_TR_CFG(0), 1, "TXDCTL"}, + + /* MACVLAN */ + {NGBE_PSR_MAC_SWC_VM, 128, "PSR_MAC_SWC_VM"}, + {NGBE_PSR_MAC_SWC_AD_L, 32, "PSR_MAC_SWC_AD"}, + {NGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"}, + + /* List Terminator */ + { .name = NULL } +}; + +/** + * data operation + **/ +ssize_t ngbe_simple_read_from_pcibar(struct ngbe_adapter *adapter, int res, + void __user *buf, size_t size, loff_t *ppos) +{ + loff_t pos = *ppos; + u32 miss, len, limit = pci_resource_len(adapter->pdev, res); + + if (pos < 0) + return 0; + + limit = (pos + size <= limit ? pos + size : limit); + for (miss = 0; pos < limit && !miss; buf += len, pos += len) { + u32 val = 0, reg = round_down(pos, 4); + u32 off = pos - reg; + + len = (reg + 4 <= limit ? 
4 - off : 4 - off - (limit - reg - 4)); + val = ngbe_rd32(adapter->io_addr + reg); + miss = copy_to_user(buf, &val + off, len); + } + + size = pos - *ppos - miss; + *ppos += size; + + return size; +} + +ssize_t ngbe_simple_read_from_flash(struct ngbe_adapter *adapter, + void __user *buf, size_t size, loff_t *ppos) +{ + struct ngbe_hw *hw = &adapter->hw; + loff_t pos = *ppos; + size_t ret = 0; + loff_t rpos, rtail; + void __user *to = buf; + size_t available = adapter->hw.flash.dword_size << 2; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !size) + return 0; + if (size > available - pos) + size = available - pos; + + rpos = round_up(pos, 4); + rtail = round_down(pos + size, 4); + if (rtail < rpos) + return 0; + + to += rpos - pos; + while (rpos <= rtail) { + u32 value = ngbe_rd32(adapter->io_addr + rpos); + + if (TCALL(hw, flash.ops.write_buffer, rpos >> 2, 1, &value)) { + ret = size; + break; + } + if (copy_to_user(to, &value, 4) == 4) { + ret = size; + break; + } + to += 4; + rpos += 4; + } + + if (ret == size) + return -EFAULT; + size -= ret; + *ppos = pos + size; + return size; +} + +ssize_t ngbe_simple_write_to_flash(struct ngbe_adapter *adapter, const void __user *from, + size_t size, loff_t *ppos, size_t available) +{ + return size; +} + +static ssize_t ngbe_dbg_data_ops_read(struct file *filp, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + /* Ensure all reads are done */ + rmb(); + + switch (func) { + case NGBE_FUNC_DUMP_BAR: { + u32 bar = NGBE_DATA_ARGS(ngbe_data_mode); + + return ngbe_simple_read_from_pcibar(adapter, bar, buffer, size, + ppos); + } + case NGBE_FUNC_FLASH_READ: { + return ngbe_simple_read_from_flash(adapter, buffer, size, ppos); + } + case NGBE_FUNC_DUMP_RDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_rx_queues) + return 0; + + ring = 
adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + case NGBE_FUNC_DUMP_TDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_tx_queues) + return 0; + + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + } + default: + break; + } + + return 0; +} + +static ssize_t ngbe_dbg_data_ops_write(struct file *filp, + const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + /* Ensure all reads are done */ + rmb(); + + switch (func) { + case NGBE_FUNC_FLASH_WRITE: { + u32 size = NGBE_DATA_ARGS(ngbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + return ngbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + } + default: + break; + } + + return size; +} + +static const struct file_operations ngbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_data_ops_read, + .write = ngbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char ngbe_dbg_reg_ops_buf[256] = ""; + +static ssize_t ngbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * ngbe_regdump - register printout routine + **/ +static void +ngbe_regdump(struct ngbe_hw *hw, struct ngbe_reg_info *reg_info) +{ + 
int i, n = 0; + u32 buffer[256]; + + switch (reg_info->offset) { + case NGBE_PSR_MAC_SWC_AD_L: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + } + break; + default: + for (i = 0; i < reg_info->length; i++) { + buffer[n++] = rd32(hw, + reg_info->offset + (i << 2)); + } + break; + } + + WARN_ON(n); +} + +/** + * ngbe_dump - Print registers, tx-rings and rx-rings + **/ +void ngbe_dump(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_reg_info *reg_info; + int n = 0; + struct ngbe_ring *tx_ring; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ngbe_ring *rx_ring; +#ifndef CONFIG_NGBE_DISABLE_PACKET_SPLIT + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer_info; + u32 staterr; +#endif + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reg_info = ngbe_reg_info_tbl; reg_info->name; reg_info++) + ngbe_regdump(hw, reg_info); + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings 
Dump\n"); + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = NGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_info(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_info(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_info(" NTC\n"); + else + pr_info("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + 
pr_info("%s%s%s", + "R [desc] [ PktBuf A0]", + "[ HeadBuf DD] [bi->dma ] [bi->skb ]", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs]", + "[vl er S cks ln] ---------------- [bi->skb ]", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = NGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & NGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX %016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->page_dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->page_dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ngbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_info(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_info(" NTC\n"); + else + pr_info("\n"); + } + } +} + +static ssize_t +ngbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *pc = ngbe_dbg_reg_ops_buf; + int len; + int ret; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_reg_ops_buf, + sizeof(ngbe_dbg_reg_ops_buf) - 1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = NGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = 
NGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_TDESC; + } else { + ngbe_dump(adapter); + } + ; + if (mode && kstrtou16(pc, 1, &args) == 0) + mode |= args; + + ngbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = NGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = NGBE_FUNC_FLASH_WRITE; + } + + if (mode && kstrtou16(pc, 1, &args) == 0) + mode |= args; + + ngbe_data_mode = mode; + } else if (strncmp(ngbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&ngbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(ngbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + + ret = kstrtou32(&ngbe_dbg_reg_ops_buf[4], 1, ®); + if (ret == 0) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", ngbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations ngbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_reg_ops_read, + .write = ngbe_dbg_reg_ops_write, +}; + +/* netdev_ops operation */ +static char ngbe_dbg_netdev_ops_buf[256] = ""; + +static ssize_t +ngbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + 
ngbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_netdev_ops_buf, + sizeof(ngbe_dbg_netdev_ops_buf) - 1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ngbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ngbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, UINT_MAX); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ngbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info("tx_timeout\n"); + } + return count; +} + +static const struct file_operations ngbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_netdev_ops_read, + .write = ngbe_dbg_netdev_ops_write, +}; + +/** + * ngbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + + adapter->ngbe_dbg_adapter = debugfs_create_dir(name, ngbe_dbg_root); + if (!adapter->ngbe_dbg_adapter) { + e_dev_err("debugfs entry for %s failed\n", name); + return; + } + + pfile = debugfs_create_file("data", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_data_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + 
				    &ngbe_dbg_reg_ops_fops);
	if (!pfile)
		e_dev_err("debugfs reg_ops for %s failed\n", name);

	pfile = debugfs_create_file("netdev_ops", 0600,
				    adapter->ngbe_dbg_adapter, adapter,
				    &ngbe_dbg_netdev_ops_fops);
	if (!pfile)
		e_dev_err("debugfs netdev_ops for %s failed\n", name);
}

/**
 * ngbe_dbg_adapter_exit - clear out the adapter's debugfs entries
 * @adapter: the adapter whose debugfs directory is being removed
 *
 * Recursively removes the per-adapter debugfs directory created by
 * ngbe_dbg_adapter_init() and clears the stale pointer so a later
 * re-init starts clean.
 **/
void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter)
{
	debugfs_remove_recursive(adapter->ngbe_dbg_adapter);
	adapter->ngbe_dbg_adapter = NULL;
}

/**
 * ngbe_dbg_init - start up debugfs for the driver
 *
 * Creates the driver-wide debugfs root directory; per-adapter
 * subdirectories are added under it by ngbe_dbg_adapter_init().
 * NOTE(review): on current kernels debugfs_create_dir() returns an
 * ERR_PTR rather than NULL on failure - confirm this check suits the
 * target kernel version.
 **/
void ngbe_dbg_init(void)
{
	ngbe_dbg_root = debugfs_create_dir(ngbe_driver_name, NULL);
	if (!ngbe_dbg_root)
		pr_err("init of debugfs failed\n");
}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
new file mode 100644
index 0000000000000000000000000000000000000000..6094d9d3e272e62b325fdcf9339de3e58fd586cd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -0,0 +1,2711 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" + +struct ngbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct ngbe_stats ngbe_gstrings_net_stats[] = { + NGBE_NETDEV_STAT(rx_packets), + NGBE_NETDEV_STAT(tx_packets), + NGBE_NETDEV_STAT(rx_bytes), + NGBE_NETDEV_STAT(tx_bytes), + NGBE_NETDEV_STAT(rx_errors), + NGBE_NETDEV_STAT(tx_errors), + NGBE_NETDEV_STAT(rx_dropped), + NGBE_NETDEV_STAT(tx_dropped), + NGBE_NETDEV_STAT(multicast), + NGBE_NETDEV_STAT(collisions), + NGBE_NETDEV_STAT(rx_over_errors), + NGBE_NETDEV_STAT(rx_crc_errors), + NGBE_NETDEV_STAT(rx_frame_errors), + NGBE_NETDEV_STAT(rx_fifo_errors), + NGBE_NETDEV_STAT(rx_missed_errors), + NGBE_NETDEV_STAT(tx_aborted_errors), + NGBE_NETDEV_STAT(tx_carrier_errors), + NGBE_NETDEV_STAT(tx_fifo_errors), + NGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define NGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct ngbe_adapter, _stat), \ + .stat_offset = offsetof(struct ngbe_adapter, _stat) \ +} + +static struct ngbe_stats ngbe_gstrings_stats[] = { + NGBE_STAT("rx_pkts_nic", stats.gprc), + NGBE_STAT("tx_pkts_nic", stats.gptc), + NGBE_STAT("rx_bytes_nic", stats.gorc), + NGBE_STAT("tx_bytes_nic", stats.gotc), + NGBE_STAT("lsc_int", lsc_int), + NGBE_STAT("tx_busy", tx_busy), + NGBE_STAT("non_eop_descs", non_eop_descs), + NGBE_STAT("broadcast", stats.bprc), + NGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + NGBE_STAT("tx_timeout_count", tx_timeout_count), + NGBE_STAT("tx_restart_queue", restart_queue), + NGBE_STAT("rx_long_length_count", stats.roc), + NGBE_STAT("rx_short_length_count", stats.ruc), + 
NGBE_STAT("tx_flow_control_xon", stats.lxontxc), + NGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + NGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + NGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + NGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + NGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + NGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + NGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + + NGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + NGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + NGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + NGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + NGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), + + NGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + NGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define NGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define NGBE_NUM_TX_QUEUES netdev->num_tx_queues + +#define NGBE_QUEUE_STATS_LEN ( \ + (NGBE_NUM_TX_QUEUES + NGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ngbe_queue_stats) / sizeof(u64))) +#define NGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ngbe_gstrings_stats) +#define NGBE_NETDEV_STATS_LEN ARRAY_SIZE(ngbe_gstrings_net_stats) +#define NGBE_PB_STATS_LEN ( \ + (sizeof(((struct ngbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) + +#define NGBE_STATS_LEN (NGBE_GLOBAL_STATS_LEN + \ + NGBE_NETDEV_STATS_LEN + \ + NGBE_PB_STATS_LEN + \ + NGBE_QUEUE_STATS_LEN) + +static const char ngbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define NGBE_TEST_LEN (sizeof(ngbe_gstrings_test) / ETH_GSTRING_LEN) +#define ngbe_isbackplane(type) \ + ((type == ngbe_media_type_backplane) ? 
true : false) + +#define NGBE_REGS_LEN 4096 +#define NGBE_GET_STAT(_A_, _R_) ((_A_)->stats.(_R_)) +#define NGBE_R32_Q(h, r) ngbe_read_reg(h, r, true) + +/* ethtool register test data */ +struct ngbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default sapphire register test */ +static struct ngbe_reg_test reg_test_sapphire[] = { + { NGBE_RDB_RFCL, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { NGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { NGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, NGBE_PX_RR_CFG_RR_EN }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCV, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { NGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { NGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +int ngbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + u32 link_speed 
= 0; + bool autoneg = false; + bool link_up = 0; + u16 value = 0; + unsigned long flags; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_yt8521s_sfi || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + } else if (hw->phy.type == ngbe_phy_internal || + hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_yt8521s || + ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) { + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + if (supported_link & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + if (supported_link & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + } + + /* set the advertised speeds */ + if (hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_yt8521s_sfi || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); + } else { + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) { + if (ethtool_link_ksettings_test_link_mode(cmd, supported, + 1000baseKX_Full)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + else if (ethtool_link_ksettings_test_link_mode(cmd, supported, + 1000baseT_Full)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseX_Full); + } + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + 
if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } + } + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (autoneg) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + /* Determine the remaining settings based on the PHY type. */ + switch (adapter->hw.phy.type) { + case ngbe_phy_internal: + case ngbe_phy_m88e1512: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case ngbe_phy_sfp_passive_tyco: + case ngbe_phy_sfp_passive_unknown: + case ngbe_phy_sfp_ftl: + case ngbe_phy_sfp_avago: + case ngbe_phy_sfp_intel: + case ngbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ngbe_sfp_type_da_cu: + case ngbe_sfp_type_da_cu_core0: + case ngbe_sfp_type_da_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_DA; + break; + case ngbe_sfp_type_sr: + case ngbe_sfp_type_lr: + case ngbe_sfp_type_srlr_core0: + case ngbe_sfp_type_srlr_core1: + case ngbe_sfp_type_1g_sx_core0: + case ngbe_sfp_type_1g_sx_core1: + case ngbe_sfp_type_1g_lx_core0: + case ngbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case ngbe_sfp_type_not_present: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_NONE; + break; + case ngbe_sfp_type_1g_cu_core0: + case ngbe_sfp_type_1g_cu_core1: + 
ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + break; + case ngbe_sfp_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + break; + case ngbe_phy_yt8521s_sfi: + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if ((value & 7) == 0) {//utp_to_rgmii + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; + } else { + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + } + break; + case ngbe_phy_internal_yt8521s_sfi: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + break; + case ngbe_phy_unknown: + case ngbe_phy_generic: + case ngbe_phy_sfp_unsupported: + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_OTHER; + break; + } + + if (!in_interrupt()) { + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + } else { + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case ngbe_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case ngbe_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case ngbe_fc_tx_pause: + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case NGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case NGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + return 0; +} + +static int ngbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + struct ethtool_link_ksettings temp_ks; + + if (hw->phy.media_type == ngbe_media_type_copper || hw->phy.multispeed_fiber) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + /* To be compatible with test cases */ + if (hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_yt8521s_sfi || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); + } + } + + /* this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (!bitmap_subset(cmd->link_modes.advertising, + temp_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = true; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full) || + 
ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseX_Full)) + advertised |= NGBE_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100baseT_Full)) + advertised |= NGBE_LINK_SPEED_100_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10baseT_Full)) + advertised |= NGBE_LINK_SPEED_10_FULL; + } else { + if (cmd->base.duplex == DUPLEX_HALF) { + e_err(probe, "unsupported duplex\n"); + return -EINVAL; + } + + switch (cmd->base.speed) { + case SPEED_10: + advertised = NGBE_LINK_SPEED_10_FULL; + break; + case SPEED_100: + advertised = NGBE_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertised = NGBE_LINK_SPEED_1GB_FULL; + break; + default: + e_err(probe, "unsupported speed\n"); + return -EINVAL; + } + + hw->mac.autoneg = false; + } + + hw->mac.autotry_restart = true; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->phy.autoneg_advertised = advertised; + } else { + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_UNKNOWN; + hw->phy.force_speed = advertised; + } + + if (netif_running(adapter->netdev)) + TCALL(hw, mac.ops.setup_link, advertised, true); + else + ngbe_reset(adapter); + } else { + /* in this case we currently only support 1Gb/FULL */ + u32 speed = cmd->base.speed; + + if (cmd->base.autoneg == AUTONEG_ENABLE || + (!ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) || + (speed + cmd->base.duplex != SPEED_1000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void ngbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + if (!hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ngbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_full) { + pause->rx_pause = 1; + 
pause->tx_pause = 1; + } +} + +static int ngbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_fc_info fc = hw->fc; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ngbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = ngbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = ngbe_fc_tx_pause; + else + fc.requested_mode = ngbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ngbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} + +static u32 ngbe_get_msglevel(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ngbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int ngbe_get_regs_len(struct net_device __always_unused *netdev) +{ + return NGBE_REGS_LEN * sizeof(u32); +} + +static void ngbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, NGBE_REGS_LEN * sizeof(u32)); + regs_buff[NGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PWR);//0 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_CTL);//1 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PF_SM);//2 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST);//3 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_ST);//4 + 
regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_SWSM);//5 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_CTL);//7 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_EN);//8 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ST);//9 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_INT_EN);//12 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMD);//14 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_DATA);//15 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_STATUS);//16 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_USR_CMD);//17 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_ST);//23 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TCP_TIME);//25 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_LED_CTL);//26 + /* GPIO */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DR);//27 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DDR);//28 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_CTL);//29 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTEN);//30 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTMASK);//31 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTSTATUS);//32 + /* TX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_TDESC);//33 + /* RX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RDESC);//34 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RHDR);//35 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RPL);//36 + + /* TDMA */ + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_TDM_CTL);//37 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_POOL_TE);//38 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PB_THRE);//39 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_LLQ);//40 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_LB_L);//41 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_AS_L);//42 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_MAC_AS_L);//43 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_AS_L);//44 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_L);//45 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_H);//46 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_INS(i));//47-54 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETAG_INS(i));//55-62 + /* Transmit QOS */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PBWARB_CTL);//63 + + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_DRP_CNT);//64 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_SEC_DRP);//65 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PKT_CNT);//66 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_L);//67 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_H);//68 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_OS2BMC_CNT);//69 + + /* RDMA */ + /* receive control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_ARB_CTL);//70 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_POOL_RE);//71 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_QDE);//72 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_HIDE);//73 + /* static */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_DRP_PKT);//74 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PKT_CNT);//75 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_L);//76 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_H);//77 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BMC2OS_CNT);//78 + + /* RDB */ + /*flow control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCV);//79 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCL);//80 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCH);//81 + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_RDB_RFCRT);//82 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCC);//83 + /* receive packet buffer */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_CTL);//84 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_SZ);//85 + + /* lli interrupt */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LLI_THRE);//86 + /* ring assignment */ + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PL_CFG(i));//87-94 + for (i = 0; i < 32; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSTBL(i));//95-126 + for (i = 0; i < 10; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSRK(i));//127-136 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RA_CTL);//137 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_SDP(i));//138-145 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL0(i));//146-153 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL1(i));//154-161 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_SYN_CLS);//162 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_ETYPE_CLS(i));//163-170 + /* statistics */ + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_MPCNT);//171 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PKT_CNT);//172 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_REPLI_CNT);//173 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_DRP_CNT);//174 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXONTXC);//175 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXOFFTXC);//176 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAL);//177 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAH);//178 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_TXSWERR);//179 + + /* PSR */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_CTL);//180 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAX_SZ);//181 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_CTL);//182 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_CTL);//183 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_PKT_CNT);//184 + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_PSR_MNG_PKT_CNT);//185 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_DBG_DOP_CNT);//186 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_DOP_CNT);//187 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_FLP_L);//188 + + /* vm l2 control */ + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_L2CTL(i));//189-196 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_ETYPE_SWC(i));//197-204 + for (i = 0; i < 128; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MC_TBL(i));//205-332 + for (i = 0; i < 128; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_UC_TBL(i));///333-460 + for (i = 0; i < 128; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_TBL(i));//461-588 + /* mac switcher */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_L);//589 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_H);//590 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_VM);//591 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_IDX);//592 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC);//593 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_VM_L);//594 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_IDX);//595 + + /* mirror */ + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_CTL(i));//596-599 + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VLAN_L(i));//600-603 + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VM_L(i));//604-607 + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_CTL);//608 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPL);//609 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPH);//610 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRL);//611 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRH);//612 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_MSGTYPE);//613 + /* wake up */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_CTL);//614 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IPV);//615 + for (i = 0; 
i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP4TBL(i));//616-619 + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP6TBL(i));//620-623 + for (i = 0; i < 16; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_L(i));//624-639 + for (i = 0; i < 16; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_H(i));//640-655 + for (i = 0; i < 16; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_MSK(i));//656-671 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_CTL);//672 + + /* TDB */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_RFCS);//673 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PB_SZ);//674 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PBRARB_CTL);//675 + /* statistic */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_OUT_PKT_CNT);//676 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_PKT_CNT);//677 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_LB_PKT_CNT);//678 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_LARGE_DOP_CNT);//679 + + /* TSEC */ + /* general tsec */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_CTL);//680 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_ST);//681 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AF);//682 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AE);//683 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_MIN_IFG);//684 + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_CTL);//685 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPL);//686 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPH);//687 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIML);//688 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIMH);//689 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INC);//690 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJL);//691 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJH);//692 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_ST);//693 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_EN);//694 + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_TSEC_1588_AUX_CTL);//695 + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SDP(i));//696-699 + + /* RSEC */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_CTL);//700 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_ST);//701 + /* mac wrapper */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_CFG);//702 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_CFG);//703 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_PKT_FLT);//704 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_WDG_TIMEOUT);//705 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_FLOW_CTRL);//706 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_FLOW_CTRL);//707 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_ST);//708 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_EN);//709 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);//710 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);//711 + + /* BAR register */ + /* pf interrupt register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IC);//712 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_ICS);//713 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IEN);//714 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_GPIE);//715 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IC);//716 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ICS);//717 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMS);//718 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMC);//719 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_L);//720 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_H);//721 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITRSEL);//722 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITR(i));//723-730 + for (i = 0; i < 4; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IVAR(i));//731-734 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IVAR);//735 + /* pf receive ring register */ + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAL(i));//736-743 + for (i = 0; i < 8; i++) + regs_buff[id++] = 
NGBE_R32_Q(hw, NGBE_PX_RR_BAH(i));//744-751 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_WP(i));//752-759 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_RP(i));//760-767 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_CFG(i));//768-775 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAL(i));//776-783 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAH(i));//784-791 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_WP(i));//792-709 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_RP(i));//800-807 + for (i = 0; i < 8; i++) + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_CFG(i));//808-815 +} + +static int ngbe_get_eeprom_len(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.eeprom.word_size << 1; +} + +static int ngbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + u32 size; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + size = sizeof(u16) * eeprom_len; + eeprom_buff = kmalloc(size, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = TCALL(hw, eeprom.ops.read_buffer, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ngbe_set_eeprom(struct net_device *netdev, + struct 
ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = TCALL(hw, eeprom.ops.write_buffer, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + TCALL(hw, eeprom.ops.update_checksum); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ngbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ngbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ngbe_driver_version, + sizeof(drvinfo->version) - 1); + 
strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) + drvinfo->n_stats = NGBE_STATS_LEN - + (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct ngbe_queue_stats) / sizeof(u64)) * 2; + else + drvinfo->n_stats = NGBE_STATS_LEN; + + drvinfo->testinfo_len = NGBE_TEST_LEN; + drvinfo->regdump_len = ngbe_get_regs_len(netdev); +} + +static void ngbe_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NGBE_MAX_RXD; + ring->tx_max_pending = NGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ngbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + NGBE_MIN_TXD, NGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, NGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + NGBE_MIN_RXD, NGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, NGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) + return 0; + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + 
adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ngbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ngbe_down(adapter); + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ngbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ngbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_rx_count; + err = ngbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ngbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + ngbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__NGBE_RESETTING, &adapter->state); + 
return err; +} + +static int ngbe_get_sset_count(struct net_device *netdev, int sset) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return NGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct ngbe_queue_stats) / sizeof(u64)) * 2; + } else { + return NGBE_STATS_LEN; + } + default: + return -EOPNOTSUPP; + } +} + +static void ngbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + unsigned int start; + struct ngbe_ring *ring; + int i, j; + char *p; + + ngbe_update_stats(adapter); + + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ngbe_gstrings_net_stats[i].stat_offset; + data[i] = (ngbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < NGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ngbe_gstrings_stats[j].stat_offset; + data[i] = (ngbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + i += 2; + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + i += 2; + } + + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } +} + +static void ngbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ngbe_gstrings_test, + NGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < 
NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_vfs; i++) { + sprintf(p, "VF %d Rx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d MC Packets", i); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int ngbe_link_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + + if (NGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, true); + if (!link_up) + *data = 1; + + return *data; +} + +bool reg_pattern_test(struct ngbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return 
true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + +static bool ngbe_reg_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_reg_test *test; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static irqreturn_t ngbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct ngbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt 
status */ + icr = ngbe_misc_isb(adapter, NGBE_ISB_VEC1); + icr <<= 32; + icr |= ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + +static int ngbe_intr_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &ngbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &ngbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &ngbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ngbe_irq_disable(adapter); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, ~mask); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. 
+ */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 0; + break; + } + } + + /* Disable all the interrupts */ + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ngbe_free_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + TCALL(hw, mac.ops.disable_rx); + ngbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, NGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); + + ngbe_reset(adapter); + + ngbe_free_tx_resources(&adapter->test_tx_ring); + ngbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ngbe_setup_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = NGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ngbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + wr32m(hw, NGBE_RSEC_CTL, 0x2, 0); + ngbe_configure_tx_ring(adapter, tx_ring); + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE | 
NGBE_MAC_TX_CFG_SPEED_MASK, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_1G); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = NGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + + err = ngbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + TCALL(hw, mac.ops.disable_rx); + + ngbe_configure_rx_ring(adapter, rx_ring); + + TCALL(hw, mac.ops.enable_rx); + + return 0; + +err_nomem: + ngbe_free_desc_rings(adapter); + return ret_val; +} + +static int ngbe_setup_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup MAC loopback */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, NGBE_MAC_RX_CFG_LM); + + reg_data = rd32(hw, NGBE_PSR_CTL); + reg_data |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_UPE | + NGBE_PSR_CTL_MPE | NGBE_PSR_CTL_TPE; + wr32(hw, NGBE_PSR_CTL, reg_data); + + wr32(hw, 0x17000, + (rd32(hw, 0x17000) | + 0x00000040U) & + ~0x1U); + + wr32(hw, 0x17204, 0x4); + wr32(hw, NGBE_PSR_VLAN_CTL, + rd32(hw, NGBE_PSR_VLAN_CTL) & + ~NGBE_PSR_VLAN_CTL_VFE); + + NGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static void ngbe_loopback_cleanup(struct ngbe_adapter *adapter) +{ + wr32m(&adapter->hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, ~NGBE_MAC_RX_CFG_LM); +} + +static void ngbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, (frame_size >> 1) - 1); + frame_size = 1; + memset(&skb->data[frame_size + 10], 0xBE, frame_size); + memset(&skb->data[frame_size + 12], 0xAF, frame_size); +} + +static bool ngbe_check_lbtest_frame(struct ngbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + data = kmap(rx_buffer->page) + 
rx_buffer->page_offset; + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static u16 ngbe_clean_test_rings(struct ngbe_ring *rx_ring, + struct ngbe_ring *tx_ring, + unsigned int size) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer; + struct ngbe_tx_buffer *tx_buffer; + const int bufsz = ngbe_rx_bufsz(rx_ring); + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + + while (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (ngbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + ngbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ngbe_run_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct 
sk_buff *skb; + u32 flags_orig = adapter->flags; + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~NGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ngbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ngbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + mdelay(10); + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + mdelay(200); + + good_cnt = ngbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + e_dev_err("run_loopback_test: recv_cnt = %d\n", good_cnt); + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int ngbe_loopback_test(struct ngbe_adapter *adapter, u64 *data) +{ + *data = ngbe_setup_desc_rings(adapter); + + if (*data) + goto out; + *data = ngbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ngbe_run_loopback_test(adapter); + if (*data) + e_info(hw, "mac loopback testing failed\n"); + ngbe_loopback_cleanup(adapter); + +err_loopback: + ngbe_free_desc_rings(adapter); +out: + return *data; +} + +static bool ngbe_eeprom_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 devcap; + bool ret; + + if (TCALL(hw, eeprom.ops.eeprom_chksum_cap_st, 
NGBE_CALSUM_COMMAND, &devcap)) { + *data = 1; + ret = true; + } else { + *data = 0; + ret = false; + } + + return ret; +} + +static void ngbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct ngbe_hw *hw = &adapter->hw; + + e_dev_info("diag_test: start test\n"); + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__NGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) { + /* indicate we're in test mode */ + ngbe_close(netdev); + } else { + mdelay(20); + ngbe_reset(adapter); + } + + e_info(hw, "register testing starting\n"); + + if (ngbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + mdelay(20); + ngbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ngbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + mdelay(20); + ngbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ngbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (!(((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD) || + ((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP))) { + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
+ */ + if (adapter->flags & (NGBE_FLAG_SRIOV_ENABLED | + NGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + e_info(hw, "loopback testing starting\n"); + ngbe_loopback_test(adapter, &data[3]); + } + + data[3] = 0; + +skip_loopback: + msleep(20); + ngbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__NGBE_TESTING, &adapter->state); + if (if_running) + ngbe_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__NGBE_TESTING, &adapter->state); + } +} + +static int ngbe_wol_exclusion(struct ngbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + int retval = 0; + + /* WOL not supported for all devices */ + if (!ngbe_wol_supported(adapter)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void ngbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + struct ngbe_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ngbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if (adapter->wol & NGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; + + if (!((hw->subsystem_device_id & NGBE_WOL_MASK) == NGBE_WOL_SUP)) + wol->wolopts = 0; +} + +static int ngbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + 
struct ngbe_hw *hw = &adapter->hw; + u32 slot = hw->bus.lan_id; + u16 value; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ngbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + if (!((hw->subsystem_device_id & NGBE_WOL_MASK) == NGBE_WOL_SUP)) + return -EOPNOTSUPP; + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC) { + adapter->wol |= NGBE_PSR_WKUP_CTL_MAG; + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*enable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value | (1 << slot)); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + return 0; + } + + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*disable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value & ~(1 << slot)); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + return 0; +} + +static int ngbe_nway_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +static int ngbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_read_reg_ext_yt(hw, 0xA00B, 0, (u16 *)&adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.read_reg_mdi, 16, 0, (u16 
*)&adapter->led_reg); + } else { + adapter->led_reg = rd32(hw, NGBE_CFG_LED_CTL); + } + return 2; + + case ETHTOOL_ID_ON: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt(hw, 0xA00B, 0, adapter->led_reg | 0x140); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, (adapter->led_reg & ~0xF) | 0x9); + } else { + TCALL(hw, mac.ops.led_on, NGBE_LED_LINK_1G); + } + break; + + case ETHTOOL_ID_OFF: + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt(hw, 0xA00B, 0, adapter->led_reg | 0x100); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, (adapter->led_reg & ~0xF) | 0x8); + } else { + TCALL(hw, mac.ops.led_off, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + } + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) { + ngbe_phy_write_reg_ext_yt(hw, 0xA00B, 0, adapter->led_reg); + } else if (adapter->hw.phy.type == ngbe_phy_m88e1512 || + adapter->hw.phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, adapter->led_reg); + } else { + wr32(&adapter->hw, NGBE_CFG_LED_CTL, + adapter->led_reg); + } + break; + } + + return 0; +} + +static int ngbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + 
ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ngbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (NGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (NGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = NGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = NGBE_20K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR 
changes that require update of TXDCTL.WTHRESH */ + if (adapter->tx_itr_setting != 1 && + adapter->tx_itr_setting < NGBE_70K_ITR) { + if (tx_itr_prev == 1 || + tx_itr_prev >= NGBE_70K_ITR) + need_reset = true; + } else { + if (tx_itr_prev != 1 && + tx_itr_prev < NGBE_70K_ITR) + need_reset = true; + } + + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + TCALL(hw, mac.ops.dmac_config); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ngbe_write_eitr(q_vector); + } + + /* do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static u32 ngbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static void ngbe_get_reta(struct ngbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ngbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ngbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ngbe_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ngbe_rss_indir_tbl_max(struct 
ngbe_adapter *adapter) +{ + return 64; +} + +static int ngbe_get_rss_hash_opts(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ngbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ngbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED && max_queues < 2) + max_queues = 2; + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ngbe_get_rxfh_key_size(netdev)); + + ngbe_store_reta(adapter); + + return 0; +} + +static int ngbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + break; + case ETHTOOL_GRXCLSRLALL: + break; + case ETHTOOL_GRXFH: + ret = ngbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (NGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + +static int ngbe_set_rss_hash_opt(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | 
RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ngbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, NGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets may out of order to stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4 | + NGBE_RDB_RA_CTL_RSS_IPV4_TCP | + NGBE_RDB_RA_CTL_RSS_IPV6 | + NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(NGBE_RDB_RA_CTL_RSS_IPV4_UDP | NGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, NGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int ngbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + fallthrough; + case ETHTOOL_SRXCLSRLDEL: + break; + case ETHTOOL_SRXFH: + ret = ngbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int ngbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} + +static int ngbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct 
ngbe_adapter *adapter = netdev_priv(netdev); + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(adapter->flags2 & NGBE_FLAG2_EEE_CAPABLE)) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ngbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not supported\n"); + return -EINVAL; + } + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + if (edata->eee_enabled) + adapter->flags2 |= NGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~NGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} + +static unsigned int ngbe_max_channels(struct ngbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = NGBE_MAX_FDIR_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = ngbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void 
ngbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ngbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; +} + +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ngbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ngbe_max_channels(adapter)) + return -EINVAL; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ngbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + info->so_timestamping = 
SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF | HWTSTAMP_TX_ON); + + info->rx_filters |= 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_EVENT; + + return 0; +} + +static u32 ngbe_rss_indir_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return ngbe_rss_indir_tbl_entries(adapter); +} + +static int ngbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = ngbe_upgrade_flash(&adapter->hw, ef->region, fw->data, fw->size); + } else { + if (ngbe_mng_present(&adapter->hw)) + ret = ngbe_upgrade_flash_hostif(&adapter->hw, + ef->region, fw->data, fw->size); + else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make firmware work\n", ef->data); + return ret; +} + +static const struct ethtool_ops ngbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_link_ksettings = ngbe_get_link_ksettings, + .set_link_ksettings = ngbe_set_link_ksettings, + .get_drvinfo = ngbe_get_drvinfo, + .get_regs_len = ngbe_get_regs_len, + 
.get_regs = ngbe_get_regs, + .get_wol = ngbe_get_wol, + .set_wol = ngbe_set_wol, + .nway_reset = ngbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ngbe_get_eeprom_len, + .get_eeprom = ngbe_get_eeprom, + .set_eeprom = ngbe_set_eeprom, + .get_ringparam = ngbe_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_pauseparam = ngbe_get_pauseparam, + .set_pauseparam = ngbe_set_pauseparam, + .get_msglevel = ngbe_get_msglevel, + .set_msglevel = ngbe_set_msglevel, + .self_test = ngbe_diag_test, + .get_strings = ngbe_get_strings, + .set_phys_id = ngbe_set_phys_id, + .get_sset_count = ngbe_get_sset_count, + .get_ethtool_stats = ngbe_get_ethtool_stats, + .get_coalesce = ngbe_get_coalesce, + .set_coalesce = ngbe_set_coalesce, + .get_rxnfc = ngbe_get_rxnfc, + .set_rxnfc = ngbe_set_rxnfc, + .get_eee = ngbe_get_eee, + .set_eee = ngbe_set_eee, + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, + .get_ts_info = ngbe_get_ts_info, + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, + .flash_device = ngbe_set_flash, +}; + +void ngbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ngbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..c581901a5f92be956918619c9d22dc720df43913 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -0,0 +1,3611 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include "ngbe_type.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#include "ngbe.h" + +#define NGBE_SP_RAR_ENTRIES 32 +#define NGBE_SP_MAX_TX_QUEUES 8 +#define NGBE_SP_MAX_RX_QUEUES 8 +#define NGBE_SP_RAR_ENTRIES 32 +#define NGBE_SP_MC_TBL_SIZE 128 +#define NGBE_SP_VFT_TBL_SIZE 128 +#define NGBE_SP_RX_PB_SIZE 42 + +u8 ngbe_fmgr_cmd_op(struct ngbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0; + u32 time_out = 0; + + cmd_val = (cmd << SPI_CLK_CMD_OFFSET) | + (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | + cmd_addr; + wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val); + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (time_out == SPI_TIME_OUT_VALUE) + return 1; + + time_out = time_out + 1; + usleep_range(10, 200); + } + + return 0; +} + +u32 ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr) +{ + u8 status; + + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (status) + return (u32)status; + + return rd32(hw, SPI_H_DAT_REG_ADDR); +} + +/** + * ngbe_get_media_type - Get media type + * @hw: pointer to hardware structure + **/ +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw) +{ + enum ngbe_media_type media_type; + + media_type = ngbe_media_type_copper; + + return media_type; +} + +/** + * ngbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + **/ +static s32 ngbe_setup_copper_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + s32 status = 0; + struct ngbe_adapter *adapter = hw->back; + + /* Setup the PHY according to input speed */ + if (!((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) + status = TCALL(hw, phy.ops.setup_link, speed, need_restart_AN); + + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return status; +} + +/** + * ngbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: 
pointer to link speed + * @autoneg: boolean auto-negotiation value + **/ +s32 ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + u16 value = 0; + unsigned long flags; + + *speed = 0; + + if (hw->mac.autoneg) + *autoneg = true; + else + *autoneg = false; + + if (status == 0) { + *speed = NGBE_LINK_SPEED_10_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_1GB_FULL; + } + + if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + *autoneg = false; + } + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + + if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if ((value & 7) == 1) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + } + } + + return status; +} + +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, NGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) + break; + + mdelay(200); + } + if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = NGBE_ERR_FLASH_LOADING_FAILED; + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "HW Loading Flash failed: %d\n", err); + } + } + + return err; +} + +/** + * ngbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) +{ + s32 status = 0; + + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, 
sizeof(struct ngbe_thermal_sensor_data));

	/* Only support thermal sensors attached to SP physical port 0 */
	if (hw->bus.lan_id)
		return NGBE_NOT_IMPLEMENTED;

	/* Enable both thermal alarm interrupts, then the sensor itself */
	wr32(hw, NGBE_TS_INT_EN,
	     NGBE_TS_INT_EN_DALARM_INT_EN |
	     NGBE_TS_INT_EN_ALARM_INT_EN);
	wr32(hw, NGBE_TS_EN, NGBE_TS_EN_ENA);

	/* Alarm at 115C, de-alarm (clear) at 110C; the register encodings
	 * 0x344 / 0x330 are the raw sensor codes for those temperatures —
	 * TODO confirm against the thermal sensor datasheet.
	 */
	data->sensor.alarm_thresh = 115;
	wr32(hw, NGBE_TS_ALARM_THRE, 0x344);/* magic num */
	data->sensor.dalarm_thresh = 110;
	wr32(hw, NGBE_TS_DALARM_THRE, 0x330);/* magic num */

	return status;
}

/**
 * ngbe_reset_misc - Post-reset MAC/PSR bring-up common to all resets
 * @hw: pointer to hardware structure
 *
 * Return: always 0.
 */
int ngbe_reset_misc(struct ngbe_hw *hw)
{
	int i;

	/* receive packets that size > 2048 */
	wr32m(hw, NGBE_MAC_RX_CFG,
	      NGBE_MAC_RX_CFG_JE, NGBE_MAC_RX_CFG_JE);

	/* clear counters on read */
	wr32m(hw, NGBE_MMC_CONTROL,
	      NGBE_MMC_CONTROL_RSTONRD, NGBE_MMC_CONTROL_RSTONRD);

	/* honour received pause frames */
	wr32m(hw, NGBE_MAC_RX_FLOW_CTRL,
	      NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE);

	wr32(hw, NGBE_MAC_PKT_FLT,
	     NGBE_MAC_PKT_FLT_PR);

	/* NOTE(review): presumably programs the reset-init delay field to
	 * 0x1E00 — confirm against the register spec.
	 */
	wr32m(hw, NGBE_MIS_RST_ST,
	      NGBE_MIS_RST_ST_RST_INIT, 0x1E00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl*/
	wr32(hw, NGBE_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0);
		wr32(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0);
		wr32(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0);
	}
	wr32(hw, NGBE_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0);
		wr32(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0);
		wr32(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* set pause frame dst mac addr: 01:80:C2:00:00:01
	 * (IEEE 802.3x reserved multicast, split across low/high regs)
	 */
	wr32(hw, NGBE_RDB_PFCMACDAL, 0xC2000001);
	wr32(hw, NGBE_RDB_PFCMACDAH, 0x0180);

	wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF);

	if (hw->gpio_ctl == 1) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIO_DDR, 0x1);
		wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0);
	}

	ngbe_init_thermal_sensor_thresh(hw);

	return 0;
}

/**
 * ngbe_init_hw - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Return: status of the reset_hw / start_hw mac ops.
 **/
s32 ngbe_init_hw(struct ngbe_hw *hw)
{
	s32 status;

+ /* Reset the hardware */ + status = TCALL(hw, mac.ops.reset_hw); + + if (status == 0) { + /* Start the HW */ + status = TCALL(hw, mac.ops.start_hw); + } + + return status; +} + +/** + * ngbe_reset_hw - Perform hardware reset + * @hw: pointer to hardware structure + **/ +s32 ngbe_reset_hw(struct ngbe_hw *hw) +{ + s32 status; + u32 reset = 0; + u32 i; + struct ngbe_mac_info *mac = &hw->mac; + u32 sr_pcs_ctl = 0; + u32 sr_pma_mmd_ctl1 = 0; + u32 sr_an_mmd_ctl = 0; + u32 sr_an_mmd_adv_reg2 = 0; + u32 vr_xs_or_pcs_mmd_digi_ctl1 = 0; + u32 curr_vr_xs_or_pcs_mmd_digi_ctl1 = 0; + u32 curr_sr_pcs_ctl = 0; + u32 curr_sr_pma_mmd_ctl1 = 0; + u32 curr_sr_an_mmd_ctl = 0; + u32 curr_sr_an_mmd_adv_reg2 = 0; + u32 reset_status = 0; + u32 rst_delay = 0; + struct ngbe_adapter *adapter = NULL; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = TCALL(hw, mac.ops.stop_adapter); + if (status != 0) + goto reset_hw_out; + + /* Identify PHY and related function pointers */ + if (!((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) { + status = TCALL(hw, phy.ops.init); + if (status) + goto reset_hw_out; + } + + if (ngbe_get_media_type(hw) == ngbe_media_type_copper) { + mac->ops.setup_link = ngbe_setup_copper_link; + mac->ops.get_link_capabilities = + ngbe_get_copper_link_capabilities; + } + + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
+ */ + if (hw->force_full_reset) { + rst_delay = (rd32(hw, NGBE_MIS_RST_ST) & + NGBE_MIS_RST_ST_RST_INIT) >> + NGBE_MIS_RST_ST_RST_INI_SHIFT; + if (hw->reset_type == NGBE_SW_RESET) { + for (i = 0; i < rst_delay + 20; i++) { + reset_status = + rd32(hw, NGBE_MIS_RST_ST); + if (!(reset_status & + NGBE_MIS_RST_ST_DEV_RST_ST_MASK)) + break; + mdelay(100); + } + + if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) { + status = NGBE_ERR_RESET_FAILED; + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "software reset polling failed to complete.\n"); + + goto reset_hw_out; + } + status = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_SW_RESET); + if (status != 0) + goto reset_hw_out; + + } else if (hw->reset_type == NGBE_GLOBAL_RESET) { + adapter = (struct ngbe_adapter *)hw->back; + mdelay(100 * rst_delay + 2000); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, false); + } + } else { + if (hw->bus.lan_id == 0) + reset = NGBE_MIS_RST_LAN0_RST; + else if (hw->bus.lan_id == 1) + reset = NGBE_MIS_RST_LAN1_RST; + else if (hw->bus.lan_id == 2) + reset = NGBE_MIS_RST_LAN2_RST; + else if (hw->bus.lan_id == 3) + reset = NGBE_MIS_RST_LAN3_RST; + + wr32(hw, NGBE_MIS_RST, + reset | rd32(hw, NGBE_MIS_RST)); + NGBE_WRITE_FLUSH(hw); + + mdelay(15); + } + + status = ngbe_reset_misc(hw); + if (status != 0) + goto reset_hw_out; + + if (!hw->mac.orig_link_settings_stored) { + hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + vr_xs_or_pcs_mmd_digi_ctl1; + hw->mac.orig_link_settings_stored = true; + } else { + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. 
+ */ + hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = + curr_sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + curr_vr_xs_or_pcs_mmd_digi_ctl1; + } + + /* Store the permanent mac address */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES; + TCALL(hw, mac.ops.init_rx_addrs); + + pci_set_master(((struct ngbe_adapter *)hw->back)->pdev); + +reset_hw_out: + return status; +} + +/** + * ngbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + **/ +s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw) +{ + u16 i = 0; + + rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RDB_LXONTXC); + rd32(hw, NGBE_RDB_LXOFFTXC); + + /* rd32(hw, NGBE_MAC_LXONRXC); */ + rd32(hw, NGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + wr32m(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP, i << 16); + rd32(hw, NGBE_MAC_PXOFFRXC); + } + + for (i = 0; i < 8; i++) + wr32(hw, NGBE_PX_MPRC(i), 0); + + /* BPRC */ + rd32(hw, NGBE_PX_GPRC); + rd32(hw, NGBE_PX_GPTC); + rd32(hw, NGBE_PX_GORC_MSB); + rd32(hw, NGBE_PX_GOTC_MSB); + + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RDM_DRP_PKT); + + return 0; +} + +/** + * ngbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + **/ +s32 
ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	/* RAR0 holds the permanent address: AD_H carries bytes 0-1,
	 * AD_L carries bytes 2-5 (most significant byte first).
	 */
	wr32(hw, NGBE_PSR_MAC_SWC_IDX, 0);
	rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H);
	rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L);

	for (i = 0; i < 2; i++)
		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);

	for (i = 0; i < 4; i++)
		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);

	return 0;
}

/**
 * ngbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. NGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else 0
 * is returned signifying master requests disabled.
 **/
s32 ngbe_disable_pcie_master(struct ngbe_hw *hw)
{
	s32 status = 0;
	u32 i;

	/* Always set this bit to ensure any future transactions are blocked */
	pci_clear_master(((struct ngbe_adapter *)hw->back)->pdev);

	/* Exit if master requests are blocked */
	if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)) || NGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usleep_range(100, 200);
		if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)))
			goto out;
	}

	ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
		      "PCIe transaction pending bit did not clear.\n");
	status = NGBE_ERR_MASTER_REQUESTS_PENDING;
out:
	return status;
}

/**
 * ngbe_stop_adapter - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Return: result of ngbe_disable_pcie_master().
 **/
s32 ngbe_stop_adapter(struct ngbe_hw *hw)
{
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	TCALL(hw, mac.ops.disable_rx);

	/* Set interrupt mask to stop interrupts from being generated */
	ngbe_intr_disable(hw, NGBE_INTR_ALL);

	/* Clear any pending interrupts, flush
previous writes */ + wr32(hw, NGBE_PX_MISC_IC, 0xffffffff); + + wr32(hw, NGBE_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, NGBE_PX_TR_CFG(i), + NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE, + NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, NGBE_PX_RR_CFG(i), + NGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + NGBE_WRITE_FLUSH(hw); + mdelay(2); + + /* Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ngbe_disable_pcie_master(hw); +} + +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == NGBE_FAILED_READ_CFG_WORD) + return true; + + return false; +} + +static u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg) +{ + struct ngbe_adapter *adapter = hw->back; + u16 value; + + if (NGBE_REMOVED(hw->hw_addr)) + return NGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == NGBE_FAILED_READ_CFG_WORD && + ngbe_check_cfg_remove(hw, adapter->pdev)) + return NGBE_FAILED_READ_CFG_WORD; + + return value; +} + +/** + * ngbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + **/ +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status) +{ + if (hw->bus.type == ngbe_bus_type_unknown) + hw->bus.type = ngbe_bus_type_pci_express; + + switch (link_status & NGBE_PCI_LINK_WIDTH) { + case NGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ngbe_bus_width_pcie_x1; + break; + case NGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ngbe_bus_width_pcie_x2; + break; + case NGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ngbe_bus_width_pcie_x4; + break; + case NGBE_PCI_LINK_WIDTH_8: + 
hw->bus.width = ngbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ngbe_bus_width_unknown;
		break;
	}

	/* Decode negotiated PCIe link speed from the Link Status word */
	switch (link_status & NGBE_PCI_LINK_SPEED) {
	case NGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ngbe_bus_speed_2500;
		break;
	case NGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ngbe_bus_speed_5000;
		break;
	case NGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ngbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ngbe_bus_speed_unknown;
		break;
	}
}

/**
 * ngbe_get_bus_info - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Return: always 0.
 **/
s32 ngbe_get_bus_info(struct ngbe_hw *hw)
{
	u16 link_status;

	/* Get the negotiated link width and speed from PCI config space */
	link_status = ngbe_read_pci_cfg_word(hw, NGBE_PCI_LINK_STATUS);

	ngbe_set_pci_config_data(hw, link_status);

	return 0;
}

/**
 * ngbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 */
bool ngbe_mng_present(struct ngbe_hw *hw)
{
	u32 fwsm;

	fwsm = rd32(hw, NGBE_MIS_ST);
	return fwsm & NGBE_MIS_ST_MNG_INIT_DN;
}

/* Gate register access paths on management firmware being initialized */
bool ngbe_check_mng_access(struct ngbe_hw *hw)
{
	if (!ngbe_mng_present(hw))
		return false;
	return true;
}

/**
 * ngbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 **/
void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw)
{
	struct ngbe_bus_info *bus = &hw->bus;
	u32 reg = 0;

	/* On this device the PCI function number equals the LAN port id */
	reg = rd32(hw, NGBE_CFG_PORT_ST);
	bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg);
	bus->func = bus->lan_id;
}

/**
 * ngbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Clears the SMBI bit so other drivers/firmware may take the semaphore.
 **/
static void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
{
	if (ngbe_check_mng_access(hw)) {
		wr32m(hw, NGBE_MIS_SWSM,
		      NGBE_MIS_SWSM_SMBI, 0);
		NGBE_WRITE_FLUSH(hw);
	}

}

/**
 * ngbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 **/
static
s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) +{ + s32 status = NGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usleep_range(50, 100); + } + + if (i == timeout) { + /* this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ngbe_release_eeprom_semaphore(hw); + + usleep_range(50, 100); + /* one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) + status = 0; + } + + return status; +} + +/** + * ngbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + **/ +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask) +{ + ngbe_get_eeprom_semaphore(hw); + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_SWFW_SYNC, mask, 0); + + ngbe_release_eeprom_semaphore(hw); +} + +/** + * ngbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + **/ +s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ngbe_get_eeprom_semaphore(hw)) + return NGBE_ERR_SWFW_SYNC; + + if (ngbe_check_mng_access(hw)) { + gssr = rd32(hw, NGBE_MNG_SWFW_SYNC); + if (gssr & (fwmask | 
swmask)) { + /* Resource is currently in use by FW or SW */ + ngbe_release_eeprom_semaphore(hw); + mdelay(5); + } else { + gssr |= swmask; + wr32(hw, NGBE_MNG_SWFW_SYNC, gssr); + + ngbe_release_eeprom_semaphore(hw); + return 0; + } + } + } + + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "%s: i = %u, gssr = %u\n", __func__, i, gssr); + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ngbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + mdelay(5); + + return NGBE_ERR_SWFW_SYNC; +} + +/** + * ngbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + **/ +s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) +{ + int i; + int secrxreg; + + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, NGBE_RSEC_CTL_RX_DIS); + for (i = 0; i < 40; i++) { + secrxreg = rd32(hw, NGBE_RSEC_ST); + if (secrxreg & NGBE_RSEC_ST_RSEC_RDY) + break; + + usleep_range(1000, 2000); + } + + return 0; +} + +/** + * ngbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + **/ +s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw) +{ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, 0); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_enable_rx_dma - Enable the Rx DMA unit on emerald + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + **/ +s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval) +{ + /* Workaround for emerald silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. 
 */
	TCALL(hw, mac.ops.disable_sec_rx_path);

	if (regval & NGBE_RDB_PB_CTL_PBEN)
		TCALL(hw, mac.ops.enable_rx);
	else
		TCALL(hw, mac.ops.disable_rx);

	TCALL(hw, mac.ops.enable_sec_rx_path);

	return 0;
}

/**
 * ngbe_start_hw - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Return: status of the setup_fc mac op.
 **/
s32 ngbe_start_hw(struct ngbe_hw *hw)
{
	int ret_val = 0;

	/* Set the media type */
	hw->phy.media_type = TCALL(hw, mac.ops.get_media_type);

	/* Clear the VLAN filter table */
	TCALL(hw, mac.ops.clear_vfta);

	/* Clear statistics registers */
	TCALL(hw, mac.ops.clear_hw_cntrs);

	NGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = TCALL(hw, mac.ops.setup_fc);

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return ret_val;
}

/**
 * ngbe_get_device_caps - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * Return: always 0; the EEPROM read status is not propagated.
 **/
s32 ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps)
{
	TCALL(hw, eeprom.ops.read,
	      hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, device_caps);

	return 0;
}

/**
 * ngbe_set_rar - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @pools: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Return: 0 or NGBE_ERR_INVALID_ARGUMENT for an out-of-range @index.
 **/
s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools,
		 u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(hw, NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	/* select the MAC address */
	wr32(hw, NGBE_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xFFFFFFFF);

	/* HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		   ((u32)addr[4] << 8) |
		   ((u32)addr[3] << 16) |
		   ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		    ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV;

	wr32(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(hw, NGBE_PSR_MAC_SWC_AD_H,
	      (NGBE_PSR_MAC_SWC_AD_H_AD(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 * ngbe_clear_rar - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Return: 0 or NGBE_ERR_INVALID_ARGUMENT for an out-of-range @index.
 **/
s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index)
{
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(hw, NGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);

		return NGBE_ERR_INVALID_ARGUMENT;
	}

	/* Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(hw, NGBE_PSR_MAC_SWC_IDX, index);

	wr32(hw, NGBE_PSR_MAC_SWC_VM, 0);
	wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
	wr32m(hw, NGBE_PSR_MAC_SWC_AD_H,
	      (NGBE_PSR_MAC_SWC_AD_H_AD(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
	       NGBE_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 * ngbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
+ **/ +s32 ngbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + /* Make sure it is not a multicast address */ + if (NGBE_IS_MULTICAST(mac_addr)) + status = NGBE_ERR_INVALID_MAC_ADDR; + else if (NGBE_IS_BROADCAST(mac_addr)) + status = NGBE_ERR_INVALID_MAC_ADDR; + else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) + status = NGBE_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * ngbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + /* If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ngbe_validate_mac_addr(hw->mac.addr) == + NGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.addr); + } else { + /* Setup the receive address. */ + TCALL(hw, mac.ops.set_rar, 0, hw->mac.addr, 0, + NGBE_PSR_MAC_SWC_AD_H_AV); + } + hw->addr_ctrl.overflow_promisc = 0; + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + for (i = 1; i < rar_entries; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, NGBE_PSR_MC_TBL(i), 0); + + TCALL(hw, mac.ops.init_uta_tables); + + return 0; +} + +/** + * ngbe_add_uc_addr - Adds a secondary unicast address. 
+ * @hw: pointer to hardware structure + * @addr: new address + **/ +void ngbe_add_uc_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + /* Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } +} + +/** + * ngbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + **/ +static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + WARN_ON(1); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ngbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + **/ +void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ngbe_mta_vector(hw, mc_addr); + + /* The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. 
So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ngbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @list: the list of new addresses + * @count: number of addresses + * @itr: iterator function to walk the address list + **/ +s32 ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *list, + u32 count, ngbe_mc_itr itr) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 vmdq; + + /* Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + for (i = 0; i < uc_addr_in_use; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 1 + i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Add the new addresses */ + for (i = 0; i < count; i++) { + addr = itr(hw, &list, &vmdq); + ngbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, NGBE_PSR_CTL_UPE); + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, 0); + } + + return 0; +} + +/** + * ngbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @list: the list of new 
multicast addresses + * @count: number of addresses + * @itr: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + **/ +s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *list, + u32 count, ngbe_mc_itr itr, + bool clear) +{ + u32 i; + u32 vmdq; + u32 psrctl; + + /* Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* Update mta_shadow */ + for (i = 0; i < count; i++) + ngbe_set_mta(hw, itr(hw, &list, &vmdq)); + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, NGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + **/ +s32 ngbe_enable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *nafi = &hw->addr_ctrl; + u32 psrctl; + + if (nafi->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + **/ +s32 ngbe_disable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *nafi = &hw->addr_ctrl; + u32 psrctl; + + if (nafi->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + 
+void ngbe_enable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + + /* enable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, NGBE_MAC_RX_CFG_RE); + + wr32m(hw, NGBE_RSEC_CTL, 0x2, 0); + + wr32m(hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + pfdtxgswc |= NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +void ngbe_disable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + + if (rxctrl & NGBE_RDB_PB_CTL_PBEN) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + if (pfdtxgswc & NGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~NGBE_RDB_PB_CTL_PBEN; + wr32(hw, NGBE_RDB_PB_CTL, rxctrl); + + /* OCP NCSI BMC need it */ + if (!(((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD) || + ((hw->subsystem_device_id & NGBE_WOL_MASK) == NGBE_WOL_SUP) || + ((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP))) { + /* disable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, 0); + } + } +} + +/** + * ngbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + /* swap bytes for HW little endian */ + addr_low = addr[5] | + (addr[4] << 8) | + (addr[3] << 16) | + (addr[2] << 24); + addr_high = addr[1] | (addr[0] << 8); + + /* Either find the mac_id in rar 
or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. + */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + + if (((NGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0) && + first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar >= hw->mac.rar_highwater) { + if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return NGBE_ERR_INVALID_MAC_ADDR; + } + } + + return rar; +} + +/** + * ngbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + **/ +s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries. 
Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(hw, NGBE_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* If regindex is less than NGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) { + regindex = first_empty_slot; + } else { + ERROR_REPORT1(hw, NGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = NGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ngbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + **/ +s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, NGBE_CFG_PORT_CTL); + if (vt & NGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits = 0; + + vlvf_index = ngbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, NGBE_PSR_VLAN_SWC_VM_L, bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, 
NGBE_PSR_VLAN_SWC_VM_L, bits); + } else { + bits |= rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, NGBE_PSR_VLAN_SWC, (NGBE_PSR_VLAN_SWC_VIEN | vlan)); + if (!vlan_on && vfta_changed) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. + */ + *vfta_changed = false; + } + } else { + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + } + } + + return 0; +} + +/** + * ngbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + **/ +s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = 0; + bool vfta_changed = false; + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call ngbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = ngbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, NGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * ngbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + **/ +s32 ngbe_clear_vfta(struct ngbe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < hw->mac.vft_size; offset++) { + wr32(hw, NGBE_PSR_VLAN_TBL(offset), 0); + /* errata 5 */ + hw->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, offset); + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + wr32(hw, NGBE_PSR_VLAN_SWC_VM_L, 0); + } + + return 0; +} + +/** + * ngbe_init_uta_tables - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_uta_tables(struct ngbe_hw *hw) +{ + int i; + + for (i = 0; i < 128; i++) + wr32(hw, NGBE_PSR_UC_TBL(i), 0); + + return 0; +} + +/** + * ngbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf) +{ + u64 pfvfspoof = 0; + + if (enable) { + 
/* The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. + */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, NGBE_TDM_MAC_AS_L, pfvfspoof & 0xff); + } else { + wr32(hw, NGBE_TDM_MAC_AS_L, 0); + } +} + +/** + * ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + **/ +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf > 8) + return; + + pfvfspoof = rd32(hw, NGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_VLAN_AS_L, pfvfspoof); +} + +/** + * ngbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + **/ +void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf <= 8) { + pfvfspoof = rd32(hw, NGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_ETYPE_AS_L, pfvfspoof); + } +} + +/** + * ngbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
+ **/ +s32 ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + if (hw->device_id == NGBE_DEV_ID_EM_WX1860A2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL_W || + hw->device_id == NGBE_DEV_ID_EM_WX1860A1 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A1L || + hw->device_id == NGBE_DEV_ID_EM_WX1860LC) { + *speed = NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T | + NGBE_PHYSICAL_LAYER_100BASE_TX; + } + + if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T; + *autoneg = false; + } + + return status; +} + +/** + * ngbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + **/ +s32 ngbe_check_mac_link(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA) { + *link_up = true; + *speed = NGBE_LINK_SPEED_1GB_FULL; + return status; + } + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = TCALL(hw, phy.ops.read_reg, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) + *link_up = true; + else + *link_up = false; + + if (*link_up) + break; + msleep(100); + } + } else { + status = TCALL(hw, phy.ops.read_reg, 
0x1A, 0xA43, &value); + if (!status && (value & 0x4)) + *link_up = true; + else + *link_up = false; + } + + speed_sta = value & 0x38; + if (*link_up) { + if (speed_sta == 0x28) + *speed = NGBE_LINK_SPEED_1GB_FULL; + else if (speed_sta == 0x18) + *speed = NGBE_LINK_SPEED_100_FULL; + else if (speed_sta == 0x8) + *speed = NGBE_LINK_SPEED_10_FULL; + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + } + + return status; +} + +s32 ngbe_check_mac_link_mdi(struct ngbe_hw *hw, u32 *speed, bool *link_up, bool wait) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + /* select page */ + if (hw->phy.type == ngbe_phy_m88e1512) + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + + if (wait) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + + if (*link_up) + break; + msleep(100); + } + } else { + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } + + speed_sta = value & 0xC000; + if (*link_up) { + if (speed_sta == 0x8000) + *speed = NGBE_LINK_SPEED_1GB_FULL; + else if (speed_sta == 0x4000) + *speed = NGBE_LINK_SPEED_100_FULL; + else if (speed_sta == 0x0000) + *speed = NGBE_LINK_SPEED_10_FULL; + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + } + + return status; +} + +s32 ngbe_check_mac_link_yt(struct ngbe_hw *hw, u32 *speed, bool *link_up, bool wait) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + unsigned long flags; + + if (wait) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + + if (*link_up) + break; 
+ msleep(100); + } + } else { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0x11, 0, &value); + + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + + ngbe_phy_read_reg_mdi(hw, 0x11, 0, &value); + if (value & 0x400) + *link_up = true; + else + *link_up = false; + } + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + + speed_sta = value & 0xC000; + if (*link_up) { + if (speed_sta == 0x8000) + *speed = NGBE_LINK_SPEED_1GB_FULL; + else if (speed_sta == 0x4000) + *speed = NGBE_LINK_SPEED_100_FULL; + else if (speed_sta == 0x0000) + *speed = NGBE_LINK_SPEED_10_FULL; + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + } + + return status; +} + +/** + * ngbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT; + wr32(hw, NGBE_RDB_PB_SZ, rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. 
*/ + txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / NGBE_KB_TO_B) - NGBE_TXPKT_SIZE_MAX; + + wr32(hw, NGBE_TDB_PB_SZ, txpktsize); + wr32(hw, NGBE_TDM_PB_THRE, txpbthresh); +} + +/** + * ngbe_host_if_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return NGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 ngbe_host_if_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hicr, i, bi; + u32 hdr_size = sizeof(struct ngbe_hic_hdr); + u16 buf_len; + u32 dword_len; + s32 status = 0; + u32 buf[64] = {}; + + if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) + return NGBE_ERR_HOST_INTERFACE_COMMAND; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB) != 0) + return NGBE_ERR_SWFW_SYNC; + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + status = NGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + /*read to clean all status*/ + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "fwrdy is set before command.\n"); + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. 
+ */ + for (i = 0; i < dword_len; i++) { + if (ngbe_check_mng_access(hw)) { + wr32a(hw, NGBE_MNG_MBOX, i, NGBE_CPU_TO_LE32(buffer[i])); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (ngbe_check_mng_access(hw)) { + wr32m(hw, NGBE_MNG_MBOX_CTL, + NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + mdelay(1); + } + + buf[0] = rd32(hw, NGBE_MNG_MBOX); + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + + if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + 
+rel_out: + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * ngbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ngbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + u32 value = 0; + + /* Take semaphore for the entire operation. */ + status = TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status) + goto out; + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = NGBE_CPU_TO_BE16(words_to_read * 2); + + status = ngbe_host_if_command(hw, (u32 *)&buffer, sizeof(buffer), + NGBE_HI_CMD_TIMEOUT, + false); + + if (status) + goto out; + + for (i = 0; i < words_to_read; i++) { + u32 reg = NGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; + + if (ngbe_check_mng_access(hw)) { + value = rd32(hw, reg); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto out; + } + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH); + + return status; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host 
interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 32 bit word from the EEPROM using the hostif. + **/ +s32 ngbe_read_ee_hostif_data32(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u32)); + + status = ngbe_host_if_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_CMD_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) { + *data = (u32)rd32a(hw, NGBE_MNG_MBOX, FW_NVM_DATA_OFFSET); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return 0; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + **/ +s32 ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + s32 status = 0; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data32(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ngbe_init_eeprom_params - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ngbe_init_eeprom_params(struct ngbe_hw *hw) +{ + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + s32 status = 0; + + if (eeprom->type == ngbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ngbe_eeprom_none; + + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = ngbe_flash; + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + } + } + eeprom->sw_region_offset = 0x80; + + return status; +} + +/** + * ngbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ngbe_calc_eeprom_checksum(struct ngbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!buffer) { + eeprom_ptrs = vmalloc(NGBE_EEPROM_LAST_WORD * sizeof(u16)); + if (!eeprom_ptrs) + return NGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = ngbe_read_ee_hostif_buffer(hw, 0, + NGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) + return status; + + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < NGBE_EEPROM_LAST_WORD) + return NGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < NGBE_EEPROM_LAST_WORD; i++) + if (i != hw->eeprom.sw_region_offset + NGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + checksum = (u16)NGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (s32)checksum; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. 
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 **/
s32 ngbe_read_ee_hostif_data(struct ngbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;
	struct ngbe_hic_read_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = NGBE_CPU_TO_BE32(offset * 2);
	/* one word */
	buffer.length = NGBE_CPU_TO_BE16(sizeof(u16));

	status = ngbe_host_if_command(hw, (u32 *)&buffer,
				      sizeof(buffer),
				      NGBE_HI_CMD_TIMEOUT, false);

	if (status)
		return status;
	if (ngbe_check_mng_access(hw)) {
		/* result word sits at FW_NVM_DATA_OFFSET in the mailbox */
		*data = (u16)rd32a(hw, NGBE_MNG_MBOX, FW_NVM_DATA_OFFSET);
	} else {
		status = NGBE_ERR_MNG_ACCESS_FAILED;
		return status;
	}

	return 0;
}

/**
 * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 **/
s32 ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, u16 *data)
{
	s32 status = 0;

	if (TCALL(hw, mac.ops.acquire_swfw_sync,
		  NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
		status = ngbe_read_ee_hostif_data(hw, offset, data);
		TCALL(hw, mac.ops.release_swfw_sync,
		      NGBE_MNG_SWFW_SYNC_SW_FLASH);
	} else {
		status = NGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ngbe_write_ee_hostif_data - Write EEPROM word using hostif
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the hostif.
 **/
s32 ngbe_write_ee_hostif_data(struct ngbe_hw *hw, u16 offset,
			      u16 data)
{
	s32 status;
	struct ngbe_hic_write_shadow_ram buffer;

	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* one word */
	buffer.length = NGBE_CPU_TO_BE16(sizeof(u16));
	buffer.data = data;
	buffer.address = NGBE_CPU_TO_BE32(offset * 2);

	status = ngbe_host_if_command(hw, (u32 *)&buffer,
				      sizeof(buffer),
				      NGBE_HI_CMD_TIMEOUT, false);

	return status;
}

/**
 * ngbe_write_ee_hostif - Write EEPROM word using hostif
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 **/
s32 ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset,
			 u16 data)
{
	s32 status = 0;

	if (TCALL(hw, mac.ops.acquire_swfw_sync,
		  NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
		status = ngbe_write_ee_hostif_data(hw, offset, data);
		TCALL(hw, mac.ops.release_swfw_sync,
		      NGBE_MNG_SWFW_SYNC_SW_FLASH);
	} else {
		status = NGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ngbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) write to the EEPROM
 **/
s32 ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw,
				u16 offset, u16 words, u16 *data)
{
	s32 status = 0;
	u16 i = 0;

	/* Take semaphore for the entire operation.
*/ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) + return status; + + for (i = 0; i < words; i++) { + status = ngbe_write_ee_hostif_data(hw, offset + i, data[i]); + + if (status != 0) + break; + } + + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH); + + return status; +} + +/** + * ngbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + **/ +s32 ngbe_update_eeprom_checksum(struct ngbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ngbe_read_ee_hostif(hw, 0, &checksum); + if (status) + return status; + + status = ngbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_write_ee_hostif(hw, NGBE_EEPROM_CHECKSUM, checksum); + if (status) + return status; + + return status; +} + +/** + * ngbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + **/ +s32 ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = TCALL(hw, eeprom.ops.read, 0, &checksum); + if (status) + return status; + + status = TCALL(hw, eeprom.ops.calc_checksum); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + NGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = NGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(hw, NGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +s32 ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + int tmp; + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_if_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_CMD_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) { + tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1); + if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) + status = 0; + else + status = NGBE_ERR_EEPROM_CHECKSUM; + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +/** + * ngbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
+ **/ +u8 ngbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * ngbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + **/ +s32 ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ngbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + mdelay(5); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ngbe_host_if_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + NGBE_HI_CMD_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ngbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
+ **/ +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + max_msix_count = NGBE_MAX_MSIX_VECTORS_EMERALD; + pos = pci_find_capability(((struct ngbe_adapter *)hw->back)->pdev, + PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct ngbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (NGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= NGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ngbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(hw, NGBE_ERROR_UNSUPPORTED, + "Local or link partner's flow control settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return NGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
+ */ + if (hw->fc.requested_mode == ngbe_fc_full) { + hw->fc.current_mode = ngbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ngbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ngbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); + } + return 0; +} + +/** + * ngbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +static s32 ngbe_fc_autoneg_copper(struct ngbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + if (!((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) { + TCALL(hw, phy.ops.get_adv_pause, &technology_ability_reg); + TCALL(hw, phy.ops.get_lp_adv_pause, &lp_technology_ability_reg); + } + return ngbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE); +} + +/** + * ngbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ngbe_fc_autoneg(struct ngbe_hw *hw) +{ + s32 ret_val = NGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(hw, NGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ngbe_media_type_fiber: + break; + + /* Autoneg flow control on copper adapters */ + case ngbe_media_type_copper: + ret_val = ngbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == NGBE_OK) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ngbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 ngbe_fc_enable(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) { + if (!hw->fc.low_water || hw->fc.low_water >= hw->fc.high_water) { + ERROR_REPORT1(hw, NGBE_ERROR_INVALID_STATE, + "Invalid water mark configuration\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + + /* Negotiate the fc mode to use */ + ngbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, NGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~NGBE_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(hw, NGBE_RDB_RFCC); + fccfg_reg &= ~NGBE_RDB_RFCC_RFCE_802_3X; + + /* The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). 
+ * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ngbe_fc_none: + /* Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ngbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case ngbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(hw, NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, NGBE_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && + hw->fc.high_water) { + /* 32Byte granularity */ + fcrtl = (hw->fc.low_water << 10) | + NGBE_RDB_RFCL_XONE; + wr32(hw, NGBE_RDB_RFCL, fcrtl); + fcrth = (hw->fc.high_water << 10) | + NGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, NGBE_RDB_RFCL, 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. 
This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(hw, NGBE_RDB_PB_SZ) - 24576; + } + + wr32(hw, NGBE_RDB_RFCH, fcrth); + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010000; + wr32(hw, NGBE_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ngbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ngbe_setup_fc(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u16 pcap_backplane = 0; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) { + ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED, + "ngbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ngbe_fc_default) + hw->fc.requested_mode = ngbe_fc_full; + + /* The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ngbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case ngbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. 
+ */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x100; + break; + case ngbe_fc_rx_pause: + fallthrough; + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x80; + break; + default: + ERROR_REPORT1(hw, NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + } + + /* AUTOC restart handles negotiation of 1G on backplane + * and copper. + */ + if (hw->phy.media_type == ngbe_media_type_copper && + !((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) + ret_val = TCALL(hw, phy.ops.set_adv_pause, pcap_backplane); + +out: + return ret_val; +} + +static int check_image_version(struct ngbe_hw *hw, const u8 *data) +{ + u32 image_v = 0x0; + u32 f_chip_v = 0x0; + u8 rdata_2; + u8 rdata_3, rdata_4; + u32 f_sub_id; + u8 wol = 0, ncsi = 0; + + image_v = data[0x13a] | data[0x13b] << 8 | + data[0x13c] << 16 | data[0x13d] << 24; + hw_dbg(hw, "image_version=image_v: %x\n", image_v); + + f_sub_id = data[0xfffdc] << 8 | data[0xfffdd]; + hw_dbg(hw, "The image's sub_id : %04x\n", f_sub_id); + if ((f_sub_id & 0x8000) == 0x8000) + ncsi = 1; + if ((f_sub_id & 0x4000) == 0x4000) + wol = 1; + hw_dbg(hw, "=2=ncsi : %x - wol : %x\n", ncsi, wol); + + rdata_2 = data[0xfffd8]; + hw_dbg(hw, "image_version=rdata_2-fffdc: %x\n", rdata_2); + rdata_3 = data[0xbc]; + hw_dbg(hw, "image_version=rdata_3-bc: %x\n", rdata_3); + rdata_4 = data[0x3c]; + hw_dbg(hw, "image_version=rdata_4-3c: %x\n", rdata_4); + + //check card's chip version + if (image_v < 0x10015 && image_v != 0x10012 && image_v != 0x10013) { + f_chip_v = 0x41;//'A' + } else if (image_v > 0x10015) { + f_chip_v = 
rdata_2 & 0xff; + } else if (image_v == 0x10012 || image_v == 0x10013 || image_v == 0x10015) { + if (wol == 1 || ncsi == 1) { + if (rdata_3 == 0x02) + f_chip_v = 0x41; + else + f_chip_v = 0x42; + } else { + if (rdata_4 == 0x80) + f_chip_v = 0x42; + else + f_chip_v = 0x41; + } + } + + return f_chip_v; +} + +static int ngbe_flash_write_unlock(struct ngbe_hw *hw) +{ + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_if_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + if (status) + return status; + + return status; +} + +u8 fmgr_usr_cmd_op(struct ngbe_hw *hw, u32 usr_cmd) +{ + u8 status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +u8 flash_erase_sector(struct ngbe_hw *hw, u32 sec_addr) +{ + u8 status = ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); + return status; +} + +u8 flash_erase_chip(struct ngbe_hw *hw) +{ + u8 status = ngbe_fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); + return status; +} + +u8 flash_write_dword(struct ngbe_hw *hw, u32 addr, u32 dword) +{ + u8 status = 0; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = ngbe_fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + + if (status) + return status; + + if (dword != ngbe_flash_read_dword(hw, addr)) + return 1; + + return 0; +} + +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + u32 sector_num = 0; + u32 read_data = 0; + u8 status = 0; + u8 skip = 0; + u32 i = 0, k = 0, n = 0; + u8 flash_vendor = 0; + u32 num[128] = {0}; + u32 chip_v = 0, image_v = 0; + u32 mac_addr0_dword0_t, mac_addr0_dword1_t; + u32 mac_addr1_dword0_t, mac_addr1_dword1_t; + u32 mac_addr2_dword0_t, mac_addr2_dword1_t; + u32 mac_addr3_dword0_t, 
mac_addr3_dword1_t; + u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t; + + read_data = rd32(hw, 0x10200); + if (read_data & 0x80000000) { + hw_dbg(hw, "The flash has been successfully, please reboot to make it work.\n"); + return -EOPNOTSUPP; + } + + chip_v = (rd32(hw, 0x10010) & BIT(16)) ? 0x41 : 0x42; + image_v = check_image_version(hw, data); + + hw_dbg(hw, "Checking chip/image version .......\n"); + hw_dbg(hw, "The image chip_v is %c\n", image_v); + hw_dbg(hw, "The nic chip_v is %c\n", chip_v); + if (chip_v != image_v) { + hw_dbg(hw, "====The image is not match the Gigabit card (chip version)====\n"); + hw_dbg(hw, "====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /* check sub_id */ + hw_dbg(hw, "Checking sub_id .......\n"); + hw_dbg(hw, "The card's sub_id : %04x\n", hw->subsystem_device_id); + hw_dbg(hw, "The image's sub_id : %04x\n", data[0xfffdc] << 8 | data[0xfffdd]); + if ((hw->subsystem_device_id & 0xffff) == + ((data[0xfffdc] << 8 | data[0xfffdd]) & 0xffff)) { + hw_dbg(hw, "It is a right image\n"); + } else if (hw->subsystem_device_id == 0xffff) { + hw_dbg(hw, "update anyway\n"); + } else { + hw_dbg(hw, "====The Gigabit image is not match the Gigabit card====\n"); + hw_dbg(hw, "====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /*check dev_id*/ + hw_dbg(hw, "Checking dev_id .......\n"); + hw_dbg(hw, "The image's dev_id : %04x\n", data[0xfffde] << 8 | data[0xfffdf]); + hw_dbg(hw, "The card's dev_id : %04x\n", hw->device_id); + if (!((hw->device_id & 0xffff) == ((data[0xfffde] << 8 | data[0xfffdf]) & 0xffff)) && + !(hw->device_id == 0xffff)) { + hw_dbg(hw, "====The Gigabit image is not match the Gigabit card====\n"); + hw_dbg(hw, "====Please check your image====\n"); + return -EOPNOTSUPP; + } + + /* unlock flash write protect */ + ngbe_release_eeprom_semaphore(hw); + ngbe_flash_write_unlock(hw); + + wr32(hw, 0x10114, 0x9f050206); + wr32(hw, 0x10194, 0x9f050206); + + mdelay(1000); + + mac_addr0_dword0_t = 
ngbe_flash_read_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G); + mac_addr0_dword1_t = ngbe_flash_read_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G) & 0xffff; + mac_addr1_dword0_t = ngbe_flash_read_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G); + mac_addr1_dword1_t = ngbe_flash_read_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G) & 0xffff; + mac_addr2_dword0_t = ngbe_flash_read_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G); + mac_addr2_dword1_t = ngbe_flash_read_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G) & 0xffff; + mac_addr3_dword0_t = ngbe_flash_read_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G); + mac_addr3_dword1_t = ngbe_flash_read_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G) & 0xffff; + + serial_num_dword0_t = ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G); + serial_num_dword1_t = ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4); + serial_num_dword2_t = ngbe_flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8); + hw_dbg(hw, "Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t, mac_addr0_dword0_t); + hw_dbg(hw, "MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t, mac_addr1_dword0_t); + hw_dbg(hw, "MAC Address2 is: 0x%04x%08x\n", mac_addr2_dword1_t, mac_addr2_dword0_t); + hw_dbg(hw, "MAC Address3 is: 0x%04x%08x\n", mac_addr3_dword1_t, mac_addr3_dword0_t); + + for (k = 0; k < 128; k++) + num[k] = ngbe_flash_read_dword(hw, 0xfe000 + (k << 2)); + + status = fmgr_usr_cmd_op(hw, 0x6); /* write enable */ + status = fmgr_usr_cmd_op(hw, 0x98); /* global protection un-lock */ + mdelay(1000); + + if (flash_vendor == 1) { + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < 8; i++) { + flash_erase_sector(hw, i << 7); + mdelay(20); + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + /* Winbond Flash, erase chip command is okay, but erase sector doestn't work*/ + sector_num = size / SPI_SECTOR_SIZE; + if (flash_vendor == 2) { + status = flash_erase_chip(hw); + hw_dbg(hw, "Erase chip command, return status = %0d\n", status); + mdelay(1000); + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720); + for (i = 0; i < 
sector_num; i++) { + status = flash_erase_sector(hw, i * SPI_SECTOR_SIZE); + hw_dbg(hw, "Erase sector[%2d] command, return status = %0d\n", i, status); + mdelay(50); + } + wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8); + } + + /* Program Image file in dword */ + for (i = 0; i < size; i += 4) { + read_data = data[i + 3] << 24 | + data[i + 2] << 16 | + data[i + 1] << 8 | + data[i]; + read_data = __le32_to_cpu(read_data); + skip = ((i == MAC_ADDR0_WORD0_OFFSET_1G) || + (i == MAC_ADDR0_WORD1_OFFSET_1G) || + (i == MAC_ADDR1_WORD0_OFFSET_1G) || + (i == MAC_ADDR1_WORD1_OFFSET_1G) || + (i == MAC_ADDR2_WORD0_OFFSET_1G) || + (i == MAC_ADDR2_WORD1_OFFSET_1G) || + (i == MAC_ADDR3_WORD0_OFFSET_1G) || + (i == MAC_ADDR3_WORD1_OFFSET_1G) || + (i >= PRODUCT_SERIAL_NUM_OFFSET_1G && + i <= PRODUCT_SERIAL_NUM_OFFSET_1G + 8)); + + if (read_data != 0xffffffff && !skip) { + status = flash_write_dword(hw, i, read_data); + if (status) { + hw_dbg(hw, "ERROR: Program 0x%08x @addr: 0x%08x is failed !!\n", + read_data, i); + read_data = ngbe_flash_read_dword(hw, i); + hw_dbg(hw, "Read data from Flash is: 0x%08x\n", read_data); + return 1; + } + } + if (i % 4096 == 0) + hw_dbg(hw, "\b\b\b\b%3d%%", (int)(i * 100 / size)); + } + flash_write_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G, mac_addr0_dword0_t); + flash_write_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G, (mac_addr0_dword1_t | 0x80000000)); + flash_write_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G, mac_addr1_dword0_t); + flash_write_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G, (mac_addr1_dword1_t | 0x80000000)); + flash_write_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G, mac_addr2_dword0_t); + flash_write_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G, (mac_addr2_dword1_t | 0x80000000)); + flash_write_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G, mac_addr3_dword0_t); + flash_write_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G, (mac_addr3_dword1_t | 0x80000000)); + flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t); + flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4, 
serial_num_dword1_t); + flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8, serial_num_dword2_t); + for (n = 0; n < 128; n++) { + if (!(num[n] == 0xffffffff)) + flash_write_dword(hw, 0xfe000 + (n << 2), num[n]); + } + wr32(hw, 0x10200, rd32(hw, 0x10200) | 0x80000000); + + return 0; +} + +u16 ngbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +s32 ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct ngbe_hic_upg_start start_cmd; + struct ngbe_hic_upg_write write_cmd; + struct ngbe_hic_upg_verify verify_cmd; + u32 offset; + s32 status = 0; + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + start_cmd.hdr.checksum = 0; + start_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = ngbe_host_if_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + NGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + } else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = ngbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len 
* 4); + + status = ngbe_host_if_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + NGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + } else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case NGBE_MODULE_EEPROM: + verify_cmd.action_flag = NGBE_RELOAD_EEPROM; + break; + case NGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = NGBE_RESET_FIRMWARE; + break; + case NGBE_MODULE_HARDWARE: + verify_cmd.action_flag = NGBE_RESET_LAN; + break; + default: + ERROR_REPORT1(hw, NGBE_ERROR_ARGUMENT, + "%s: region err %x\n", __func__, region); + return status; + } + + verify_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&verify_cmd, (FW_CEM_HDR_LEN + + verify_cmd.hdr.buf_len)); + + status = ngbe_host_if_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + NGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + return status; +} + +/** + * ngbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ngbe_led_on(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_led_off - Turns off the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ngbe_led_off(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~(index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * ngbe_init_flash_params - Initialize flash params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. + **/ +s32 ngbe_init_flash_params(struct ngbe_hw *hw) +{ + struct ngbe_flash_info *flash = &hw->flash; + u32 eec; + + eec = 0x1000000; + flash->semaphore_delay = 10; + flash->dword_size = (eec >> 2); + flash->address_bits = 24; + + return 0; +} + +/** + * ngbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +s32 ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(hw, NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_DATA, data[i]); + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x0)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status) { + ERROR_REPORT1(hw, NGBE_ERROR_INVALID_STATE, "FLASH read timeout"); + break; + } + } + + return status; +} + +/** + * ngbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: 
pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +s32 ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(hw, NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x1)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status != 0) { + ERROR_REPORT1(hw, NGBE_ERROR_INVALID_STATE, "FLASH write timeout"); + break; + } + data[i] = rd32(hw, NGBE_SPI_DATA); + } + + return status; +} + +/** + * ngbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +s32 ngbe_get_thermal_sensor_data(struct ngbe_hw *hw) +{ + s64 tsv; + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + tsv = (s64)(rd32(hw, NGBE_TS_ST) & + NGBE_TS_ST_DATA_OUT_MASK); + + /* 216 < tsv < 876 */ + tsv = tsv < 876 ? 
tsv : 876; + tsv = tsv - 216; + tsv = tsv / 4; + tsv = tsv - 40; + + data->sensor.temp = (s16)tsv; + + return 0; +} + +s32 ngbe_init_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + struct ngbe_flash_info *flash = &hw->flash; + + /* MAC */ + mac->ops.init_hw = ngbe_init_hw; + mac->ops.reset_hw = ngbe_reset_hw; + mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs; + mac->ops.get_mac_addr = ngbe_get_mac_addr; + mac->ops.stop_adapter = ngbe_stop_adapter; + mac->ops.get_bus_info = ngbe_get_bus_info; + mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ngbe_release_swfw_sync; + mac->ops.get_media_type = ngbe_get_media_type; + mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path; + mac->ops.enable_rx_dma = ngbe_enable_rx_dma; + mac->ops.start_hw = ngbe_start_hw; + mac->ops.get_device_caps = ngbe_get_device_caps; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ngbe_set_rar; + mac->ops.clear_rar = ngbe_clear_rar; + mac->ops.init_rx_addrs = ngbe_init_rx_addrs; + mac->ops.update_uc_addr_list = ngbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list; + mac->ops.enable_mc = ngbe_enable_mc; + mac->ops.disable_mc = ngbe_disable_mc; + mac->ops.enable_rx = ngbe_enable_rx; + mac->ops.disable_rx = ngbe_disable_rx; + mac->ops.insert_mac_addr = ngbe_insert_mac_addr; + mac->ops.set_vfta = ngbe_set_vfta; + mac->ops.set_vlvf = ngbe_set_vlvf; + mac->ops.clear_vfta = ngbe_clear_vfta; + mac->ops.init_uta_tables = ngbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = ngbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ngbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + ngbe_set_ethertype_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = ngbe_get_link_capabilities; + mac->ops.check_link = 
ngbe_check_mac_link; + mac->ops.setup_rxpba = ngbe_set_rxpba; + + /* EEPROM */ + eeprom->ops.init_params = ngbe_init_eeprom_params; + eeprom->ops.calc_checksum = ngbe_calc_eeprom_checksum; + eeprom->ops.read = ngbe_read_ee_hostif; + eeprom->ops.read_buffer = ngbe_read_ee_hostif_buffer; + eeprom->ops.read32 = ngbe_read_ee_hostif32; + eeprom->ops.write = ngbe_write_ee_hostif; + eeprom->ops.write_buffer = ngbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = ngbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = ngbe_validate_eeprom_checksum; + eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver; + mac->ops.get_thermal_sensor_data = + ngbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + ngbe_init_thermal_sensor_thresh; + + /* Flow Control */ + mac->ops.fc_enable = ngbe_fc_enable; + mac->ops.setup_fc = ngbe_setup_fc; + + /* LEDs */ + mac->ops.led_on = ngbe_led_on; + mac->ops.led_off = ngbe_led_off; + + /* FLASH */ + flash->ops.init_params = ngbe_init_flash_params; + flash->ops.read_buffer = ngbe_read_flash_buffer; + flash->ops.write_buffer = ngbe_write_flash_buffer; + + mac->mcft_size = NGBE_SP_MC_TBL_SIZE; + mac->vft_size = NGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = NGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = NGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = ngbe_get_pcie_msix_count(hw); + + if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_m88e1512_unknown) + mac->ops.check_link = ngbe_check_mac_link_mdi; + else if (hw->phy.type == ngbe_phy_yt8521s_sfi) + mac->ops.check_link = ngbe_check_mac_link_yt; + + return NGBE_OK; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h new file mode 100644 index 
0000000000000000000000000000000000000000..133658319f06446d4bb932ba50fb5cc1aabfa863
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_HW_H_
+#define _NGBE_HW_H_
+
+#define SPI_CLK_CMD_OFFSET		28 /* SPI command field offset */
+#define SPI_CLK_DIV_OFFSET		25 /* SPI clock divide field offset */
+#define SPI_CLK_DIV			3
+#define SPI_TIME_OUT_VALUE		10000
+#define SPI_SECTOR_SIZE			(4 * 1024) /* FLASH sector size is 4KB */
+
+#define SPI_CMD_WRITE_DWORD		0 /* SPI write a dword command */
+#define SPI_CMD_READ_DWORD		1 /* SPI read a dword command */
+#define SPI_CMD_USER_CMD		5 /* SPI user command */
+#define SPI_CMD_ERASE_SECTOR		3 /* SPI erase sector command */
+#define SPI_CMD_ERASE_CHIP		4 /* SPI erase chip command */
+
+#define SPI_H_CMD_REG_ADDR		0x10104 /* SPI Command register address */
+#define SPI_H_DAT_REG_ADDR		0x10108 /* SPI Data register address */
+#define SPI_H_STA_REG_ADDR		0x1010c /* SPI Status register address */
+#define SPI_H_USR_CMD_REG_ADDR		0x10110 /* SPI User Command register address */
+#define SPI_CMD_CFG1_ADDR		0x10118 /* Flash command configuration register 1 */
+
+#define MAC_ADDR0_WORD0_OFFSET_1G	0x006000c /* MAC Address for LAN0 */
+#define MAC_ADDR0_WORD1_OFFSET_1G	0x0060014
+#define MAC_ADDR1_WORD0_OFFSET_1G	0x006800c /* MAC Address for LAN1 */
+#define MAC_ADDR1_WORD1_OFFSET_1G	0x0068014
+#define MAC_ADDR2_WORD0_OFFSET_1G	0x007000c /* MAC Address for LAN2 */
+#define MAC_ADDR2_WORD1_OFFSET_1G	0x0070014
+#define MAC_ADDR3_WORD0_OFFSET_1G	0x007800c /* MAC Address for LAN3 */
+#define MAC_ADDR3_WORD1_OFFSET_1G	0x0078014
+#define PRODUCT_SERIAL_NUM_OFFSET_1G	0x00f0000 /* Product Serial Number */
+
+/* Flow control defines */
+#define NGBE_TAF_SYM_PAUSE		(0x1)
+#define NGBE_TAF_ASM_PAUSE		(0x2)
+
+u32 ngbe_flash_read_dword(struct ngbe_hw *hw, u32 addr);
+int ngbe_check_flash_load(struct ngbe_hw *hw, u32
check_bit); +s32 ngbe_init_hw(struct ngbe_hw *hw); +s32 ngbe_host_if_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +u8 ngbe_calculate_checksum(u8 *buffer, u32 length); +bool ngbe_check_mng_access(struct ngbe_hw *hw); +bool ngbe_mng_present(struct ngbe_hw *hw); +s32 ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data); +s32 ngbe_disable_pcie_master(struct ngbe_hw *hw); +s32 ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, u16 *data); +int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); +s32 ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); +s32 ngbe_init_ops_common(struct ngbe_hw *hw); + +#endif diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..cfa69eb0619282564a721174ded23c2f7023e31e --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_lib.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include "ngbe.h" + +#define NGBE_RSS_8Q_MASK 0x7 + +/** + * ngbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + **/ +static bool ngbe_set_rss_queues(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *f; + u16 rss_i; + + /* set mask for 8 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = NGBE_RSS_8Q_MASK; + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; +} + +/** + * ngbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + **/ +static void ngbe_set_num_queues(struct ngbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->queues_per_pool = 1; + + ngbe_set_rss_queues(adapter); +} + +/** + * ngbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ngbe_acquire_msix_vectors(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & NGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. 
+ * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors + */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", vectors); + + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + if (vectors < 9) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + e_dev_warn("Remain available irqs < 9. Disable MISC IRQ REMAP.\n"); + } else { + vectors -= adapter->ring_feature[RING_F_VMDQ].offset; + } + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= NGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. 
+ */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ngbe_add_ring(struct ngbe_ring *ring, + struct ngbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ngbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int ngbe_alloc_q_vector(struct ngbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct ngbe_q_vector *q_vector; + struct ngbe_ring *ring; + int node = -1; + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); + int ring_count, size; + + /* note this will allocate space for the ring structure as well! 
*/ + ring_count = txr_count + rxr_count; + size = sizeof(struct ngbe_q_vector) + + (sizeof(struct ngbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if (tcs <= 1 && !(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, ngbe_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (txr_count && !rxr_count) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = NGBE_7K_ITR; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + 
ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ngbe_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ngbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ngbe_free_q_vector(struct ngbe_adapter *adapter, int v_idx) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ngbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + + netif_napi_del(&q_vector->napi); + + kfree_rcu(q_vector, rcu); +} + +/** + * ngbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int ngbe_alloc_q_vectors(struct ngbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, rqpv, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ngbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void ngbe_free_q_vectors(struct ngbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); +} + +/** + * ngbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + **/ +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ngbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + /* Disable VMDq support */ + e_dev_warn("Disabling VMQd support\n"); + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. 
+ */ + ngbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & NGBE_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy.Err: %d\n", + err); + else + adapter->flags |= NGBE_FLAG_MSI_ENABLED; +} + +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ngbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + **/ +static bool ngbe_cache_ring_vmdq(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + //struct ngbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + + /* If we are greater than indices move to next pool */ + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = reg_idx + i; + + reg_idx = vmdq->offset; + /* If we are greater than indices move to next pool */ + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = reg_idx + i; + + return true; +} + +/** + * ngbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. 
+ * + **/ +static bool ngbe_cache_ring_rss(struct ngbe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * ngbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + **/ +static void ngbe_cache_ring_register(struct ngbe_adapter *adapter) +{ + if (ngbe_cache_ring_vmdq(adapter)) + return; + + ngbe_cache_ring_rss(adapter); +} + +/** + * ngbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + **/ +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter) +{ + int err; + + /* if assigned vfs >= 7, the PF queue irq remain seq 0 and misc irq move from + * seq 1 to seq 8. it needs extra processions. + */ + if (adapter->num_vfs >= NGBE_MAX_VF_FUNCTIONS - 1) + adapter->flags2 |= NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + + /* Number of supported queues */ + ngbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ngbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = ngbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ngbe_reset_interrupt_capability(adapter); + return err; + } + + ngbe_cache_ring_register(adapter); + + set_bit(__NGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ngbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter) +{ + ngbe_free_q_vectors(adapter); + ngbe_reset_interrupt_capability(adapter); + + /* remove this flags */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + adapter->flags2 &= 
~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c new file mode 100644 index 0000000000000000000000000000000000000000..546d00acbf7d974a2769c16f6ab524b1ec860e96 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -0,0 +1,6380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ngbe.h" +#include "ngbe_phy.h" +#include "ngbe_hw.h" + +char ngbe_driver_name[] = "ngbe"; +static const char ngbe_driver_string[] = + "WangXun Gigabit PCI Express Network Driver"; +static const char ngbe_copyright[] = + "Copyright (c) 2018 -2022 Beijing WangXun Technology Co., Ltd"; + +#define DRV_VERSION __stringify(1.2.3anolis) +const char ngbe_driver_version[32] = DRV_VERSION; + +static struct workqueue_struct *ngbe_wq; + +static const char ngbe_underheat_msg[] = + "Network adapter has been restarted since the temperature has been back to normal state"; + +static const char ngbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated."; + +/* ngbe_pci_tbl - PCI Device ID Table + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ngbe_pci_tbl[] = { + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 
0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0}, + { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0}, + /* required last entry */ + { .device = 0 } +}; + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct ngbe_dec_ptype ngbe_ptype_lookup[256] = { + NGBE_UKN(0x00), + NGBE_UKN(0x01), + NGBE_UKN(0x02), + NGBE_UKN(0x03), + NGBE_UKN(0x04), + NGBE_UKN(0x05), + NGBE_UKN(0x06), + NGBE_UKN(0x07), + NGBE_UKN(0x08), + NGBE_UKN(0x09), + NGBE_UKN(0x0A), + NGBE_UKN(0x0B), + NGBE_UKN(0x0C), + NGBE_UKN(0x0D), + NGBE_UKN(0x0E), + NGBE_UKN(0x0F), + + /* L2: mac */ + NGBE_UKN(0x10), + NGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + NGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + NGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + NGBE_UKN(0x20), + NGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + NGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + NGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x26), + NGBE_UKN(0x27), + NGBE_UKN(0x28), + NGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2B, IP, IPV6, 
NONE, NONE, UDP, PAY3), + NGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + NGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x2E), + NGBE_UKN(0x2F), + + /* L2: fcoe */ + NGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x35), + NGBE_UKN(0x36), + NGBE_UKN(0x37), + NGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x3D), + NGBE_UKN(0x3E), + NGBE_UKN(0x3F), + + NGBE_UKN(0x40), + NGBE_UKN(0x41), + NGBE_UKN(0x42), + NGBE_UKN(0x43), + NGBE_UKN(0x44), + NGBE_UKN(0x45), + NGBE_UKN(0x46), + NGBE_UKN(0x47), + NGBE_UKN(0x48), + NGBE_UKN(0x49), + NGBE_UKN(0x4A), + NGBE_UKN(0x4B), + NGBE_UKN(0x4C), + NGBE_UKN(0x4D), + NGBE_UKN(0x4E), + NGBE_UKN(0x4F), + NGBE_UKN(0x50), + NGBE_UKN(0x51), + NGBE_UKN(0x52), + NGBE_UKN(0x53), + NGBE_UKN(0x54), + NGBE_UKN(0x55), + NGBE_UKN(0x56), + NGBE_UKN(0x57), + NGBE_UKN(0x58), + NGBE_UKN(0x59), + NGBE_UKN(0x5A), + NGBE_UKN(0x5B), + NGBE_UKN(0x5C), + NGBE_UKN(0x5D), + NGBE_UKN(0x5E), + NGBE_UKN(0x5F), + NGBE_UKN(0x60), + NGBE_UKN(0x61), + NGBE_UKN(0x62), + NGBE_UKN(0x63), + NGBE_UKN(0x64), + NGBE_UKN(0x65), + NGBE_UKN(0x66), + NGBE_UKN(0x67), + NGBE_UKN(0x68), + NGBE_UKN(0x69), + NGBE_UKN(0x6A), + NGBE_UKN(0x6B), + NGBE_UKN(0x6C), + NGBE_UKN(0x6D), + NGBE_UKN(0x6E), + NGBE_UKN(0x6F), + NGBE_UKN(0x70), + NGBE_UKN(0x71), + NGBE_UKN(0x72), + NGBE_UKN(0x73), + NGBE_UKN(0x74), + NGBE_UKN(0x75), + NGBE_UKN(0x76), + NGBE_UKN(0x77), + NGBE_UKN(0x78), + NGBE_UKN(0x79), + NGBE_UKN(0x7A), + NGBE_UKN(0x7B), + NGBE_UKN(0x7C), + NGBE_UKN(0x7D), + NGBE_UKN(0x7E), + NGBE_UKN(0x7F), + + /* IPv4 --> 
IPv4/IPv6 */ + NGBE_UKN(0x80), + NGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0x86), + NGBE_UKN(0x87), + NGBE_UKN(0x88), + NGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0x8E), + NGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + NGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + NGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + NGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + NGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + NGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + NGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0x96), + NGBE_UKN(0x97), + NGBE_UKN(0x98), + NGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + NGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + NGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + NGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + NGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0x9E), + NGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xA6), + NGBE_UKN(0xA7), + NGBE_UKN(0xA8), + NGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + 
NGBE_UKN(0xAE), + NGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + NGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + NGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xB6), + NGBE_UKN(0xB7), + NGBE_UKN(0xB8), + NGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xBE), + NGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + NGBE_UKN(0xC0), + NGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0xC6), + NGBE_UKN(0xC7), + NGBE_UKN(0xC8), + NGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0xCE), + NGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + NGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + NGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + NGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + NGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + NGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0xD6), + NGBE_UKN(0xD7), + NGBE_UKN(0xD8), + NGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + NGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + NGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + NGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), 
+ NGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0xDE), + NGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xE6), + NGBE_UKN(0xE7), + NGBE_UKN(0xE8), + NGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + NGBE_UKN(0xEE), + NGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + NGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + NGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + NGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xF6), + NGBE_UKN(0xF7), + NGBE_UKN(0xF8), + NGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xFE), + NGBE_UKN(0xFF), +}; + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +#define INFO_STRING_LEN 255 + +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring); +static void ngbe_sync_mac_table(struct ngbe_adapter *adapter); +static void ngbe_configure_pb(struct ngbe_adapter *adapter); +static void ngbe_configure(struct ngbe_adapter *adapter); +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); + +static inline struct ngbe_dec_ptype ngbe_decode_ptype(const u8 ptype) +{ + return 
ngbe_ptype_lookup[ptype]; +} + +static inline struct ngbe_dec_ptype +decode_rx_desc_ptype(const union ngbe_rx_desc *rx_desc) +{ + return ngbe_decode_ptype(NGBE_RXD_PKTTYPE(rx_desc)); +} + +/** + * ngbe_init_ops - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_ops(struct ngbe_hw *hw) +{ + ngbe_init_phy_ops_common(hw); + ngbe_init_ops_common(hw); + ngbe_init_external_phy_ops_common(hw); + + return NGBE_OK; +} + +/** + * ngbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_shared_code(struct ngbe_hw *hw) +{ + if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_M88E1512_SFP || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_LY_M88E1512_SFP) + hw->phy.type = ngbe_phy_m88e1512_sfi; + else if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_M88E1512_RJ45 || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_M88E1512_RJ45) + hw->phy.type = ngbe_phy_m88e1512; + else if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_M88E1512_MIX) + hw->phy.type = ngbe_phy_m88e1512_unknown; + else if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_YT8521S_SFP || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_YT8521S_SFP_GPIO || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_LY_YT8521S_SFP) + hw->phy.type = ngbe_phy_yt8521s_sfi; + else if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_INTERNAL_YT8521S_SFP || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO) + hw->phy.type = ngbe_phy_internal_yt8521s_sfi; + else + hw->phy.type = ngbe_phy_internal; + + if ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_LY_M88E1512_SFP || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_LY_YT8521S_SFP || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_YT8521S_SFP_GPIO || + (hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO) + hw->gpio_ctl = 
1; + + /* select claus22 */ + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + + return ngbe_init_ops(hw); +} + +/** + * ngbe_sw_init - Initialize general software structures (struct ngbe_adapter) + * @adapter: board private structure to initialize + **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int ngbe_sw_init(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + u32 ssid = 0; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->oem_svid = pdev->subsystem_vendor; + hw->oem_ssid = pdev->subsystem_device; + + if (pdev->subsystem_vendor == PCI_VENDOR_ID_WANGXUN) { + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + } else { + ssid = ngbe_flash_read_dword(hw, 0xfffdc); + if (ssid == 0x1) { + e_err(probe, "read of internal subsystem device id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_device_id = ssid >> 8 | ssid << 8; + } + + /* phy type, phy ops, mac ops */ + err = ngbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + adapter->mac_table = kzalloc(sizeof(*adapter->mac_table) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = NGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + + /* Set common capability flags and settings */ + adapter->max_q_vectors = NGBE_MAX_MSIX_Q_VECTORS_EMERALD; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= NGBE_FLAGS_SP_INIT; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_CAPABLE; + adapter->flags2 |= NGBE_FLAG2_EEE_CAPABLE; + + /* default flow control settings */ + 
hw->fc.requested_mode = ngbe_fc_full; + hw->fc.current_mode = ngbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = NGBE_DEFAULT_TXD; + adapter->rx_ring_count = NGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + + set_bit(__NGBE_DOWN, &adapter->state); +out: + return err; +} + +/** + * ngbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ngbe_setup_tx_resources(struct ngbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ngbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + + return -ENOMEM; +} + +static inline struct netdev_queue *txring_txq(const struct ngbe_ring 
*ring)
{
	/* BQL/netdev queue backing this Tx ring */
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

/**
 * ngbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 *
 * Unmaps and frees every pending sk_buff, resets the BQL state for the
 * queue and zeroes both the software buffer array and the descriptor
 * ring.  Safe to call on an already-cleaned ring.
 **/
static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring)
{
	struct ngbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ngbe_unmap_and_free_tx_res(tx_ring, tx_buffer_info);
	}

	/* reset byte-queue-limit accounting for this queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct ngbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ngbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources: the buffer-info array and,
 * if it was allocated, the coherent DMA descriptor ring.
 **/
void ngbe_free_tx_resources(struct ngbe_ring *tx_ring)
{
	ngbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * ngbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * Allocates descriptor rings for every Tx queue; on failure, frees the
 * rings allocated so far so the caller sees all-or-nothing semantics.
 *
 * Return 0 on success, negative on failure
 **/
static int ngbe_setup_all_tx_resources(struct ngbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ngbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ngbe_free_tx_resources(adapter->tx_ring[i]);
	return err;
}

/**
 * ngbe_free_all_tx_resources - Free Tx
Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ngbe_free_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ngbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ngbe_setup_rx_resources(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ngbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ngbe_free_rx_resources(struct ngbe_ring *rx_ring) +{ + ngbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + 
dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ngbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * Allocates descriptor rings for every Rx queue; on failure, frees the
 * rings allocated so far so the caller sees all-or-nothing semantics.
 *
 * Return 0 on success, negative on failure
 **/
static int ngbe_setup_all_rx_resources(struct ngbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ngbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ngbe_free_rx_resources(adapter->rx_ring[i]);

	return err;
}

/**
 * ngbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ngbe_free_all_rx_resources(struct ngbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ngbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ngbe_setup_isb_resources - allocate interrupt status resources
 * @adapter: board private structure
 *
 * Allocates the coherent DMA block the hardware writes interrupt
 * status (ISB) words into; read later via ngbe_misc_isb().
 *
 * Return 0 on success, negative on failure
 **/
static int ngbe_setup_isb_resources(struct ngbe_adapter *adapter)
{
	struct device *dev = pci_dev_to_dev(adapter->pdev);

	adapter->isb_mem = dma_alloc_coherent(dev,
					      sizeof(u32) * NGBE_ISB_MAX,
					      &adapter->isb_dma,
					      GFP_KERNEL);
	if (!adapter->isb_mem) {
		e_err(probe, "%s: alloc isb_mem failed\n", __func__);
		return -ENOMEM;
	}
	/* NOTE(review): dma_alloc_coherent() already returns zeroed memory
	 * on current kernels, so this memset is likely redundant — confirm
	 * against the oldest kernel this driver targets before removing.
	 */
	memset(adapter->isb_mem, 0, sizeof(u32) * NGBE_ISB_MAX);
	return 0;
}

/**
 * ngbe_service_event_schedule - schedule the service task
 * @adapter: board private structure
 *
 * Queues the service work unless the adapter is going down/being
 * removed, or the task is already scheduled (guarded by the
 * __NGBE_SERVICE_SCHED bit).
 **/
void ngbe_service_event_schedule(struct ngbe_adapter *adapter)
{
	if (!test_bit(__NGBE_DOWN, &adapter->state) &&
	    !test_bit(__NGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__NGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ngbe_wq, &adapter->service_task);
}

static void ngbe_handle_phy_event(struct ngbe_hw *hw)
+{ + struct ngbe_adapter *adapter = hw->back; + u32 reg; + + reg = rd32(hw, NGBE_GPIO_INTSTATUS); + wr32(hw, NGBE_GPIO_EOI, reg); + + if (!((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA)) + TCALL(hw, phy.ops.check_event); + + adapter->lsc_int++; + adapter->link_check_timeout = jiffies; + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_service_event_schedule(adapter); +} + +static void ngbe_check_overtemp_event(struct ngbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_EVENT; + ngbe_service_event_schedule(adapter); + } +} + +/** + * ngbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush) +{ + u32 mask = 0; + struct ngbe_hw *hw = &adapter->hw; + + /* enable misc interrupt */ + mask = NGBE_PX_MISC_IEN_MASK; + + if (adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= NGBE_PX_MISC_IEN_OVER_HEAT; + + mask |= NGBE_PX_MISC_IEN_TIMESYNC; + + wr32(&adapter->hw, NGBE_GPIO_DDR, 0x1); + wr32(&adapter->hw, NGBE_GPIO_INTEN, 0x3); + wr32(&adapter->hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0); + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x0); + else + wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x3); + + if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi || + adapter->hw.phy.type == ngbe_phy_internal_yt8521s_sfi) + mask |= NGBE_PX_MISC_IEN_GPIO; + + wr32(hw, NGBE_PX_MISC_IEN, mask); + + /* unmask interrupt */ + if (queues) + ngbe_intr_enable(&adapter->hw, NGBE_INTR_ALL); + else + ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC(adapter)); + + /* flush configuration */ + if (flush) + 
NGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + u32 eicr; + u32 ecc; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err, initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + if (((ecc & NGBE_MIS_ST_LAN0_ECC) && hw->bus.lan_id == 0) || + ((ecc & NGBE_MIS_ST_LAN1_ECC) && hw->bus.lan_id == 1)) + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + + ngbe_service_event_schedule(adapter); + } + if (eicr & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + if ((eicr & NGBE_PX_MISC_IC_STALL) || + (eicr & NGBE_PX_MISC_IC_ETH_EVENT)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + ngbe_check_overtemp_event(adapter, eicr); + + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ngbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ngbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if 
(!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ngbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + if (NGBE_CB(skb)->dma_released) { + dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + + if (NGBE_CB(skb)->page_released) + dma_unmap_page(dev, + NGBE_CB(skb)->dma, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; + } + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. 
+ **/ +static int ngbe_request_msix_irqs(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ngbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX '%s' Error: %d\n", + q_vector->name, err); + goto free_queue_irqs; + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, adapter); + + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + + irq_set_affinity_hint(adapter->msix_entries[vector].vector, NULL); + + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ngbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + u32 
eicr_misc; + u32 ecc = 0; + return IRQ_HANDLED; + eicr = ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + if (!eicr) { + /* shared interrupt alert! + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + adapter->isb_mem[NGBE_ISB_VEC0] = 0; + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED)) + wr32(&adapter->hw, NGBE_PX_INTA, 1); + + eicr_misc = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr_misc & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr_misc & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err, initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + if (eicr_misc & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + ngbe_check_overtemp_event(adapter, eicr_misc); + + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); + + adapter->isb_mem[NGBE_ISB_MISC] = 0; + + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* re-enable link(maybe) and non-queue interrupts, no flush. 
+ * ngbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_irq - initialize interrupts + * @adapter: board private structure + **/ +static int ngbe_request_irq(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + err = ngbe_request_msix_irqs(adapter); + else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &ngbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &ngbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_get_hw_control(struct ngbe_adapter *adapter) +{ + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD); +} + +static void ngbe_setup_gpie(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + gpie = NGBE_PX_GPIE_MODEL; + + wr32(hw, NGBE_PX_GPIE, gpie); +} + +/** + * ngbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + **/ +static void ngbe_set_ivar(struct ngbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + struct ngbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, NGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + /* if assigned VFs >= 7, the pf misc irq shall be remapped to 0x88. 
*/ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + ivar = msix_vector; + wr32(&adapter->hw, NGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, NGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, NGBE_PX_IVAR(queue >> 1), ivar); + } +} + +/** + * ngbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + */ +void ngbe_write_eitr(struct ngbe_q_vector *q_vector) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & NGBE_MAX_EITR; + + itr_reg |= NGBE_PX_ITR_CNT_WDIS; + + wr32(hw, NGBE_PX_ITR(v_idx), itr_reg); +} + +/** + * ngbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ngbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ngbe_configure_msix(struct ngbe_adapter *adapter) +{ + u16 v_idx; + u32 i; + u32 eitrsel = 0; + + /* Populate MSIX to EITR Select */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) { + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + } else { + for (i = 0; i < adapter->num_vfs; i++) + eitrsel |= 1 << i; + + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + } + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ngbe_write_eitr(q_vector); + } + + ngbe_set_ivar(adapter, -1, 0, v_idx); + wr32(&adapter->hw, NGBE_PX_ITR(v_idx), 1950); +} + +/** + * ngbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + **/ +static void ngbe_configure_msi_and_legacy(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + struct ngbe_ring *ring; + + ngbe_write_eitr(q_vector); + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + ngbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ngbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_non_sfp_link_config(struct ngbe_hw *hw) +{ + u32 speed; + u32 ret = NGBE_ERR_LINK_SETUP; + + if (hw->mac.autoneg) + speed = hw->phy.autoneg_advertised; + else + speed = hw->phy.force_speed; + + if (!((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD || + ((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP) || + ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_RGMII_FPGA))) { + mdelay(50); + if (hw->phy.type == ngbe_phy_internal || + hw->phy.type == ngbe_phy_internal_yt8521s_sfi) + TCALL(hw, phy.ops.setup_once); + } + + ret = TCALL(hw, mac.ops.setup_link, speed, false); + + return ret; +} + +static void ngbe_napi_enable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; 
q_idx++) {
		q_vector = adapter->q_vector[q_idx];

		napi_enable(&q_vector->napi);
	}
}

/**
 * ngbe_up_complete - finish bringing the interface up
 * @adapter: board private structure
 *
 * Takes firmware control, programs interrupt routing (MSI-X or
 * MSI/legacy), enables NAPI, configures link, unmasks interrupts and
 * starts the Tx queues and the service timer.  The ordering here is
 * hardware-mandated; do not reorder casually.
 **/
static void ngbe_up_complete(struct ngbe_adapter *adapter)
{
	struct ngbe_hw *hw = &adapter->hw;
	int err;

	ngbe_get_hw_control(adapter);
	ngbe_setup_gpie(adapter);

	if (adapter->flags & NGBE_FLAG_MSIX_ENABLED)
		ngbe_configure_msix(adapter);
	else
		ngbe_configure_msi_and_legacy(adapter);

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__NGBE_DOWN, &adapter->state);
	ngbe_napi_enable_all(adapter);

	err = ngbe_non_sfp_link_config(hw);
	if (err)
		e_err(probe, "link_config FAILED %d\n", err);

	/* select GMII */
	wr32(hw, NGBE_MAC_TX_CFG,
	     (rd32(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) |
	     NGBE_MAC_TX_CFG_SPEED_1G);

	/* clear any pending interrupts, may auto mask */
	rd32(hw, NGBE_PX_IC);
	rd32(hw, NGBE_PX_MISC_IC);
	ngbe_irq_enable(adapter, true, true);

	if (hw->gpio_ctl == 1)
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIO_DR, 0);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem
	 */
	adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;

	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	wr32m(hw, NGBE_CFG_PORT_CTL,
	      NGBE_CFG_PORT_CTL_PFRSTD, NGBE_CFG_PORT_CTL_PFRSTD);
}

/**
 * ngbe_free_irq - release the IRQ lines requested by ngbe_request_irq
 * @adapter: board private structure
 *
 * Legacy/MSI: frees the single line.  MSI-X: frees one vector per
 * q_vector that actually has rings (mirroring the skip logic in
 * ngbe_request_msix_irqs), then the misc "other" vector.
 **/
static void ngbe_free_irq(struct ngbe_adapter *adapter)
{
	int vector;

	if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) {
		free_irq(adapter->pdev->irq, adapter);
		return;
	}

	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		struct ngbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* free only the irqs that were actually requested */
		if (!q_vector->rx.ring &&
!q_vector->tx.ring) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * ngbe_free_isb_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static void ngbe_free_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * NGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +static void ngbe_flush_sw_mac_table(struct ngbe_adapter *adapter) +{ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + ngbe_sync_mac_table(adapter); +} + +/* this function destroys the first RAR entry */ +static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, + u8 *addr) +{ + struct ngbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << 0; + adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | + NGBE_MAC_STATE_IN_USE); + TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); +} + +void ngbe_reset(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + err = TCALL(hw, mac.ops.init_hw); + switch (err) { + case 0: + break; + case NGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case NGBE_ERR_EEPROM_VERSION: + /* We are running on a 
pre-production device, log a warning */ + e_dev_warn("eeprom version error.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ngbe_flush_sw_mac_table(adapter); + ngbe_mac_set_default_filter(adapter, old_addr); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_reset(adapter); +} + +/** + * ngbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_tx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * ngbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_rx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_clean_rx_ring(adapter->rx_ring[i]); +} + +void ngbe_down(struct ngbe_adapter *adapter) +{ + ngbe_disable_device(adapter); + + if (!pci_channel_offline(adapter->pdev)) + ngbe_reset(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); +} + +static void ngbe_release_hw_control(struct ngbe_adapter *adapter) +{ + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, 0); +} + +/** + * ngbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + **/ +int ngbe_open(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__NGBE_TESTING, 
&adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ngbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ngbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ngbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + ngbe_configure(adapter); + + err = ngbe_request_irq(adapter); + if (err) + goto err_req_irq; + + if (adapter->num_tx_queues) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + } + + if (adapter->num_rx_queues) { + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + ngbe_ptp_init(adapter); + + ngbe_up_complete(adapter); + + return 0; + +err_set_queues: + ngbe_free_irq(adapter); +err_req_irq: + ngbe_free_isb_resources(adapter); +err_req_isb: + ngbe_free_all_rx_resources(adapter); + +err_setup_rx: + ngbe_free_all_tx_resources(adapter); +err_setup_tx: + ngbe_reset(adapter); + return err; +} + +/** + * ngbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + **/ +int ngbe_close(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ngbe_ptp_stop(adapter); + + ngbe_down(adapter); + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); + + ngbe_release_hw_control(adapter); + + return 0; +} + +static u64 ngbe_get_tx_completed(struct ngbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 ngbe_get_tx_pending(struct ngbe_ring *ring) +{ + struct ngbe_adapter *adapter; + struct ngbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + 
head = rd32(hw, NGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, NGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool ngbe_check_tx_hang(struct ngbe_ring *tx_ring) +{ + u64 tx_done = ngbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = ngbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if (tx_done_old == tx_done && tx_pending) { + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__NGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } + + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__NGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +/** + * ngbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ngbe_clean_tx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *tx_ring) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + struct ngbe_hw *hw = &adapter->hw; + u16 vid = 0; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return 
true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = NGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ngbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(NGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + 
tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring)) { + if (!ngbe_check_tx_hang(tx_ring)) + adapter->hang_cnt = 0; + else + adapter->hang_cnt++; + + if (adapter->hang_cnt >= 5) { + /* schedule immediate reset if we believe we hung */ + + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + rd32(hw, NGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, NGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + if (vid == NGBE_FAILED_READ_CFG_WORD) + e_info(hw, "pcie link has been lost.\n"); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + } + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ngbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * ngbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ngbe_is_non_eop(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ngbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NGBE_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +static inline void ngbe_rx_hash(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (NGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * ngbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ngbe_rx_checksum(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ngbe_dec_ptype dptype = decode_rx_desc_ptype(rx_desc); + + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_IPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_IPE)) || + (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_OUTERIPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != NGBE_DEC_PTYPE_PROT_SCTP && NGBE_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (dptype.etype >= NGBE_DEC_PTYPE_ETYPE_IG) + skb->csum_level = 1; + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +static void ngbe_rx_vlan(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u8 idx = 0; + u16 ethertype; + + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_TPID_MASK) >> NGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + +/** + * ngbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void ngbe_process_skb_fields(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 flags = rx_ring->q_vector->adapter->flags; + + ngbe_rx_hash(rx_ring, rx_desc, skb); + ngbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(flags & NGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_TS))) { + ngbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } + + ngbe_rx_vlan(rx_ring, rx_desc, skb); + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ngbe_pull_tail - ngbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an ngbe specific version of __pskb_pull_tail. 
The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ngbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, NGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ngbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
+ */ +static void ngbe_dma_sync_frag(struct ngbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(NGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, NGBE_CB(skb)->dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + NGBE_RX_DMA_ATTR); + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } +} + +/** + * ngbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool ngbe_cleanup_headers(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + ngbe_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ngbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ngbe_reuse_rx_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *old_buff) +{ + struct ngbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, + new_buff->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ngbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ngbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool ngbe_add_rx_frag(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + unsigned int truesize = ngbe_rx_bufsz(rx_ring); + + if (size <= NGBE_RX_HDR_SIZE && !skb_is_nonlinear(skb) && + !ring_is_hs_enabled(rx_ring)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ngbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *ngbe_fetch_rx_buffer(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + NGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + + NGBE_CB(skb)->dma = rx_buffer->page_dma; + } else { + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) + ngbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static inline u16 ngbe_get_hlen(struct ngbe_ring __maybe_unused *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & NGBE_RXD_HDRBUFLEN_MASK; + + if (hlen > (NGBE_RX_HDR_SIZE << NGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= NGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +static struct sk_buff *ngbe_fetch_rx_buffer_hs(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + int hdr_len = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + rx_buffer->skb = NULL; + prefetchw(skb->data); + + if (!skb_is_nonlinear(skb)) { + hdr_len = ngbe_get_hlen(rx_ring, rx_desc); + if (hdr_len > 0) { + __skb_put(skb, hdr_len); + 
NGBE_CB(skb)->dma_released = true; + NGBE_CB(skb)->dma = rx_buffer->dma; + rx_buffer->dma = 0; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + NGBE_CB(skb)->dma = rx_buffer->page_dma; + goto add_frag; + } + } + + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) { + if (skb_headlen(skb)) { + if (NGBE_CB(skb)->dma_released) { + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + } else { + ngbe_dma_sync_frag(rx_ring, skb); + } + } + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +add_frag: + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static void ngbe_rx_skb(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * ngbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce 
buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. + **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + + do { + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = NGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + if (ring_is_hs_enabled(rx_ring)) + skb = ngbe_fetch_rx_buffer_hs(rx_ring, rx_desc); + else + skb = ngbe_fetch_rx_buffer(rx_ring, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ngbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + 
rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * ngbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + **/ +int ngbe_poll(struct napi_struct *napi, int budget) +{ + struct ngbe_q_vector *q_vector = container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (!ngbe_clean_tx_irq(q_vector, ring)) + clean_complete = false; + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ngbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ngbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_intr_enable(&adapter->hw, + NGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +#define NGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static inline unsigned long ngbe_tso_features(void) +{ + unsigned long features = 0; + + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL | 
NGBE_GSO_PARTIAL_FEATURES; + + return features; +} + +static int __ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ngbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + + return 0; +} + +static inline int ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + if (likely(ngbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ngbe_maybe_stop_tx(tx_ring, size); +} + +void ngbe_tx_ctxtdesc(struct ngbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ngbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = NGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= NGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static u8 get_ipv6_proto(struct sk_buff *skb, int offset) +{ + struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); + u8 nexthdr = hdr->nexthdr; + + offset += sizeof(struct ipv6hdr); + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr _hdr, *hp; + + if (nexthdr == NEXTHDR_NONE) + break; + + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + break; + + if (nexthdr == NEXTHDR_FRAGMENT) + break; + else if (nexthdr == NEXTHDR_AUTH) + offset += ipv6_authlen(hp); + else + offset += ipv6_optlen(hp); + + nexthdr = hp->nexthdr; + } + + return nexthdr; +} + +static struct ngbe_dec_ptype encode_tx_desc_ptype(const struct ngbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + u8 l4_prot = 0; + u8 ptype = 0; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= NGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; 
+ } + break; + case 6: + l4_prot = get_ipv6_proto(skb, + skb_inner_network_offset(skb)); + ptype |= NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: + switch (first->protocol) { + case htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = NGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case htons(ETH_P_IPV6): + l4_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + ptype = NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* NETIF_F_IPV6_CSUM */ + case htons(ETH_P_1588): + ptype = NGBE_PTYPE_L2_TS; + goto exit; + case htons(ETH_P_FIP): + ptype = NGBE_PTYPE_L2_FIP; + goto exit; + case htons(NGBE_ETH_P_LLDP): + ptype = NGBE_PTYPE_L2_LLDP; + goto exit; + case htons(NGBE_ETH_P_CNM): + ptype = NGBE_PTYPE_L2_CNM; + goto exit; + case htons(ETH_P_PAE): + ptype = NGBE_PTYPE_L2_EAPOL; + goto exit; + case htons(ETH_P_ARP): + ptype = NGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = NGBE_PTYPE_L2_MAC; + goto exit; + } + } + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= NGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= NGBE_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= NGBE_PTYPE_TYP_SCTP; + break; + default: + ptype |= NGBE_PTYPE_TYP_IP; + break; + } + +exit: + return ngbe_decode_ptype(ptype); +} + +static int ngbe_tso(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + u8 *hdr_len, struct ngbe_dec_ptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + bool enc = skb->encapsulation; + + struct ipv6hdr *ipv6h; + + if (skb->ip_summed != 
CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + + if (err) + return err; + } + + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_IPV4 | + NGBE_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_CC; + } + + /* compute header lengths */ + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << NGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << NGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + if (enc) { + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= NGBE_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else { + vlan_macip_lens = skb_network_header_len(skb) >> 1; + } + + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void ngbe_tx_csum(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, struct ngbe_dec_ptype 
dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & NGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & NGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << 
+ NGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + NGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= NGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= NGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +static u32 ngbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = NGBE_TXD_DTYP_DATA | NGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_HW_VLAN, NGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSO, NGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSTAMP, NGBE_TXD_MAC_TSTAMP); + + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_LINKSEC, NGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void ngbe_tx_olinfo_status(union ngbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << NGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CSUM, + 
NGBE_TXD_L4CS); + + /* enable IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPV4, + NGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_OUTER_IPV4, + NGBE_TXD_EIPCS); + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CC, + NGBE_TXD_CC); + + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPSEC, + NGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int ngbe_tx_map(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ngbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = NGBE_TX_DESC(tx_ring, i); + + ngbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > NGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ NGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += NGBE_MAX_DATA_PER_TXD; + size -= NGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; 
/* ngbe_tx_map - map skb data for DMA and fill Tx descriptors
 * @tx_ring: ring the packet is transmitted on
 * @first:   first tx_buffer of the packet (holds skb, flags, bytecount)
 * @hdr_len: TSO header length, subtracted from paylen in olinfo_status
 *
 * Maps the linear part and every page fragment, splitting buffers larger
 * than NGBE_MAX_DATA_PER_TXD across several descriptors.  On DMA mapping
 * failure all partially-created mappings are undone and the skb is freed.
 *
 * Returns 0 on success, -1 on DMA mapping error.
 */
static int ngbe_tx_map(struct ngbe_ring *tx_ring,
		       struct ngbe_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct ngbe_tx_buffer *tx_buffer;
	union ngbe_tx_desc *tx_desc;
	skb_frag_t *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = ngbe_tx_cmd_type(tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = NGBE_TX_DESC(tx_ring, i);

	ngbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);		/* linear part first */
	data_len = skb->data_len;		/* remaining paged data */

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	/* one iteration per buffer: linear head, then each fragment */
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address for later unmap */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* split oversized buffers across extra descriptors;
		 * cmd_type ^ len fills the length field (equivalent to OR
		 * here since the length bits of cmd_type are clear)
		 */
		while (unlikely(size > NGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ NGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {	/* wrap the ring */
				tx_desc = NGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += NGBE_MAX_DATA_PER_TXD;
			size -= NGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))		/* no more fragments */
			break;

		/* close this descriptor and advance to the next one */
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = NGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);

		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | NGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	skb_tx_timestamp(skb);

	/* notify HW of packet unless the queue is stopped or more frames
	 * are pending (doorbell batching via netdev_xmit_more)
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map, walking
	 * backwards from the failing buffer to `first`
	 */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i += tx_ring->count;
		i--;
	}

	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
/* ngbe_xmit_frame_ring - transmit one skb on a specific Tx ring
 * @skb:     packet to send
 * @adapter: board private structure
 * @tx_ring: ring to place the packet on
 *
 * Reserves descriptors, records VLAN/PTP offload flags on the first
 * tx_buffer, performs TSO or checksum offload setup, then maps the
 * packet for DMA.  Returns NETDEV_TX_BUSY only when the ring lacks
 * space; all drop paths still return NETDEV_TX_OK.
 */
netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *skb,
				 struct ngbe_adapter __maybe_unused *adapter,
				 struct ngbe_ring *tx_ring)
{
	struct ngbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;
	struct ngbe_dec_ptype dptype;

	/* need: 1 descriptor per page * PAGE_SIZE/NGBE_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_headlen/NGBE_MAX_DATA_PER_TXD,
	 * + 2 desc gap to keep tail from touching head,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
						     frags[f]));

	if (ngbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << NGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= NGBE_TX_FLAGS_HW_VLAN;
	}

	/* it is a SW VLAN check the next protocol and store the tag */
	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= NGBE_TX_FLAGS_SW_VLAN;
	}

	/* only one outstanding PTP Tx timestamp at a time; further
	 * requests are counted as skipped until the bit is released
	 */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    adapter->ptp_clock) {
		if (!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

			tx_flags |= NGBE_TX_FLAGS_TSTAMP;

			/* schedule check for Tx timestamp */
			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	dptype = encode_tx_desc_ptype(first);

	/* ngbe_tso() < 0 means error, 0 means no TSO: fall back to
	 * plain checksum offload setup
	 */
	tso = ngbe_tso(tx_ring, first, &hdr_len, dptype);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ngbe_tx_csum(tx_ring, first, dptype);

	ngbe_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	e_dev_err("%s drop\n", __func__);

	return NETDEV_TX_OK;
}
first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + + tso = ngbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + ngbe_tx_csum(tx_ring, first, dptype); + + ngbe_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + e_dev_err("%s drop\n", __func__); + + return NETDEV_TX_OK; +} + +static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *tx_ring; + unsigned int r_idx = skb->queue_mapping; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; + + return ngbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ngbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_set_mac(struct net_device *netdev, void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ngbe_del_mac_filter(adapter, hw->mac.addr, 0); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ngbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + 
+/** + * ngbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (new_mtu < 68 || new_mtu > 9414) + return -EINVAL; + + /* we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED && + new_mtu > ETH_DATA_LEN) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +/** + * ngbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ngbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + bool real_tx_hang = false; + int i; + u16 value = 0; + u32 value2 = 0; + u32 head, tail; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && ngbe_check_tx_hang(tx_ring)) { + real_tx_hang = true; + e_info(drv, "&&%s:i=%d&&", __func__, i); + } + } + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", value); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", value); + + value2 = rd32(&adapter->hw, 0x10000); + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2); + value2 = 
/**
 * ngbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hung queue (currently unused; all queues are
 *           scanned instead)
 *
 * Dumps PCI config and hardware state for diagnosis, clears any pending
 * interrupt mask, then kicks off PCIe recovery (via the service task on
 * LAN0, or by signalling the PF state machine register otherwise).
 **/
static void ngbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	struct ngbe_hw *hw = &adapter->hw;
	bool real_tx_hang = false;	/* set below but only logged, never branched on */
	int i;
	u16 value = 0;
	u32 value2 = 0;
	u32 head, tail;

/* NOTE(review): TX_TIMEO_LIMIT is defined but not referenced in this
 * function — confirm whether it is still needed.
 */
#define TX_TIMEO_LIMIT 16000
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ngbe_ring *tx_ring = adapter->tx_ring[i];

		if (check_for_tx_hang(tx_ring) && ngbe_check_tx_hang(tx_ring)) {
			real_tx_hang = true;
			e_info(drv, "&&%s:i=%d&&", __func__, i);
		}
	}

	/* dump PCI identity and command registers */
	pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", value);

	pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", value);

	/* dump a set of diagnostic hardware registers */
	value2 = rd32(&adapter->hw, 0x10000);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2);
	value2 = rd32(&adapter->hw, 0x180d0);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", value2);
	value2 = rd32(&adapter->hw, 0x180d4);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2);
	value2 = rd32(&adapter->hw, 0x180d8);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2);
	value2 = rd32(&adapter->hw, 0x180dc);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2);

	/* log software and hardware ring pointers for every Tx queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		head = rd32(&adapter->hw, NGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx));
		tail = rd32(&adapter->hw, NGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx));

		ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
			      "tx ring %d next_to_use is %d, next_to_clean is %d\n",
			      i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean);
		ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
			      "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail);
	}

	value2 = rd32(&adapter->hw, NGBE_PX_IMS);
	ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
		      "PX_IMS value is 0x%08x\n", value2);

	/* clear whatever interrupts are currently masked */
	if (value2) {
		ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "clear interrupt mask.\n");
		wr32(&adapter->hw, NGBE_PX_ICS, value2);
		wr32(&adapter->hw, NGBE_PX_IMC, value2);
	}

	ERROR_REPORT1(hw, NGBE_ERROR_POLLING, "tx timeout. do pcie recovery.\n");
	/* only LAN0 drives the recovery through the service task; other
	 * functions signal it through the PF state-machine register
	 */
	if (adapter->hw.bus.lan_id == 0) {
		adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER;
		ngbe_service_event_schedule(adapter);
	} else {
		wr32(&adapter->hw, NGBE_MIS_PF_SM, 1);
	}
}
/* ngbe_mii_ioctl - handle SIOCGMIIREG/SIOCSMIIREG requests
 * @netdev: network interface device structure (unused)
 * @ifr:    interface request containing the mii_ioctl_data
 * @cmd:    ioctl command (unused)
 *
 * NOTE(review): this is effectively a stub — prtad/devad are decoded
 * from the phy_id but never used, no register access is performed, and
 * 0 is returned unconditionally.  Confirm whether the MDIO access was
 * intentionally dropped or is still to be implemented.
 */
static int ngbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	/* ifr_data aliases the ifreq union, so this cast yields the
	 * embedded mii_ioctl_data
	 */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data;
	int prtad, devad, ret = 0;

	/* decode port and device address fields from the clause-45 phy_id */
	prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5;
	devad = (mii->phy_id & MDIO_PHY_ID_DEVAD);

	return ret;
}
/* ngbe_read_reg - read a device register with surprise-removal handling
 * @hw:    hardware structure
 * @reg:   register offset to read
 * @quiet: suppress logging in the dead-read retry path
 *
 * Returns NGBE_FAILED_READ_REG when the device has been removed.
 * A value of NGBE_FAILED_READ_REG triggers a removal check, and
 * NGBE_DEAD_READ_REG triggers a retried/validated read.
 */
u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet)
{
	u32 value;
	u8 __iomem *reg_addr;

	reg_addr = READ_ONCE(hw->hw_addr);
	if (NGBE_REMOVED(reg_addr))
		return NGBE_FAILED_READ_REG;
	value = ngbe_rd32(reg_addr + reg);
	/* all-ones reads usually mean the device fell off the bus */
	if (unlikely(value == NGBE_FAILED_READ_REG))
		ngbe_check_remove(hw, reg);
	/* a "dead" pattern may be transient — retry before trusting it */
	if (unlikely(value == NGBE_DEAD_READ_REG))
		value = ngbe_validate_register_read(hw, reg, quiet);
	return value;
}
/**
 * ngbe_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Returns 64bit statistics, for use in the ndo_get_stats64 callback.
 * Per-ring packet/byte counters are read under the u64_stats seqcount
 * so 32-bit readers see consistent values; the remaining fields are
 * copied from netdev->stats, which the watchdog task keeps updated.
 */
static void ngbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *stats)
{
	struct ngbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* RCU protects the ring pointers against teardown while we read */
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ngbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			/* retry the snapshot if a writer raced us */
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp,
							   start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ngbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp,
							   start));
			stats->tx_packets += packets;
			stats->tx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ngbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}
vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) + vlan_num++; + + if (vlan_depth) + vlan_depth -= VLAN_HLEN; + else + vlan_depth = ETH_HLEN; + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + NGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + + return features; +} + +void ngbe_do_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); +} + +static int ngbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_RXHASH) { + if (!(adapter->flags2 & NGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, ~NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~NGBE_FLAG2_RSS_ENABLED; + } + } + + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static netdev_features_t ngbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + features 
&= ~NETIF_F_LRO; + + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + else + features |= NETIF_F_HW_VLAN_STAG_RX; + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + else + features |= NETIF_F_HW_VLAN_STAG_TX; + + return features; +} + +static const struct net_device_ops ngbe_netdev_ops = { + .ndo_open = ngbe_open, + .ndo_stop = ngbe_close, + .ndo_start_xmit = ngbe_xmit_frame, + .ndo_set_rx_mode = ngbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ngbe_set_mac, + .ndo_change_mtu = ngbe_change_mtu, + .ndo_tx_timeout = ngbe_tx_timeout, + .ndo_vlan_rx_add_vid = ngbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ngbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ngbe_ioctl, + .ndo_get_stats64 = ngbe_get_stats64, + .ndo_fdb_add = ngbe_ndo_fdb_add, + .ndo_features_check = ngbe_features_check, + .ndo_set_features = ngbe_set_features, + .ndo_fix_features = ngbe_fix_features, +}; + +void ngbe_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &ngbe_netdev_ops; + ngbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/* disable the specified rx ring/queue */ +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, 0); + + /* hardware may take up to 100us to actually disable rx queue */ + do { + usleep_range(10, 20); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within the polling period\n", + reg_idx); +} + +/** + * ngbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ 
+void ngbe_irq_disable(struct ngbe_adapter *adapter) +{ + wr32(&adapter->hw, NGBE_PX_MISC_IEN, 0); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + + NGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +static void ngbe_napi_disable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +static void ngbe_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + NGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= + ~(NGBE_MAC_STATE_MODIFIED); + } + } +} + +void ngbe_disable_device(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__NGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + ngbe_disable_pcie_master(hw); + /* disable receives */ + TCALL(hw, mac.ops.disable_rx); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ngbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + 
netif_carrier_off(netdev); + netif_tx_disable(netdev); + + if (hw->gpio_ctl == 1) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + + ngbe_irq_disable(adapter); + + ngbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + /*OCP NCSI need it*/ + if (!(((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD) || + ((hw->subsystem_device_id & NGBE_WOL_MASK) == NGBE_WOL_SUP) || + ((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP))) + wr32m(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + + wr32(hw, NGBE_PX_TR_CFG(reg_idx), NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); +} + +void ngbe_unmap_and_free_tx_res(struct ngbe_ring *ring, struct ngbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static void ngbe_service_event_complete(struct ngbe_adapter *adapter) +{ + WARN_ON(!test_bit(__NGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); +} + +/** + * ngbe_hpbthresh - calculate high water mark for flow 
/**
 * ngbe_hpbthresh - calculate high water mark for flow control
 * @adapter: board private structure to calculate for
 *
 * Returns the Rx packet buffer fill level (in KB) at which pause
 * frames must be sent so no packets are dropped before the link
 * partner reacts.
 **/
static int ngbe_hpbthresh(struct ngbe_adapter *adapter)
{
	struct ngbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NGBE_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = NGBE_DV(link, tc);

	/* Loopback switch introduces additional latency */
	if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED)
		dv_id += NGBE_B2BT(tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = NGBE_BT2KB(dv_id);
	rx_pba = rd32(hw, NGBE_RDB_PB_SZ)
		 >> NGBE_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer can not provide enough headroom to support flow control\n");
		marker = tc + 1;
	}

	return marker;
}
ngbe_lpbthresh(adapter); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water > hw->fc.high_water) + hw->fc.low_water = 0; +} + +static void ngbe_configure_pb(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int hdrm = 0; + int tc = 0;//netdev_get_num_tc(adapter->netdev); + + TCALL(hw, mac.ops.setup_rxpba, tc, hdrm, PBA_STRATEGY_EQUAL); + ngbe_pbthresh_setup(adapter); +} + +void ngbe_configure_port(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 value, i; + + value = NGBE_CFG_PORT_CTL_NUM_VT_NONE; + + /* enable double vlan and qinq, NONE VT at default */ + value |= NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ | + NGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + wr32(hw, NGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, NGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].pools | (1ULL << pool)) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + ngbe_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; +} + +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, u8 *addr, u16 pool) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return 
-EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) + continue; + + adapter->mac_table[i].state |= (NGBE_MAC_STATE_MODIFIED | + NGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools = (1ULL << pool); + ngbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +int ngbe_available_rars(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +static u8 *ngbe_addr_list_itr(struct ngbe_hw __maybe_unused *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + + *vmdq = 0; + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else { + *mc_addr_ptr = NULL; + } + + return addr; +} + +/** + * ngbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * Returns: -ENOMEM on failure/insufficient address space + **/ +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ngbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + ngbe_del_mac_filter(adapter, ha->addr, pool); + ngbe_add_mac_filter(adapter, ha->addr, pool); + count++; + } + } + return count; +} + +/** + * ngbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * Returns: -ENOMEM on failure + **/ +int ngbe_write_mc_addr_list(struct 
net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + if (netdev_mc_empty(netdev)) { + TCALL(hw, mac.ops.update_mc_addr_list, NULL, 0, + ngbe_addr_list_itr, true); + } else { + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + addr_count = netdev_mc_count(netdev); + + TCALL(hw, mac.ops.update_mc_addr_list, addr_list, addr_count, + ngbe_addr_list_itr, true); + } + + return addr_count; +} + +/** + * ngbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN); + } +} + +/** + * ngbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), NGBE_PX_RR_CFG_VLAN, 0); + } +} + +/** + * ngbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + **/ +void ngbe_set_rx_mode(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, NGBE_PSR_CTL, + ~(NGBE_PSR_CTL_UPE | 
NGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(0), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, NGBE_PSR_VLAN_CTL, + ~(NGBE_PSR_VLAN_CTL_VFE | + NGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE; + vmolr |= NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_AUPE | + NGBE_PSR_VM_L2CTL_VACC; + + vlnctrl |= NGBE_PSR_VLAN_CTL_VFE; + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= NGBE_PSR_CTL_MPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. */ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (NGBE_PSR_VM_L2CTL_UPE | NGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_SAVE_MAC_ERR, + NGBE_RSEC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | NGBE_PSR_VM_L2CTL_ROMPE; + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ngbe_write_uc_addr_list(netdev, 0); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROPE; + vmolr |= NGBE_PSR_VM_L2CTL_UPE; + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ngbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, NGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, NGBE_PSR_CTL, fctrl); + wr32(hw, 
NGBE_PSR_VM_L2CTL(0), vmolr); + + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (netdev->features & NETIF_F_HW_VLAN_STAG_RX)) + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); +} + +void ngbe_vlan_mode(struct net_device *netdev, u32 features) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool enable; + + enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)); + + if (enable) + /* enable VLAN tag insert/strip */ + ngbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ngbe_vlan_strip_disable(adapter); +} + +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); + TCALL(hw, mac.ops.set_vfta, vid, 0, true); + } + + return 0; +} + +static void ngbe_restore_vlan(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid; + + ngbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ngbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +} + +/** + * ngbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
+ **/ +void ngbe_configure_tx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = NGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), NGBE_PX_TR_CFG_SWFLSH); + NGBE_WRITE_FLUSH(hw); + + wr32(hw, NGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= NGBE_RING_SIZE(ring) << NGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT; + + clear_bit(__NGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + mdelay(1); + txdctl = rd32(hw, NGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * ngbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void ngbe_configure_tx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + + wr32m(hw, NGBE_TSEC_BUF_AE, 0x3FF, 0x10); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + + wr32m(hw, NGBE_TSEC_CTL, 0x1, 1); + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE, NGBE_MAC_TX_CFG_TE); +} + +static void ngbe_setup_psrtype(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = NGBE_RDB_PL_CFG_L4HDR | + NGBE_RDB_PL_CFG_L3HDR | + NGBE_RDB_PL_CFG_L2HDR | + NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + NGBE_RDB_PL_CFG_TUN_TUNHDR; + + for_each_set_bit(pool, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) { + wr32(hw, NGBE_RDB_PL_CFG(0), psrtype); + } +} + +/** + * ngbe_rss_indir_tbl_entries - return the number of entries in the RSS indirection table + * @adapter: device handle + */ +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; +} + +/** + * ngbe_store_reta - write the RETA table to HW + * @adapter: device handle + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
+ */ +void ngbe_store_reta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ngbe_close(dev); + else + ngbe_reset(adapter); + + ngbe_clear_interrupt_scheme(adapter); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + ngbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + +static void ngbe_setup_reta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. 
+ */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && rss_i < 2) + rss_i = 1; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_reta(adapter); +} + +static void ngbe_setup_mrqc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_PCSD, NGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = NGBE_RDB_RA_CTL_RSS_IPV4 | + NGBE_RDB_RA_CTL_RSS_IPV4_TCP | + NGBE_RDB_RA_CTL_RSS_IPV6 | + NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + ngbe_setup_reta(adapter); + + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + rss_field |= NGBE_RDB_RA_CTL_RSS_EN; + wr32(hw, NGBE_RDB_RA_CTL, rss_field); +} + +static void ngbe_set_rx_buffer_len(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ngbe_ring *rx_ring; + int i; + u32 mhadd; + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, NGBE_PSR_MAX_SZ); + if (max_frame != mhadd) + wr32(hw, NGBE_PSR_MAX_SZ, max_frame); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < 
adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + + if (adapter->flags & NGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = NGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else { + clear_ring_hs_enabled(rx_ring); + } + } +} + +static void ngbe_configure_srrctl(struct ngbe_adapter *adapter, + struct ngbe_ring *rx_ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, NGBE_PX_RR_CFG(reg_idx), + ~(NGBE_PX_RR_CFG_RR_HDR_SZ | + NGBE_PX_RR_CFG_RR_BUF_SZ | + NGBE_PX_RR_CFG_SPLIT_MODE)); + + /* configure header buffer length, needed for RSC */ + srrctl |= NGBE_RX_HDR_SIZE << NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= ngbe_rx_bufsz(rx_ring) >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (ring_is_hs_enabled(rx_ring)) + srrctl |= NGBE_PX_RR_CFG_SPLIT_MODE; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void ngbe_rx_desc_queue_enable(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + do { + mdelay(1); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n", + reg_idx); +} + +static bool ngbe_alloc_mapped_skb(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + bi->skb = skb; + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * 
there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +static bool ngbe_alloc_mapped_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ngbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ngbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ngbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ngbe_alloc_rx_buffers(struct ngbe_ring *rx_ring, u16 cleaned_count) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = NGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (ring_is_hs_enabled(rx_ring)) { + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!ngbe_alloc_mapped_page(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = 
NGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +void ngbe_configure_rx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + ngbe_disable_rx_queue(adapter, ring); + + wr32(hw, NGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_RR_BAH(reg_idx), rdba >> 32); + + if (ring->count == NGBE_MAX_RXD) + rxdctl |= 0 << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT; + wr32(hw, NGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + ngbe_configure_srrctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN); + + ngbe_rx_desc_queue_enable(adapter, ring); + ngbe_alloc_rx_buffers(ring, ngbe_desc_unused(ring)); +} + +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring 
*ring, + struct ngbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +void ngbe_configure_isb(struct ngbe_adapter *adapter) +{ + /* set ISB Address */ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +/** + * ngbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void ngbe_configure_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + TCALL(hw, mac.ops.disable_rx); + + ngbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_CRC_STRIP, NGBE_RSEC_CTL_CRC_STRIP); + + /* Program registers for the distribution of queues */ + ngbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ngbe_set_rx_buffer_len(adapter); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= NGBE_RDB_PB_CTL_PBEN; + TCALL(hw, mac.ops.enable_rx_dma, rxctrl); +} + +static void ngbe_configure(struct ngbe_adapter *adapter) +{ + ngbe_configure_pb(adapter); + + /* configure Double Vlan */ + ngbe_configure_port(adapter); + + ngbe_set_rx_mode(adapter->netdev); + ngbe_restore_vlan(adapter); + + ngbe_configure_tx(adapter); + ngbe_configure_rx(adapter); + ngbe_configure_isb(adapter); +} + +void ngbe_up(struct ngbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ngbe_configure(adapter); + + ngbe_up_complete(adapter); +} + +void ngbe_reinit_locked(struct ngbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + + netif_trans_update(adapter->netdev); + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ngbe_down(adapter); + /* If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ngbe_up(adapter); + clear_bit(__NGBE_RESETTING, &adapter->state); +} + +/** + * ngbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + **/ +s32 ngbe_reset_hostif(struct ngbe_hw *hw) +{ + struct ngbe_hic_reset reset_cmd; + int i; + s32 status = 0; + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + reset_cmd.hdr.checksum = 0; + reset_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = ngbe_host_if_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + NGBE_HI_CMD_TIMEOUT, + true); + mdelay(1); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +static void ngbe_reset_subtask(struct ngbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + + if (!(adapter->flags2 & (NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED | + NGBE_FLAG2_RESET_INTR_RECEIVED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state)) + return; + + e_err(drv, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & NGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_DEV_RESET_REQUESTED; + } + 
if (adapter->flags2 & NGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= NGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~NGBE_FLAG2_PF_RESET_REQUESTED; + } + + if (adapter->flags2 & NGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~NGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + + if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) + adapter->hw.reset_type = NGBE_SW_RESET; + else if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = NGBE_GLOBAL_RESET; + + adapter->hw.force_full_reset = true; + ngbe_reinit_locked(adapter); + adapter->hw.force_full_reset = false; + goto unlock; + } + + if (reset_flag & NGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_SW_RST, NGBE_MIS_RST_SW_RST); + e_info(drv, "%s: sw reset\n", __func__); + } else if (reset_flag & NGBE_FLAG2_PF_RESET_REQUESTED) { + ngbe_reinit_locked(adapter); + } else if (reset_flag & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. 
+ */ + pci_save_state(adapter->pdev); + if (ngbe_mng_present(&adapter->hw)) { + ngbe_reset_hostif(&adapter->hw); + e_err(drv, "%s: lan reset\n", __func__); + + } else { + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_GLOBAL_RST, + NGBE_MIS_RST_GLOBAL_RST); + e_info(drv, "%s: global reset\n", __func__); + } + } + +unlock: + rtnl_unlock(); +} + +/** + * ngbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ngbe_check_overtemp_subtask(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + s32 temp_state; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* Since the warning interrupt is for both ports + * we don't have to check if: + * This interrupt wasn't for our port. + * We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT)) + return; + + temp_state = ngbe_phy_check_overtemp(hw); + if (!temp_state || temp_state == NGBE_NOT_IMPLEMENTED) + return; + + if (temp_state == NGBE_ERR_UNDERTEMP && + test_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_underheat_msg); + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + netif_carrier_on(adapter->netdev); + + clear_bit(__NGBE_HANGING, &adapter->state); + } else if (temp_state == NGBE_ERR_OVERTEMP && + !test_and_set_bit(__NGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", ngbe_overheat_msg); + netif_carrier_off(adapter->netdev); + + wr32m(&adapter->hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, 0); + } + + adapter->interrupt_event = 0; +} + +static void ngbe_watchdog_an_complete(struct ngbe_adapter *adapter) +{ + u32 link_speed = 0; + u32 lan_speed = 0; + bool link_up = true; + struct 
ngbe_hw *hw = &adapter->hw; + + if (!(adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + return; + + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + adapter->flags &= ~NGBE_FLAG_NEED_ANC_CHECK; + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; +} + +static void ngbe_enable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl |= NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void ngbe_disable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter) +{ + int i; + + /* We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. 
 */
	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
	    !(adapter->hw.fc.current_mode & ngbe_fc_tx_pause))) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ngbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ngbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
	}
}

/**
 * ngbe_watchdog_update_link_status - update the link status
 * @adapter: pointer to the device adapter structure
 *
 * Polls the MAC for the current link state, mirrors the negotiated speed
 * into NGBE_CFG_LAN_SPEED and, when link comes up, re-enables flow
 * control, Rx drop policy and the MAC transmitter.
 **/
static void ngbe_watchdog_update_link_status(struct ngbe_adapter *adapter)
{
	struct ngbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	u32 lan_speed = 0;
	u32 reg;

	/* nothing to do unless the watchdog flagged a pending link change */
	if (!(adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE))
		return;

	link_speed = NGBE_LINK_SPEED_1GB_FULL;
	link_up = true;

	TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false);

	/* stop fast polling once link is up or the retry window expired */
	if (link_up || time_after(jiffies, (adapter->link_check_timeout +
		NGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
	}

	adapter->link_speed = link_speed;
	/* encode negotiated speed for the LAN speed config register */
	switch (link_speed) {
	case NGBE_LINK_SPEED_100_FULL:
		lan_speed = 1;
		break;
	case NGBE_LINK_SPEED_1GB_FULL:
		lan_speed = 2;
		break;
	case NGBE_LINK_SPEED_10_FULL:
		lan_speed = 0;
		break;
	default:
		break;
	}
	wr32m(hw, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);

	if (link_up) {
		TCALL(hw, mac.ops.fc_enable);
		ngbe_set_rx_drop_en(adapter);
	}

	if (link_up) {
		adapter->last_rx_ptp_check = jiffies;

		if (test_bit(__NGBE_PTP_RUNNING, &adapter->state))
			ngbe_ptp_start_cyclecounter(adapter);

		/* enable the transmitter; 1G speed encoding is used for all
		 * supported rates here
		 */
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
			NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) {
			wr32(hw, NGBE_MAC_TX_CFG,
			     (rd32(hw, NGBE_MAC_TX_CFG) &
			      ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE |
			     NGBE_MAC_TX_CFG_SPEED_1G);
		}

		/* Re configure MAC RX */
		reg = rd32(hw, NGBE_MAC_RX_CFG);
		wr32(hw, NGBE_MAC_RX_CFG, reg);
		wr32(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
		reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT);
		wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg);
	}

	adapter->link_up = link_up;
}

static void ngbe_update_default_up(struct ngbe_adapter *adapter)
{
	/* no DCB support: default user priority is always 0 */
	u8 up = 0;

	adapter->default_up = up;
}

/**
 * ngbe_watchdog_link_is_up - update netif_carrier status and
 * print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ngbe_watchdog_link_is_up(struct ngbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ngbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP;

	/* flow_rx, flow_tx report link flow control status */
	flow_rx = (rd32(hw, NGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1;
	flow_tx = !!(NGBE_RDB_RFCC_RFCE_802_3X &
		     rd32(hw, NGBE_RDB_RFCC));

	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
	       (link_speed == NGBE_LINK_SPEED_1GB_FULL ?
	       "1 Gbps" :
	       (link_speed == NGBE_LINK_SPEED_100_FULL ?
	       "100 Mbps" :
	       (link_speed == NGBE_LINK_SPEED_10_FULL ?
	       "10 Mbps" :
	       "unknown speed"))),
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);

	/* update the default user priority for VFs */
	ngbe_update_default_up(adapter);
}

/**
 * ngbe_watchdog_link_is_down - update netif_carrier status and
 * print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ngbe_watchdog_link_is_down(struct ngbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	if (test_bit(__NGBE_PTP_RUNNING, &adapter->state))
		ngbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/* Account received link-level XOFF frames and disarm the Tx hang check
 * so queues paused by flow control are not reported as hung.
 */
static void ngbe_update_xoff_rx_lfc(struct ngbe_adapter *adapter)
{
	struct ngbe_hw *hw = &adapter->hw;
	struct ngbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	/* XOFF accounting only applies when Rx pause is active */
	if (hw->fc.current_mode != ngbe_fc_full &&
	    hw->fc.current_mode != ngbe_fc_rx_pause)
		return;

	data = rd32(hw, NGBE_MAC_LXOFFRXC);

	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__NGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

/**
 * ngbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ngbe_update_stats(struct ngbe_adapter *adapter)
{
	struct net_device_stats *net_stats = &adapter->netdev->stats;

	struct ngbe_hw *hw = &adapter->hw;
	struct ngbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, bprc, lxon, lxoff;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
	u64 hw_csum_rx_good = 0;

	/* do not touch counters while the device is torn down or resetting */
	if (test_bit(__NGBE_DOWN, &adapter->state) ||
	    test_bit(__NGBE_RESETTING, &adapter->state))
		return;

	/* aggregate software per-ring Rx counters into adapter totals */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ngbe_ring *rx_ring = adapter->rx_ring[i];

		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}

	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->hw_csum_rx_error = hw_csum_rx_error;
	adapter->hw_csum_rx_good = hw_csum_rx_good;
	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ngbe_ring *tx_ring = adapter->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* NOTE(review): hardware counters below are accumulated with "+=",
	 * which assumes the registers are read-to-clear — confirm against
	 * the datasheet.
	 */
	hwstats->crcerrs += rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW);

	hwstats->gprc += rd32(hw, NGBE_PX_GPRC);

	ngbe_update_xoff_rx_lfc(adapter);

	hwstats->o2bgptc += rd32(hw, NGBE_TDM_OS2BMC_CNT);
	/* BMC counters only read while manageability access is available */
	if (ngbe_check_mng_access(&adapter->hw)) {
		hwstats->o2bspc += rd32(hw, NGBE_MNG_OS2BMC_CNT);
		hwstats->b2ospc += rd32(hw, NGBE_MNG_BMC2OS_CNT);
	}
	hwstats->b2ogprc += rd32(hw, NGBE_RDM_BMC2OS_CNT);
	hwstats->gorc += rd32(hw, NGBE_PX_GORC_LSB);
	hwstats->gorc += (u64)rd32(hw, NGBE_PX_GORC_MSB) << 32;

	hwstats->gotc += rd32(hw, NGBE_PX_GOTC_LSB);
	hwstats->gotc += (u64)rd32(hw, NGBE_PX_GOTC_MSB) << 32;

	adapter->hw_rx_no_dma_resources +=
		rd32(hw, NGBE_RDM_DRP_PKT);
	bprc = rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW);
	hwstats->bprc += bprc;
	/* multicast count is recomputed from scratch each pass */
	hwstats->mprc = 0;

	for (i = 0; i < 8; i++)
		hwstats->mprc += rd32(hw, NGBE_PX_MPRC(i));

	hwstats->roc += rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD);
	hwstats->rlec += rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW);
	lxon = rd32(hw, NGBE_RDB_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = rd32(hw, NGBE_RDB_LXOFFTXC);
	hwstats->lxofftxc += lxoff;

	hwstats->gptc += rd32(hw, NGBE_PX_GPTC);
	hwstats->mptc += rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW);
	hwstats->ruc += rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD);
	hwstats->tpr += rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
	hwstats->bptc += rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW);
	/* Fill out the OS statistics structure */
	net_stats->multicast = hwstats->mprc;

	/* Rx Errors */
	net_stats->rx_errors = hwstats->crcerrs +
			       hwstats->rlec;
	net_stats->rx_dropped = 0;
	net_stats->rx_length_errors = hwstats->rlec;
	net_stats->rx_crc_errors = hwstats->crcerrs;
	total_mpc = rd32(hw, NGBE_RDB_MPCNT);
	net_stats->rx_missed_errors = total_mpc;
}

/* Return true if any Tx ring still has descriptors queued for DMA. */
static bool ngbe_ring_tx_pending(struct ngbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ngbe_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean)
			return true;
	}

	return false;
}

/**
 * ngbe_watchdog_flush_tx - flush queues on link down
 *
 * @adapter: pointer to the device adapter structure
 **/
static void ngbe_watchdog_flush_tx(struct ngbe_adapter *adapter)
{
	if (!netif_carrier_ok(adapter->netdev)) {
		if (ngbe_ring_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset due to lost link with pending Tx work\n");
			adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED;
		}
	}
}

/**
 * ngbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ngbe_watchdog_subtask(struct ngbe_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__NGBE_DOWN, &adapter->state) ||
	    test_bit(__NGBE_REMOVING, &adapter->state) ||
	    test_bit(__NGBE_RESETTING, &adapter->state))
		return;

	ngbe_watchdog_an_complete(adapter);
	ngbe_watchdog_update_link_status(adapter);

	if (adapter->link_up)
		ngbe_watchdog_link_is_up(adapter);
	else
		ngbe_watchdog_link_is_down(adapter);

	ngbe_update_stats(adapter);
	ngbe_watchdog_flush_tx(adapter);
}

/**
 * ngbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 */
static void ngbe_check_hang_subtask(struct ngbe_adapter *adapter)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__NGBE_DOWN, &adapter->state) ||
	    test_bit(__NGBE_REMOVING, &adapter->state) ||
	    test_bit(__NGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}
}

/**
 * ngbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ngbe_service_task(struct work_struct *work)
{
	struct ngbe_adapter *adapter = container_of(work,
						    struct ngbe_adapter,
						    service_task);

	/* surprise removal: bring the interface down and bail out */
	if (NGBE_REMOVED(adapter->hw.hw_addr)) {
		if (!test_bit(__NGBE_DOWN, &adapter->state)) {
			rtnl_lock();
			ngbe_down(adapter);
			rtnl_unlock();
		}
		ngbe_service_event_complete(adapter);
		return;
	}

	ngbe_reset_subtask(adapter);
	ngbe_check_overtemp_subtask(adapter);
	ngbe_watchdog_subtask(adapter);
	ngbe_check_hang_subtask(adapter);

	if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) {
		ngbe_ptp_overflow_check(adapter);
		if (unlikely(adapter->flags &
			     NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER))
			ngbe_ptp_rx_hang(adapter);
	}

	ngbe_service_event_complete(adapter);
}

/**
 * ngbe_service_timer - Timer Call-back
 * @t: pointer to the timer_list embedded in the adapter structure
 **/
static void ngbe_service_timer(struct timer_list *t)
{
	struct ngbe_adapter *adapter = from_timer(adapter, t, service_timer);
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if ((adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE) ||
	    (adapter->flags & NGBE_FLAG_NEED_ANC_CHECK))
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	ngbe_service_event_schedule(adapter);
}

/**
 * ngbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 * NOTE(review): every PCI function 0-3 is accepted, so this currently
 * always returns true; the in-code comment about checking the EEPROM
 * does not match the code — confirm intended behavior.
 **/
int ngbe_wol_supported(struct ngbe_adapter *adapter)
{
	struct ngbe_hw *hw = &adapter->hw;

	/* check eeprom to see if WOL is enabled */
	if (hw->bus.func == 0 ||
	    hw->bus.func == 1 ||
	    hw->bus.func == 2 ||
	    hw->bus.func == 3)
		return true;
	else
		return false;
}

static void ngbe_check_minimum_link(struct ngbe_adapter *adapter,
				    int expected_gts)
{
	struct ngbe_hw *hw = &adapter->hw;

	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ngbe_bus_type_internal)
		return;

	pdev = adapter->pdev;

	/* let the PCI core report negotiated width/speed vs. capability */
	pcie_print_link_status(pdev);
}

/**
 * ngbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * Returns the number of sibling PCI functions with our vendor/device ID,
 * or -1 when the bus population cannot be trusted (e.g. a function direct
 * attached to a VM via VT-d).
 **/
static inline int ngbe_enumerate_functions(struct ngbe_adapter *adapter)
{
	struct pci_dev *entry, *pdev = adapter->pdev;
	int physfns = 0;

	list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
		if (entry->vendor != pdev->vendor ||
		    entry->device != pdev->device)
			return -1;

		physfns++;
	}

	return physfns;
}

/**
 * ngbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ngbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ngbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
+ **/ +static int ngbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct ngbe_adapter *adapter = NULL; + struct net_device *netdev; + struct ngbe_hw *hw = NULL; + int err; + int size; + u32 eeprom_cksum_devcap = 0; + u32 saved_version = 0; + u32 etrack_id = 0; + u32 eeprom_verl = 0; + int expected_gts; + char *info_string; + char *i_s_var; + static int cards_found; + u32 devcap = 0; + netdev_features_t hw_features; + struct ngbe_ring_feature *feature; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_pci_disable_dev; + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + ngbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed %d\n", err); + goto err_pci_disable_dev; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + /* errata 16 */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x1000); + + size = sizeof(struct ngbe_adapter); + + netdev = alloc_etherdev_mq(sizeof(struct ngbe_adapter), NGBE_MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_pci_release_regions; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + adapter->io_addr = hw->hw_addr; + if (!adapter->io_addr) { + err = -EIO; + goto err_pci_release_regions; + } + + /* default config: 10/100/1000M autoneg on */ + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG; + hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN; + + strncpy(netdev->name, pci_name(pdev), 
sizeof(netdev->name) - 1); + /* assign netdev ops and ethtool ops */ + ngbe_assign_netdev_ops(netdev); + + /* setup the private structure */ + err = ngbe_sw_init(adapter); + if (err) + goto err_sw_init; + + TCALL(hw, mac.ops.set_lan_id); + + feature = adapter->ring_feature; + feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES, num_online_cpus()); + + /* check if flash load is done after hw power up */ + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PERST); + if (err) + goto err_sw_init; + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PWRRST); + if (err) + goto err_sw_init; + + hw->phy.reset_if_overtemp = true; + err = TCALL(hw, mac.ops.reset_hw); + hw->phy.reset_if_overtemp = false; + if (err) { + e_dev_err("HW reset failed: %d\n", err); + goto err_sw_init; + } + + netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + netdev->features |= NETIF_F_IPV6_CSUM; + + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; + netdev->features |= ngbe_tso_features(); + + netdev->features |= NETIF_F_RXHASH; + + netdev->features |= NETIF_F_RXCSUM; + /* copy netdev features into list of user selectable features */ + hw_features = netdev->hw_features; + hw_features |= netdev->features; + + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + netdev->features |= NETIF_F_NTUPLE; + + hw_features |= NETIF_F_NTUPLE; + netdev->hw_features = hw_features; + + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + + /* MTU range: 68 - 9414 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + + if 
(hw->bus.lan_id == 0) { + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + } else { + eeprom_cksum_devcap = rd32(hw, NGBE_CALSUM_CAP_STATUS); + saved_version = rd32(hw, NGBE_EEPROM_VERSION_STORE_REG); + } + + TCALL(hw, eeprom.ops.init_params); + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB); + if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + /* make sure the EEPROM is good */ + if (TCALL(hw, eeprom.ops.eeprom_chksum_cap_st, NGBE_CALSUM_COMMAND, &devcap)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + } + + ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + adapter->mac_table = kzalloc(sizeof(*adapter->mac_table) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = NGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto err_sw_init; + } + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, ngbe_service_timer, 0); + + if (NGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, ngbe_service_task); + + set_bit(__NGBE_SERVICE_INITED, &adapter->state); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); + + err = ngbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; + if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, + &adapter->eeprom_cap); + /*only support in LAN0*/ + adapter->eeprom_cap = NGBE_DEVICE_CAPS_WOL_PORT0; + } else { + adapter->eeprom_cap = eeprom_cksum_devcap & 0xffff; + } + if (ngbe_wol_supported(adapter)) + adapter->wol = NGBE_PSR_WKUP_CTL_MAG; + if ((hw->subsystem_device_id & NGBE_WOL_MASK) == NGBE_WOL_SUP) { 
+ /*enable wol first in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, 0xa50F); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + } + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + /* Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + if (hw->bus.lan_id == 0 || saved_version == 0) { + TCALL(hw, eeprom.ops.read32, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x10000 | (u32)adapter->eeprom_cap); + } else if (eeprom_cksum_devcap) { + etrack_id = saved_version; + } else { + TCALL(hw, eeprom.ops.read32, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; + } + + /* Make sure offset to SCSI block is valid */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", etrack_id); + + /* reset the hardware with the new settings */ + err = TCALL(hw, mac.ops.start_hw); + if (err) { + e_dev_err("HW init failed, err = %d\n", err); + goto err_register; + } + + /* pick up the PCI bus settings for reporting later */ + TCALL(hw, mac.ops.get_bus_info); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, adapter); + adapter->netdev_registered = true; + + /* call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); + + /* calculate the expected PCIe bandwidth required for optimal + * performance. 
Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure that no warning is displayed, as this could confuse + * users otherwise. + */ + expected_gts = ngbe_enumerate_functions(adapter) * 10; + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ngbe_check_minimum_link(adapter, expected_gts); + + TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF); + + if (((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP) || + ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD)) + e_info(probe, "NCSI : support"); + else + e_info(probe, "NCSI : unsupported"); + + e_info(probe, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", + hw->phy.type == ngbe_phy_internal ? "Internal" : "External"); + + e_info(probe, "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) + goto no_info_string; + + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); + if (adapter->flags & NGBE_FLAG_TPH_ENABLED) + i_s_var += sprintf(i_s_var, "TPH "); + + WARN_ON(i_s_var > (info_string + INFO_STRING_LEN)); + + e_info(probe, "%s\n", info_string); + kfree(info_string); + +no_info_string: + e_info(probe, "WangXun(R) Gigabit Network Connection\n"); + cards_found++; + + return 0; +err_register: + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); +err_sw_init: + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + kfree(adapter->mac_table); + iounmap(adapter->io_addr); +err_pci_release_regions: + pci_disable_pcie_error_reporting(pdev); + pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_disable_dev: + 
pci_disable_device(pdev); + + return err; +} + +/** + * ngbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ngbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void ngbe_remove(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + + set_bit(__NGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); + + iounmap(adapter->io_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(adapter->mac_table); + free_netdev(netdev); + + disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state); + + pci_disable_pcie_error_reporting(pdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * ngbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
 */
static void ngbe_close_suspend(struct ngbe_adapter *adapter)
{
	ngbe_ptp_suspend(adapter);
	ngbe_disable_device(adapter);

	ngbe_clean_all_tx_rings(adapter);
	ngbe_clean_all_rx_rings(adapter);

	ngbe_free_irq(adapter);

	ngbe_free_isb_resources(adapter);
	ngbe_free_all_rx_resources(adapter);
	ngbe_free_all_tx_resources(adapter);
}

/* Common teardown used by both shutdown and suspend.
 * @enable_wake is set to true when Wake-on-LAN is armed so the caller
 * can program the PCI wake state accordingly.
 */
static int ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ngbe_hw *hw = &adapter->hw;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	*enable_wake = !!wufc;

	netif_device_detach(netdev);

	rtnl_lock();
	if (netif_running(netdev))
		ngbe_close_suspend(adapter);
	rtnl_unlock();

	ngbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	if (wufc) {
		/* keep Rx configured so wake packets can be recognized */
		ngbe_set_rx_mode(netdev);
		ngbe_configure_rx(adapter);
		/* enable the optics for SFP+ fiber as we can WoL */
		TCALL(hw, mac.ops.enable_tx_laser);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & NGBE_PSR_WKUP_CTL_MC) {
			wr32m(hw, NGBE_PSR_CTL,
			      NGBE_PSR_CTL_MPE, NGBE_PSR_CTL_MPE);
		}

		pci_clear_master(adapter->pdev);
		wr32(hw, NGBE_PSR_WKUP_CTL, wufc);
	} else {
		wr32(hw, NGBE_PSR_WKUP_CTL, 0);
	}

	pci_wake_from_d3(pdev, !!wufc);

	ngbe_release_hw_control(adapter);

	if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

static void ngbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	ngbe_dev_shutdown(pdev, &wake);

	/* only arm wake when the system is actually powering off */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
static int ngbe_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = ngbe_dev_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int ngbe_resume(struct device *dev)
{
	struct ngbe_adapter *adapter;
	struct net_device *netdev;
	u32 err;
	struct pci_dev *pdev = to_pci_dev(dev);

	adapter = pci_get_drvdata(pdev);
	netdev = adapter->netdev;
	/* restore the MMIO mapping saved at probe time */
	adapter->hw.hw_addr = adapter->io_addr;
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);
	wr32(&adapter->hw, NGBE_PSR_WKUP_CTL, adapter->wol);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__NGBE_DISABLED, &adapter->state);
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	ngbe_reset(adapter);

	rtnl_lock();

	err = ngbe_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ngbe_open(netdev);

	rtnl_unlock();

	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}

/**
 * ngbe_freeze - quiesce the device (no IRQ's or DMA)
 * @dev: The port's netdev
 */
static int ngbe_freeze(struct device *dev)
{
	struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ngbe_down(adapter);
		ngbe_free_irq(adapter);
	}

	ngbe_reset_interrupt_capability(adapter);

	return 0;
}

/**
 * ngbe_thaw - un-quiesce the device
 * @dev: The port's netdev
 */
static int ngbe_thaw(struct device *dev)
{
	struct ngbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = adapter->netdev;

	ngbe_set_interrupt_capability(adapter);

	if (netif_running(netdev))
{ + u32 err = ngbe_request_irq(adapter); + + if (err) + return err; + + ngbe_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} + +static const struct dev_pm_ops ngbe_pm_ops = { + .suspend = ngbe_suspend, + .resume = ngbe_resume, + .freeze = ngbe_freeze, + .thaw = ngbe_thaw, + .poweroff = ngbe_suspend, + .restore = ngbe_resume, +}; +#endif/* CONFIG_PM */ + +static struct pci_driver ngbe_driver = { + .name = ngbe_driver_name, + .id_table = ngbe_pci_tbl, + .probe = ngbe_probe, + .remove = ngbe_remove, + .shutdown = ngbe_shutdown, +}; + +/** + * ngbe_init_module - Driver Registration Routine + * ngbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ngbe_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", ngbe_driver_string, ngbe_driver_version); + pr_info("%s\n", ngbe_copyright); + + ngbe_wq = create_singlethread_workqueue(ngbe_driver_name); + if (!ngbe_wq) { + pr_err("%s: Failed to create workqueue\n", ngbe_driver_name); + return -ENOMEM; + } + + ret = pci_register_driver(&ngbe_driver); + return ret; +} +module_init(ngbe_init_module); + +/** + * ngbe_exit_module - Driver Exit Cleanup Routine + * ngbe_exit_module is called just before the driver is removed + * from memory. 
 **/
static void __exit ngbe_exit_module(void)
{
	pci_unregister_driver(&ngbe_driver);
	destroy_workqueue(ngbe_wq);
}

module_exit(ngbe_exit_module);

MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, ");
MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c
new file mode 100644
index 0000000000000000000000000000000000000000..4aa79e5d48cebb894eb34ff204b1d968bd63f7be
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.c
@@ -0,0 +1,1502 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */

#include "ngbe_type.h"
#include "ngbe_phy.h"
#include "ngbe_hw.h"
#include "ngbe.h"

/**
 * ngbe_check_reset_blocked - check status of MNG FW veto bit
 * @hw: pointer to the hardware structure
 *
 * Returns true when management firmware has vetoed PHY resets.
 **/
bool ngbe_check_reset_blocked(struct ngbe_hw *hw)
{
	u32 mmngc;

	mmngc = rd32(hw, NGBE_MIS_ST);
	if (mmngc & NGBE_MIS_ST_MNG_VETO) {
		ERROR_REPORT1(hw, NGBE_ERROR_SOFTWARE,
			      "MNG_VETO bit detected.\n");
		return true;
	}

	return false;
}

/* Read an internal-PHY register through the MMIO PHY config window.
 * NOTE(review): page select is skipped for page 0xa43 regs 0x1a/0x1d —
 * presumably special always-visible registers; confirm against the
 * internal PHY documentation. Also note the page-select write happens
 * before the reg_offset range check.
 */
s32 ngbe_phy_read_reg(struct ngbe_hw *hw,
		      u32 reg_offset,
		      u32 page,
		      u16 *phy_data)
{
	/* clear input */
	*phy_data = 0;

	if (!(page == 0xa43 && (reg_offset == 0x1a || reg_offset == 0x1d)))
		wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page);

	if (reg_offset >= NGBE_INTERNAL_PHY_OFFSET_MAX) {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "input reg offset %d exceed maximum 31.\n", reg_offset);
		return NGBE_ERR_INVALID_ARGUMENT;
	}

	*phy_data = 0xFFFF & rd32(hw, NGBE_PHY_CONFIG(reg_offset));

	return NGBE_OK;
}

/* Write an internal-PHY register through the MMIO PHY config window. */
s32 ngbe_phy_write_reg(struct ngbe_hw *hw,
		       u32 reg_offset,
		       u32 page,
		       u16 phy_data)
{
	if (!(page == 0xa43 && (reg_offset == 0x1a || reg_offset == 0x1d)))
		wr32(hw,
		     NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), page);

	if (reg_offset >= NGBE_INTERNAL_PHY_OFFSET_MAX) {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "reg offset %d exceed maximum.\n", reg_offset);

		return NGBE_ERR_INVALID_ARGUMENT;
	}
	wr32(hw, NGBE_PHY_CONFIG(reg_offset), phy_data);

	return NGBE_OK;
}

/**
 * ngbe_phy_read_reg_mdi - Reads a value from a specified PHY register without
 * the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: 5 bit device type
 * @phy_data: Pointer to read data from PHY register
 **/
s32 ngbe_phy_read_reg_mdi(struct ngbe_hw *hw,
			  u32 reg_addr,
			  u32 device_type,
			  u16 *phy_data)
{
	u32 command;
	s32 status = 0;

	/* setup and write the address cycle command */
	command = NGBE_MSCA_RA(reg_addr) |
		  NGBE_MSCA_PA(hw->phy.addr) |
		  NGBE_MSCA_DA(device_type);
	wr32(hw, NGBE_MSCA, command);

	/* kick off the read and poll for completion */
	command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
		  NGBE_MSCC_BUSY |
		  NGBE_MDIO_CLK(6);
	wr32(hw, NGBE_MSCC, command);

	/* wait to complete */
	status = po32m(hw, NGBE_MSCC,
		       NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY,
		       NGBE_MDIO_TIMEOUT, 10);
	if (status != 0) {
		ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
			      "PHY address command did not complete.\n");

		return NGBE_ERR_PHY;
	}

	/* read data from MSCC */
	*phy_data = 0xFFFF & rd32(hw, NGBE_MSCC);

	return 0;
}

/**
 * ngbe_phy_write_reg_mdi - Writes a value to specified PHY register
 * without SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 5 bit device type
 * @phy_data: Data to write to the PHY register
 **/
s32 ngbe_phy_write_reg_mdi(struct ngbe_hw *hw,
			   u32 reg_addr,
			   u32 device_type,
			   u16 phy_data)
{
	u32 command;
	s32 status = 0;

	/* setup and write the address cycle command */
	command = NGBE_MSCA_RA(reg_addr) |
		  NGBE_MSCA_PA(hw->phy.addr) |
		  NGBE_MSCA_DA(device_type);
	wr32(hw, NGBE_MSCA, command);

	command = phy_data |
		  NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
		  NGBE_MSCC_BUSY |
		  NGBE_MDIO_CLK(6);
	wr32(hw, NGBE_MSCC, command);

	/* wait to complete */
	status = po32m(hw, NGBE_MSCC,
		       NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY,
		       NGBE_MDIO_TIMEOUT, 10);
	if (status != 0) {
		ERROR_REPORT1(hw, NGBE_ERROR_POLLING,
			      "PHY address command did not complete.\n");
		return NGBE_ERR_PHY;
	}

	return 0;
}

/* Write a YT PHY extended register: reg 0x1e selects the extended
 * address, reg 0x1f carries the data.
 */
s32 ngbe_phy_write_reg_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_mdi(hw, 0x1e, type, addr);
	if (!status)
		status = ngbe_phy_write_reg_mdi(hw, 0x1f, type, phy_data);

	return status;
}

/* Read a YT PHY MII register via the serdes space.
 * NOTE(review): ext reg 0xa000 = 0x02 appears to select the fiber/serdes
 * register space and is restored to 0x00 afterwards — confirm against
 * the YT8521S/YT8531S datasheet.
 */
s32 ngbe_phy_read_reg_sds_mii_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 *phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x02);
	if (!status)
		status = ngbe_phy_read_reg_mdi(hw, addr, type, phy_data);
	/* always restore the default register space */
	ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x00);

	return status;
}

/* Write a YT PHY MII register via the serdes space (see read variant). */
s32 ngbe_phy_write_reg_sds_mii_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x02);
	if (!status)
		status = ngbe_phy_write_reg_mdi(hw, addr, type, phy_data);
	ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x00);

	return status;
}

/* Read a YT PHY extended register (address via 0x1e, data via 0x1f). */
s32 ngbe_phy_read_reg_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 *phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_mdi(hw, 0x1e, type, addr);
	if (!status)
		status = ngbe_phy_read_reg_mdi(hw, 0x1f, type, phy_data);

	return status;
}

/* Read a YT PHY extended register from the serdes space. */
s32 ngbe_phy_read_reg_sds_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 *phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x02);
	if (!status)
		status = ngbe_phy_read_reg_ext_yt(hw, addr, type, phy_data);
	ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x00);

	return status;
}

/* Write a YT PHY extended register in the serdes space. */
s32 ngbe_phy_write_reg_sds_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 phy_data)
{
	s32 status = 0;

	status = ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x02);
	if (!status)
		status = ngbe_phy_write_reg_ext_yt(hw, addr, type, phy_data);
	ngbe_phy_write_reg_ext_yt(hw, 0xa000, type, 0x00);

	return status;
}

/* Poll internal-PHY reg 29 (page 0xa43) until bit 0x20 signals MDIO
 * access is ready; gives up silently after ~100-200ms. Always returns 0.
 */
int ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw)
{
	int i;
	u16 val = 0;

	for (i = 0; i < 100; i++) {
		ngbe_phy_read_reg(hw, 29, 0xa43, &val);
		if (val & 0x20)
			break;
		usleep_range(1000, 2000);
	}

	return 0;
}

/* Verify the internal PHY reports the expected ID, storing it on match. */
s32 ngbe_check_internal_phy_id(struct ngbe_hw *hw)
{
	u16 phy_id_high = 0;
	u16 phy_id_low = 0;
	u16 phy_id = 0;

	ngbe_gphy_wait_mdio_access_on(hw);

	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
	/* NOTE(review): unusual ID packing (<<6 / >>10) — matches the
	 * layout implied by NGBE_INTERNAL_PHY_ID; confirm with datasheet.
	 */
	phy_id = phy_id_high << 6;
	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
	phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;

	if (phy_id == NGBE_INTERNAL_PHY_ID) {
		hw->phy.id = (u32)phy_id;
	} else {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "internal phy id 0x%x not supported.\n", phy_id);

		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	return NGBE_OK;
}

/* Verify an external Marvell 88E1512 PHY and, for the "unknown" subtype,
 * derive the RGMII mode (copper vs. SFI) from a per-port byte in flash.
 */
s32 ngbe_check_mdi_phy_id(struct ngbe_hw *hw)
{
	u16 phy_id_high = 0;
	u16 phy_id_low = 0;
	u32 phy_id = 0;
	u8 value = 0;
	u32 phy_mode = 0;

	if (hw->phy.type == ngbe_phy_m88e1512) {
		/* select page 0 */
		ngbe_phy_write_reg_mdi(hw, 22, 0, 0);
	} else {
		/* select page 1 */
		ngbe_phy_write_reg_mdi(hw, 22, 0, 1);
	}

	ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
	phy_id = phy_id_high << 6;
	ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
	phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;

	if (phy_id == NGBE_M88E1512_PHY_ID) {
		hw->phy.id = phy_id;
	} else {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "MDI phy id 0x%x not supported.\n", phy_id);

		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	if (hw->phy.type == ngbe_phy_m88e1512_unknown) {
		/* one mode byte per LAN port packed into the flash dword */
		phy_mode = ngbe_flash_read_dword(hw, 0xff010);
		switch (hw->bus.lan_id) {
		case 0:
			value = (u8)phy_mode;
			break;
		case 1:
			value = (u8)(phy_mode >> 8);
			break;
		case 2:
			value = (u8)(phy_mode >> 16);
			break;
		case 3:
			value = (u8)(phy_mode >> 24);
			break;
		default:
			break;
		}
		if ((value & 0x7) == 0) {
			/* mode select to RGMII-to-copper */
			hw->phy.type = ngbe_phy_m88e1512;
		} else if ((value & 0x7) == 0x2) {
			/* mode select to RGMII-to-sfi */
			hw->phy.type = ngbe_phy_m88e1512_sfi;
		} else {
			ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
				      "marvell 88E1512 mode %x is not supported.\n", value);
			return NGBE_ERR_DEVICE_NOT_SUPPORTED;
		}
	}

	return NGBE_OK;
}

/* Probe @phy_addr by reading PHY ID register 0x3 under the phy_lock;
 * an all-ones or all-zeros read means no PHY responds at that address.
 * Side effect: hw->phy.addr is left set to @phy_addr.
 */
bool ngbe_validate_phy_addr(struct ngbe_hw *hw, u32 phy_addr)
{
	u16 phy_id = 0;
	bool valid = false;
	unsigned long flags;

	hw->phy.addr = phy_addr;

	spin_lock_irqsave(&hw->phy_lock, flags);
	ngbe_phy_read_reg_sds_mii_yt(hw, 0x3, 0, &phy_id);
	spin_unlock_irqrestore(&hw->phy_lock, flags);

	if (phy_id != 0xFFFF && phy_id != 0x0)
		valid = true;

	return valid;
}

/* Scan all 32 MDIO addresses for a responding YT8521S/YT8531S PHY and
 * record its address and ID on success.
 */
s32 ngbe_check_yt_phy_id(struct ngbe_hw *hw)
{
	u16 phy_id = 0;
	bool valid = false;
	u32 phy_addr;
	unsigned long flags;

	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
		valid = ngbe_validate_phy_addr(hw, phy_addr);
		if (valid) {
			hw->phy.addr = phy_addr;
			break;
		}
	}
	if (!valid) {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "cannnot find valid phy address.\n");

		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	spin_lock_irqsave(&hw->phy_lock, flags);
	ngbe_phy_read_reg_sds_mii_yt(hw, 0x3, 0, &phy_id);
	spin_unlock_irqrestore(&hw->phy_lock, flags);

	if (phy_id == NGBE_YT8521S_PHY_ID || phy_id == NGBE_YT8531S_PHY_ID) {
		hw->phy.id = phy_id;
	} else {
		ERROR_REPORT1(hw, NGBE_ERROR_UNSUPPORTED,
			      "MDI phy id 0x%x not supported.\n", phy_id);

		return NGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	return NGBE_OK;
}

/**
 * ngbe_phy_identify - Identifies the PHY type attached to this port
 * @hw: pointer to hardware structure
 **/
s32 ngbe_phy_identify(struct ngbe_hw *hw)
{
	s32
status = 0; + + switch (hw->phy.type) { + case ngbe_phy_internal: + case ngbe_phy_internal_yt8521s_sfi: + status = ngbe_check_internal_phy_id(hw); + break; + case ngbe_phy_m88e1512: + case ngbe_phy_m88e1512_sfi: + case ngbe_phy_m88e1512_unknown: + status = ngbe_check_mdi_phy_id(hw); + break; + case ngbe_phy_yt8521s_sfi: + status = ngbe_check_yt_phy_id(hw); + break; + default: + status = NGBE_ERR_PHY_TYPE; + } + + return status; +} + +/** + * ngbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + **/ +s32 ngbe_phy_init(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u16 value = 0; + int i; + u8 lan_id = hw->bus.lan_id; + struct ngbe_adapter *adapter = hw->back; + unsigned long flags; + + /* init phy.addr according to HW design */ + hw->phy.addr = 0; + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == NGBE_ERR_SFP_NOT_SUPPORTED) + return ret_val; + + /* enable interrupts, only link status change and an done is allowed */ + if (hw->phy.type == ngbe_phy_internal || hw->phy.type == ngbe_phy_internal_yt8521s_sfi) { + value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC; + ret_val = TCALL(hw, phy.ops.write_reg, 0x12, 0xa42, value); + adapter->gphy_efuse[0] = ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8); + adapter->gphy_efuse[1] = ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8 + 4); + } else if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 2); + TCALL(hw, phy.ops.read_reg_mdi, 21, 0, &value); + value &= ~NGBE_M88E1512_RGM_TTC; + value |= NGBE_M88E1512_RGM_RTC; + TCALL(hw, phy.ops.write_reg_mdi, 21, 0, value); + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, NGBE_MDI_PHY_RESET); + for (i = 0; i < 15; i++) { + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); + if (value & NGBE_MDI_PHY_RESET) + 
mdelay(1); + else + break; + } + + if (i == 15) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "phy reset exceeds maximum waiting period.\n"); + return NGBE_ERR_PHY_TIMEOUT; + } + + ret_val = TCALL(hw, phy.ops.reset); + if (ret_val) + return ret_val; + + /* set LED2 to interrupt output and INTn active low */ + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.read_reg_mdi, 18, 0, &value); + value |= NGBE_M88E1512_INT_EN; + value &= ~(NGBE_M88E1512_INT_POL); + TCALL(hw, phy.ops.write_reg_mdi, 18, 0, value); + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 16, 0, &value); + value &= ~0x4; + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, value); + } + + /* enable link status change and AN complete interrupts */ + value = NGBE_M88E1512_INT_ANC | NGBE_M88E1512_INT_LSC; + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.write_reg_mdi, 18, 0, value); + + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); + value |= NGBE_M88E1512_POWER; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + /* power down in Fiber mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt(hw, 0x0, 0, &value); + value |= NGBE_YT_PHY_POWER; + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + + /* power down in UTP mode */ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= NGBE_YT_PHY_POWER; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + + return ret_val; +} + +s32 ngbe_phy_reset(struct ngbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + int i; + + /* only support internal phy */ + if (hw->phy.type != ngbe_phy_internal && + hw->phy.type != ngbe_phy_internal_yt8521s_sfi) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "%s: operation not supported.\n", __func__); + return 
NGBE_ERR_PHY_TYPE; + } + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + TCALL(hw, phy.ops.check_overtemp) == NGBE_ERR_OVERTEMP) { + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "OVERTEMP! Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; /* status is 0 here: FW owns the PHY, treat as success */ + + value |= NGBE_MDI_PHY_RESET; /* BMCR soft-reset bit; polled below until hardware clears it */ + status = TCALL(hw, phy.ops.write_reg, 0, 0, value); + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = TCALL(hw, phy.ops.read_reg, 0, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + mdelay(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "PHY MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +s32 ngbe_phy_reset_m88e1512(struct ngbe_hw *hw) +{ + s32 status = 0; + + u16 value = 0; + int i; + + if (hw->phy.type != ngbe_phy_m88e1512 && + hw->phy.type != ngbe_phy_m88e1512_sfi) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + TCALL(hw, phy.ops.check_overtemp) == NGBE_ERR_OVERTEMP) { + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* select page 18 reg 20 */ + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 18); + + if (hw->phy.type == ngbe_phy_m88e1512) + /* mode select to RGMII-to-copper */ + value = 0; + else + /* mode select to RGMII-to-sfi */ + value = 2; + status = TCALL(hw, phy.ops.write_reg_mdi, 20, 0, value); + /* mode reset */ + value |= NGBE_MDI_PHY_RESET; + status = TCALL(hw, phy.ops.write_reg_mdi, 20, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 20, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + mdelay(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "M88E1512 MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +s32 ngbe_phy_reset_yt(struct ngbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + int i; + unsigned long flags; + + if (hw->phy.type != ngbe_phy_yt8521s_sfi) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + TCALL(hw, phy.ops.check_overtemp) == NGBE_ERR_OVERTEMP) { + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* check chip_mode first */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt(hw, 0xa001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & 7) != 0) {/* fiber_to_rgmii */ + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0, 0, &value); + /* sds software reset */ + value |= 0x8000; + status = ngbe_phy_write_reg_sds_mii_yt(hw, 0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (!(value & 0x8000)) + break; + mdelay(1); + } + } else {/* utp_to_rgmii */ + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_mdi(hw, 0, 0, &value); + /* software reset */ + value |= 0x8000; + status = ngbe_phy_write_reg_mdi(hw, 0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_mdi(hw, 0, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (!(value & 0x8000)) + break; + mdelay(1); + } + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "YT8521S Software RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + u16 value = 0; + s32 status = 0; + + if (!hw->mac.autoneg) { + status = TCALL(hw, phy.ops.reset); + if (status) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "call phy reset return %d.\n", status); + return NGBE_ERR_PHY; + } + + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case 
NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX; + TCALL(hw, phy.ops.write_reg, 0, 0, value); + + goto skip_an; + } + + /* disable 10/100M Half Duplex */ + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFF5F; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + + /* set advertise enable according to input speed */ + if (!(speed & NGBE_LINK_SPEED_1GB_FULL)) { + TCALL(hw, phy.ops.read_reg, 9, 0, &value); + value &= 0xFDFF; + TCALL(hw, phy.ops.write_reg, 9, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 9, 0, &value); + value |= 0x200; + TCALL(hw, phy.ops.write_reg, 9, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_100_FULL)) { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFEFF; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value |= 0x100; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_10_FULL)) { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFFBF; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value |= 0x40; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } + + /* restart AN and wait AN done interrupt */ + if (((hw->subsystem_device_id & NGBE_NCSI_MASK) == NGBE_NCSI_SUP) || + ((hw->subsystem_device_id & NGBE_OEM_MASK) == NGBE_SUBID_OCP_CARD)) { + if (need_restart_AN) + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + else + value = NGBE_MDI_PHY_ANE; + } else { + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + } + + TCALL(hw, phy.ops.write_reg, 0, 0, value); +skip_an: + TCALL(hw, phy.ops.phy_led_ctrl); + + TCALL(hw, phy.ops.check_event); + + return NGBE_OK; +} + +u32 
ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw, + u32 speed, + bool wait) +{ + u16 value_r4 = 0; + u16 value_r9 = 0; + u16 value; + + hw->phy.autoneg_advertised = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + + goto skip_an; + } + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + value_r9 |= NGBE_M88E1512_1000BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } + + if (speed & NGBE_LINK_SPEED_100_FULL) { + value_r4 |= NGBE_M88E1512_100BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + } + + if (speed & NGBE_LINK_SPEED_10_FULL) { + value_r4 |= NGBE_M88E1512_10BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + } + + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~(NGBE_M88E1512_100BASET_FULL | + NGBE_M88E1512_100BASET_HALF | + NGBE_M88E1512_10BASET_FULL | + NGBE_M88E1512_10BASET_HALF); + value_r4 |= value; + TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value_r4); + + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + TCALL(hw, phy.ops.read_reg_mdi, 9, 0, &value); + value &= ~(NGBE_M88E1512_1000BASET_FULL | + NGBE_M88E1512_1000BASET_HALF); + value_r9 |= value; + TCALL(hw, phy.ops.write_reg_mdi, 9, 0, value_r9); + + value = NGBE_MDI_PHY_RESTART_AN | + NGBE_MDI_PHY_ANE | + NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + } else { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + TCALL(hw, 
phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0x60; + value |= 0x20; + TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + + if (hw->mac.autoneg) + value = NGBE_MDI_PHY_RESTART_AN | + NGBE_MDI_PHY_ANE | + NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX | + NGBE_MDI_PHY_SPEED_SELECT1; + else + value = NGBE_MDI_PHY_RESET | + NGBE_MDI_PHY_DUPLEX | + NGBE_MDI_PHY_SPEED_SELECT1; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + } + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); +skip_an: + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); + value &= ~0x800; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + mdelay(5); + + TCALL(hw, phy.ops.check_event); + + return NGBE_OK; +} + +u32 ngbe_phy_setup_link_yt(struct ngbe_hw *hw, + u32 speed, + bool wait) +{ + s32 ret_val = 0; + u16 value; + u16 value_r4 = 0; + u16 value_r9 = 0; + unsigned long flags; + + hw->phy.autoneg_advertised = 0; + + /* check chip_mode first */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt(hw, 0xA001, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & 7) == 0) {/* utp_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + goto skip_an; + } + + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &= ~value_r4; 
+ ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &= ~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |= value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |= value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 1) {/* fiber_to_rgmii */ + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + + /* RGMII_Config1 : Config rx and tx training delay */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt(hw, 0xA003, 0, 0x3cf1); + ngbe_phy_write_reg_ext_yt(hw, 0xA001, 0, 0x8041); + + ngbe_phy_read_reg_sds_ext_yt(hw, 0xA5, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_sds_ext_yt(hw, 0xA5, 0, value); + + ngbe_phy_read_reg_ext_yt(hw, 0xA006, 0, &value); + value |= 0x1; + ngbe_phy_write_reg_ext_yt(hw, 0xA006, 0, value); + + 
ngbe_phy_read_reg_ext_yt(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt(hw, 0xA001, 0, value); + + /* software reset */ + if (hw->mac.autoneg) + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, 0x9340); + else + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, 0x8140); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 2) { + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + + /* power on in Fiber mode */ + ngbe_phy_read_reg_sds_mii_yt(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + + ngbe_phy_read_reg_sds_mii_yt(hw, 0x11, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + if (value & 0x400) { /* fiber up */ + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } else { /* utp up */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /*disable 100/10base-T Self-negotiation ability*/ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &= ~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &= ~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |= value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, 
&value); + value |= value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + } else if ((value & 7) == 4) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_ext_yt(hw, 0xA003, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_ext_yt(hw, 0xA003, 0, value); + + ngbe_phy_read_reg_ext_yt(hw, 0xA004, 0, &value); + value &= ~0xf0; + value |= 0xb0; + ngbe_phy_write_reg_ext_yt(hw, 0xA004, 0, value); + + ngbe_phy_read_reg_ext_yt(hw, 0xA001, 0, &value); + value &= ~0x8000; + ngbe_phy_write_reg_ext_yt(hw, 0xA001, 0, value); + + /* power on phy */ + ngbe_phy_read_reg_sds_mii_yt(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } else if ((value & 7) == 5) {/* sgmii_to_rgmii */ + if (!hw->mac.autoneg) { + switch (speed) { + case NGBE_LINK_SPEED_1GB_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT1; + break; + case NGBE_LINK_SPEED_100_FULL: + value = NGBE_MDI_PHY_SPEED_SELECT0; + break; + case NGBE_LINK_SPEED_10_FULL: + value = 0; + break; + default: + value = NGBE_MDI_PHY_SPEED_SELECT0 | NGBE_MDI_PHY_SPEED_SELECT1; + ERROR_REPORT1(hw, NGBE_ERROR_CAUTION, + "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + goto skip_an_sr; + } + + value = 0; + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value |= 0x40; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value |= 0x2000; + } + if (speed & 
NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value |= 0x0; + } + + /* duplex full */ + value |= NGBE_MDI_PHY_DUPLEX | 0x8000; + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + + /* software reset to make the above configuration take effect */ + ngbe_phy_read_reg_sds_mii_yt(hw, 0x0, 0, &value); + value |= 0x9200; + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); +skip_an_sr: + /* power on in UTP mode */ + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_read_reg_sds_mii_yt(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt(hw, 0x0, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + } + TCALL(hw, phy.ops.check_event); + + return ret_val; +} + +u32 ngbe_phy_led_ctrl(struct ngbe_hw *hw) +{ + u16 value = 0; + + value = 0x205B; /* vendor LED mode value, page 0xd04 — presumably per internal GPHY datasheet; TODO confirm */ + TCALL(hw, phy.ops.write_reg, 16, 0xd04, value); + TCALL(hw, phy.ops.write_reg, 17, 0xd04, 0); + + /*act led blinking mode set to 60ms*/ + TCALL(hw, phy.ops.read_reg, 18, 0xd04, &value); + value = value & 0xFFFC; + value |= 0x2; + + TCALL(hw, phy.ops.write_reg, 18, 0xd04, value); + + return 0; +} + +s32 ngbe_phy_check_overtemp(struct ngbe_hw *hw) +{ + s32 status = 0; + u32 ts_state; + + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, NGBE_TS_ALARM_ST); + + if (ts_state & NGBE_TS_ALARM_ST_DALARM) + status = NGBE_ERR_UNDERTEMP; + else if (ts_state & NGBE_TS_ALARM_ST_ALARM) + status = NGBE_ERR_OVERTEMP; + + return status; +} + +s32 ngbe_phy_check_event(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + TCALL(hw, phy.ops.read_reg, 0x1d, 0xa43, &value); + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; /* NOTE(review): set unconditionally, which makes the (value & 0x10) branch below redundant — confirm intent */ + if (value & 0x10) + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + else if (value & 0x08) + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return NGBE_OK; +} + +s32 ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw) 
+{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 19, 0, &value); + + if (value & NGBE_M88E1512_LSC) + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + + if (value & NGBE_M88E1512_ANC) + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return NGBE_OK; +} + +s32 ngbe_phy_check_event_yt(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + ngbe_phy_write_reg_ext_yt(hw, 0xa000, 0, 0x0); + TCALL(hw, phy.ops.read_reg_mdi, 0x13, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + if ((value & (NGBE_YT8521S_SDS_LINK_UP | NGBE_YT8521S_SDS_LINK_DOWN)) || + (value & (NGBE_YT8521S_UTP_LINK_UP | NGBE_YT8521S_UTP_LINK_DOWN))) + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + + return NGBE_OK; +} + +s32 ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +s32 ngbe_phy_get_adv_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +s32 ngbe_phy_get_adv_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0x04, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + 
*pause_bit = (u8)((value >> 7) & 0x3); + + return status; +} + +s32 ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 0x1d, 0xa43, &value); + + status = TCALL(hw, phy.ops.read_reg, 0x1, 0, &value); + value = (value >> 5) & 0x1; + + /* if AN complete then check lp adv pause */ + status = TCALL(hw, phy.ops.read_reg, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + + return status; +} + +s32 ngbe_phy_get_lp_adv_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 5, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + + return status; +} + +s32 ngbe_phy_get_lp_adv_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0x05, 0, &value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + *pause_bit = (u8)((value >> 7) & 0x3); + + return status; +} + +s32 ngbe_phy_set_pause_adv(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg, 4, 0, value); + + return status; +} + +s32 ngbe_phy_set_pause_adv_m88e1512(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value; + s32 status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + } else { + status = 
TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + } + + return status; +} + +s32 ngbe_phy_set_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value; + s32 status = 0; + unsigned long flags; + + spin_lock_irqsave(&hw->phy_lock, flags); + status = ngbe_phy_read_reg_sds_mii_yt(hw, 0x04, 0, &value); + value &= ~0x180; /* pause bits sit at 8:7 in the fiber advertisement register */ + value |= pause_bit; + status = ngbe_phy_write_reg_sds_mii_yt(hw, 0x04, 0, value); + spin_unlock_irqrestore(&hw->phy_lock, flags); + + return status; +} + +s32 ngbe_gphy_dis_eee(struct ngbe_hw *hw) +{ + u16 val = 0; + + TCALL(hw, phy.ops.write_reg, 0x11, 0xa4b, 0x1110); + TCALL(hw, phy.ops.write_reg, 0xd, 0x0, 0x7); /* clause-22 MMD access regs 13/14: select MMD7 */ + TCALL(hw, phy.ops.write_reg, 0xe, 0x0, 0x003c); /* MMD7 reg 0x3C: EEE advertisement */ + TCALL(hw, phy.ops.write_reg, 0xd, 0x0, 0x4007); + TCALL(hw, phy.ops.write_reg, 0xe, 0x0, 0); /* advertise no EEE modes */ + + /* disable 10/100M Half Duplex */ + msleep(100); + TCALL(hw, phy.ops.read_reg, 4, 0, &val); + val &= 0xff5f; + TCALL(hw, phy.ops.write_reg, 0x4, 0x0, val); + + return 0; +} + +s32 ngbe_gphy_efuse_calibration(struct ngbe_hw *hw) +{ + u32 efuse[2]; + struct ngbe_adapter *adapter = hw->back; + + ngbe_gphy_wait_mdio_access_on(hw); + + efuse[0] = adapter->gphy_efuse[0]; + efuse[1] = adapter->gphy_efuse[1]; + + if (!efuse[0] && !efuse[1]) { + efuse[0] = 0xFFFFFFFF; + efuse[1] = 0xFFFFFFFF; + } /* no cached efuse words: fall back to all-ones before applying calibration masks */ + + /* calibration */ + efuse[0] |= 0xF0000100; + efuse[1] |= 0xFF807FFF; + + /* EODR, Efuse Output Data Register */ + ngbe_phy_write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xFFFF); + ngbe_phy_write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xFFFF); + ngbe_phy_write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xFFFF); + ngbe_phy_write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xFFFF); + + ngbe_phy_write_reg(hw, 20, 0xa46, 0x01); /* set efuse ready */ + ngbe_gphy_wait_mdio_access_on(hw); + ngbe_phy_write_reg(hw, 27, 0xa43, 0x8011); + ngbe_phy_write_reg(hw, 28, 0xa43, 
0x5737); + ngbe_gphy_dis_eee(hw); + + return 0; +} + +s32 ngbe_phy_setup(struct ngbe_hw *hw) +{ + int i; + u16 value = 0; + + for (i = 0; i < 15; i++) { + if (!rd32m(hw, NGBE_MIS_ST, NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id))) + break; + + mdelay(1); + } + + if (i == 15) { + ERROR_REPORT1(hw, NGBE_ERROR_POLLING, + "GPhy reset exceeds maximum times.\n"); + return NGBE_ERR_PHY_TIMEOUT; + } + + ngbe_gphy_efuse_calibration(hw); + TCALL(hw, phy.ops.write_reg, 20, 0xa46, 2); + ngbe_gphy_wait_mdio_access_on(hw); + + for (i = 0; i < 100; i++) { + TCALL(hw, phy.ops.read_reg, 16, 0xa42, &value); + if ((value & 0x7) == 3) + break; + usleep_range(1000, 2000); + } + + if (i == 100) + return NGBE_ERR_PHY_TIMEOUT; + + return NGBE_OK; +} + +s32 ngbe_init_phy_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_phy_info *phy = &hw->phy; + + phy->ops.init = ngbe_phy_init; + phy->ops.reset = ngbe_phy_reset; + phy->ops.read_reg = ngbe_phy_read_reg; + phy->ops.write_reg = ngbe_phy_write_reg; + phy->ops.setup_link = ngbe_phy_setup_link; + phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl; + phy->ops.check_overtemp = ngbe_phy_check_overtemp; + phy->ops.identify = ngbe_phy_identify; + phy->ops.check_event = ngbe_phy_check_event; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause; + phy->ops.set_adv_pause = ngbe_phy_set_pause_adv; + phy->ops.setup_once = ngbe_phy_setup; + + return NGBE_OK; +} + +s32 ngbe_init_external_phy_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_phy_info *phy = &hw->phy; + + if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi || + hw->phy.type == ngbe_phy_m88e1512_unknown) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_m88e1512; + phy->ops.reset = ngbe_phy_reset_m88e1512; + phy->ops.check_event = ngbe_phy_check_event_m88e1512; + phy->ops.get_adv_pause = ngbe_phy_get_adv_pause_m88e1512; 
+ phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_adv_pause_m88e1512; + phy->ops.set_adv_pause = ngbe_phy_set_pause_adv_m88e1512; + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_yt; + phy->ops.reset = ngbe_phy_reset_yt; + phy->ops.check_event = ngbe_phy_check_event_yt; + phy->ops.get_adv_pause = ngbe_phy_get_adv_pause_yt; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_adv_pause_yt; + phy->ops.set_adv_pause = ngbe_phy_set_pause_adv_yt; + } + + return NGBE_OK; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..690eaaeba4fbcb608eef6d01ee4cc7e4e09b7b2b --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_phy.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ +#ifndef _NGBE_PHY_H_ +#define _NGBE_PHY_H_ + +s32 ngbe_phy_read_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +s32 ngbe_phy_write_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); +s32 ngbe_phy_read_reg_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 *phy_data); +s32 ngbe_phy_read_reg_sds_mii_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 *phy_data); +s32 ngbe_init_phy_ops_common(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN); +u32 ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw, + u32 speed, + bool __maybe_unused wait); +u32 ngbe_phy_setup_link_yt(struct ngbe_hw *hw, + u32 speed, + bool wait); +s32 ngbe_phy_reset_m88e1512(struct ngbe_hw *hw); +s32 ngbe_phy_reset_yt(struct ngbe_hw *hw); +s32 ngbe_phy_check_overtemp(struct ngbe_hw *hw); +s32 ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw); +s32 ngbe_phy_check_event_yt(struct ngbe_hw *hw); +s32 ngbe_phy_get_adv_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_get_adv_pause_yt(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_get_lp_adv_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_get_lp_adv_pause_yt(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_set_pause_adv_m88e1512(struct ngbe_hw *hw, u16 pause_bit); +s32 ngbe_phy_set_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit); +s32 ngbe_phy_write_reg_ext_yt(struct ngbe_hw *hw, u32 addr, u32 type, u16 phy_data); +s32 ngbe_init_phy_ops_common(struct ngbe_hw *hw); +s32 ngbe_init_external_phy_ops_common(struct ngbe_hw *hw); +#endif diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..0a1d6ba3f112edd9f1aef496fdcbc3b133b0cfd5 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ptp.c @@ -0,0 +1,837 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun 
Technology Co., Ltd. */ + +#include "ngbe.h" +#include + +/* SYSTIME is defined by a fixed point system which allows the user to + * define the scale counter increment value at every level change of + * the oscillator driving SYSTIME value. The time unit is determined by + * the clock frequency of the oscillator and TIMINCA register. + * The cyclecounter and timecounter structures are used to to convert + * the scale counter into nanoseconds. SYSTIME registers need to be converted + * to ns values by use of only a right shift. + * The following math determines the largest incvalue that will fit into + * the available bits in the TIMINCA register: + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator, which changes based on the link + * speed: + * At 10Gb link or no link, the period is 6.4 ns. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * round(): discard the fractional portion of the calculation + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. 
+ * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define NGBE_INCVAL_10GB 0xCCCCCC +#define NGBE_INCVAL_1GB 0x2000000/*in Emerald all speed is same*/ +#define NGBE_INCVAL_100 0xA00000 +#define NGBE_INCVAL_10 0xC7F380 +#define NGBE_INCVAL_FPGA 0x800000 + +#define NGBE_INCVAL_SHIFT_10GB 20 +#define NGBE_INCVAL_SHIFT_1GB 22/*in Emerald all speed is same*/ +#define NGBE_INCVAL_SHIFT_100 15 +#define NGBE_INCVAL_SHIFT_10 12 +#define NGBE_INCVAL_SHIFT_FPGA 17 + +#define NGBE_OVERFLOW_PERIOD (HZ * 30) +#define NGBE_PTP_TX_TIMEOUT (HZ) + +/** + * ngbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 ngbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct ngbe_adapter *adapter = + container_of(hw_cc, struct ngbe_adapter, hw_cc); + struct ngbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIML); + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIMH) << 32; + + return stamp; +} + +static void ngbe_ptp_link_speed_adjust(struct ngbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. 
The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 *
	 * Note that when there is no link, internal DMA clock is same as when
	 * link speed is 10Gb. Set the registers correctly even when link is
	 * down to preserve the clock setting
	 */

	/* on this hardware one increment value covers all link speeds */
	*shift = NGBE_INCVAL_SHIFT_1GB;
	*incval = NGBE_INCVAL_1GB;
}

/**
 * ngbe_ptp_adjfreq
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 */
static int ngbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct ngbe_adapter *adapter =
		container_of(ptp, struct ngbe_adapter, ptp_caps);
	struct ngbe_hw *hw = &adapter->hw;
	u64 freq, incval;
	u32 diff;
	int neg_adj = 0;

	/* work with a positive magnitude and remember the sign */
	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* sync for network driver to get latest value */
	smp_mb();
	/* pairs with the WRITE_ONCE in ngbe_ptp_start_cyclecounter */
	incval = READ_ONCE(adapter->base_incval);

	/* scaled adjustment: diff = base_incval * ppb / 1e9 */
	freq = incval;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);

	incval = neg_adj ? (incval - diff) : (incval + diff);
	/* temp setting*/

	if (incval > NGBE_TSEC_1588_INC_IV(~0))
		e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
	wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval));

	return 0;
}

/**
 * ngbe_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by ns
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int ngbe_ptp_adjtime(struct ptp_clock_info *ptp,
			    s64 delta)
{
	struct ngbe_adapter *adapter =
		container_of(ptp, struct ngbe_adapter, ptp_caps);
	unsigned long flags;

	/* tmreg_lock protects the timecounter/cyclecounter pair */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_adjtime(&adapter->hw_tc, delta);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	return 0;
}

/**
 * ngbe_ptp_gettime64
 * @ptp: the ptp clock structure
 * @ts: timespec64 structure to hold the current time value
 *
 * read the timecounter and return the correct value on ns,
 * after converting it into a struct timespec64.
 */
static int ngbe_ptp_gettime64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts)
{
	struct ngbe_adapter *adapter =
		container_of(ptp, struct ngbe_adapter, ptp_caps);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->hw_tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * ngbe_ptp_settime64
 * @ptp: the ptp clock structure
 * @ts: the timespec64 containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int ngbe_ptp_settime64(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ngbe_adapter *adapter =
		container_of(ptp, struct ngbe_adapter, ptp_caps);
	u64 ns;
	unsigned long flags;

	ns = timespec64_to_ns(ts);

	/* reset the timecounter */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	return 0;
}

/**
 * ngbe_ptp_feature_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 * enable (or disable) ancillary features of the phc subsystem.
+ * our driver only supports the PPS feature on the X540 + */ +static int ngbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * ngbe_ptp_create_clock + * @adapter: the ngbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initalizes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by ngbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). + */ +static long ngbe_ptp_create_clock(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 500000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ngbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ngbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ngbe_ptp_gettime64; + adapter->ptp_caps.settime64 = ngbe_ptp_settime64; + adapter->ptp_caps.enable = ngbe_ptp_feature_enable; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if (!IS_ERR(adapter->ptp_clock)) { + e_dev_info("registered PHC device on %s\n", netdev->name); + } else { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. 
+ */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * ngbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. + */ +static void ngbe_ptp_clear_tx_timestamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + rd32(hw, NGBE_TSEC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__NGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ngbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. 
+ **/ +static void ngbe_ptp_convert_to_hwtstamp(struct ngbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ngbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ngbe_ptp_tx_hwtstamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necessary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
+ */ +static void ngbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, struct ngbe_adapter, + ptp_tx_work); + struct ngbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + NGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ngbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32(hw, NGBE_TSEC_1588_CTL); + if (tsynctxctl & NGBE_TSEC_1588_CTL_VALID) { + ngbe_ptp_tx_hwtstamp(adapter); + return; + } + + /* check timeout last in case timestamp event just occurred */ + if (timeout) { + ngbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang"); + } else { + /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * ngbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + NGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ngbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ngbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. 
The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, NGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * ngbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. 
+ * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = ngbe_ptp_read; + ngbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + /* sync for network driver to get latest value */ + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ngbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ngbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ngbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. 
Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int ngbe_ptp_set_timestamp_mode(struct ngbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = NGBE_TSEC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = NGBE_PSR_1588_CTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case 
HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + 0); + + /* enable/disable TX */ + regval = rd32(hw, NGBE_TSEC_1588_CTL); + regval &= ~NGBE_TSEC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(hw, NGBE_TSEC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(hw, NGBE_PSR_1588_CTL); + regval &= ~(NGBE_PSR_1588_CTL_ENABLED | NGBE_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + 
wr32(hw, NGBE_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(hw, NGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl); + + NGBE_WRITE_FLUSH(hw); + + /* clear TX/RX timestamp state, just to be sure */ + ngbe_ptp_clear_tx_timestamp(adapter); + rd32(hw, NGBE_PSR_1588_STMPH); + + return 0; +} + +/** + * ngbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ngbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * ngbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. 
 */
	tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL);
	/* no latched RX timestamp pending: nothing to attach to this skb */
	if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID))
		return;

	/* reading STMPH (high word) unlatches the RX timestamp registers */
	regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPL);
	regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPH) << 32;

	ngbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

/**
 * ngbe_ptp_init
 * @adapter: the ngbe private adapter structure
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void ngbe_ptp_init(struct ngbe_adapter *adapter)
{
	/* initialize the spin lock first, since the user might call the clock
	 * functions any time after we've initialized the ptp clock device.
	 */
	spin_lock_init(&adapter->tmreg_lock);

	/* obtain a ptp clock device, or re-use an existing device */
	if (ngbe_ptp_create_clock(adapter))
		return;

	/* we have a clock, so we can initialize work for timestamps now */
	INIT_WORK(&adapter->ptp_tx_work, ngbe_ptp_tx_hwtstamp_work);

	/* reset the ptp related hardware bits */
	ngbe_ptp_reset(adapter);

	/* enter the NGBE_PTP_RUNNING state */
	set_bit(__NGBE_PTP_RUNNING, &adapter->state);
}

/**
 * ngbe_ptp_reset
 * @adapter: the ngbe private board structure
 *
 * When the MAC resets, all of the hardware configuration for timesync is
 * reset. This function should be called to re-enable the device for PTP,
 * using the last known settings. However, we do lose the current clock time,
 * so we fallback to resetting it based on the kernel's realtime clock.
 *
 * This function will maintain the hwtstamp_config settings, and it retriggers
 * the SDP output if it's enabled.
 */
void ngbe_ptp_reset(struct ngbe_adapter *adapter)
{
	unsigned long flags;

	/* reset the hardware timestamping mode */
	ngbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
	ngbe_ptp_start_cyclecounter(adapter);

	/* re-seed the timecounter from the kernel realtime clock; the
	 * previous hardware clock value is lost across a MAC reset
	 */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
			 ktime_to_ns(ktime_get_real()));
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	adapter->last_overflow_check = jiffies;
}

/**
 * ngbe_ptp_suspend - stop ptp work items
 * @adapter: pointer to adapter struct
 *
 * This function suspends ptp activity, and prevents more work from being
 * generated, but does not destroy the clock device.
 */
void ngbe_ptp_suspend(struct ngbe_adapter *adapter)
{
	/* leave the NGBE_PTP_RUNNING STATE */
	if (!test_and_clear_bit(__NGBE_PTP_RUNNING, &adapter->state))
		return;

	adapter->flags2 &= ~NGBE_FLAG2_PTP_PPS_ENABLED;

	/* wait out any in-flight Tx timestamp work, then drop its skb */
	cancel_work_sync(&adapter->ptp_tx_work);
	ngbe_ptp_clear_tx_timestamp(adapter);
}

/**
 * ngbe_ptp_stop - destroy the ptp_clock device
 * @adapter: pointer to adapter struct
 *
 * Completely destroy the ptp_clock device, and disable all PTP related
 * features. Intended to be run when the device is being closed.
 */
void ngbe_ptp_stop(struct ngbe_adapter *adapter)
{
	/* first, suspend ptp activity */
	ngbe_ptp_suspend(adapter);

	/* now destroy the ptp clock device */
	if (adapter->ptp_clock) {
		ptp_clock_unregister(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_dev_info("removed PHC on %s\n",
			   adapter->netdev->name);
	}
}

/**
 * ngbe_ptp_check_pps_event
 * @adapter: the private adapter structure
 * @eicr: the interrupt cause register value
 *
 * This function is called by the interrupt routine when checking for
 * interrupts. It will check and handle a pps event.
+ */ +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter) +{ + struct ptp_clock_event event; + + event.type = PTP_CLOCK_PPS; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + + /* we don't config PPS on SDP yet, so just return. + * ptp_clock_event(adapter->ptp_clock, &event); + */ +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..d75362e46b7051ecd1b43b7dd99e533afc1be3bf --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_sysfs.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef CONFIG_HWMON +#include +#endif + +#ifdef CONFIG_HWMON +/* hwmon callback functions */ +static ssize_t ngbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + TCALL(ngbe_attr->hw, mac.ops.get_thermal_sensor_data); + + value = ngbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_alarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->alarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_dalarmthresh(struct device __always_unused *dev, 
+ struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->dalarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ngbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. + */ +static int ngbe_add_hwmon_attr(struct ngbe_adapter *adapter, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *ngbe_attr; + + n_attr = adapter->ngbe_hwmon_buff.n_hwmon; + ngbe_attr = &adapter->ngbe_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case NGBE_HWMON_TYPE_TEMP: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_temp; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_input", 0); + break; + case NGBE_HWMON_TYPE_ALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_alarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_alarmthresh", 0); + break; + case NGBE_HWMON_TYPE_DALARMTHRESH: + ngbe_attr->dev_attr.show = ngbe_hwmon_show_dalarmthresh; + snprintf(ngbe_attr->name, sizeof(ngbe_attr->name), + "temp%u_dalarmthresh", 0); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ngbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor; + ngbe_attr->hw = &adapter->hw; + ngbe_attr->dev_attr.store = NULL; + ngbe_attr->dev_attr.attr.mode = 0444; + ngbe_attr->dev_attr.attr.name = ngbe_attr->name; + + rc = device_create_file(pci_dev_to_dev(adapter->pdev), + &ngbe_attr->dev_attr); + + if (rc == 0) + ++adapter->ngbe_hwmon_buff.n_hwmon; + + return rc; +} +#endif /* CONFIG_HWMON */ + +static void 
ngbe_sysfs_del_adapter(struct ngbe_adapter __maybe_unused *adapter) +{ +#ifdef CONFIG_HWMON + int i; + + if (!adapter) + return; + + for (i = 0; i < adapter->ngbe_hwmon_buff.n_hwmon; i++) { + device_remove_file(pci_dev_to_dev(adapter->pdev), + &adapter->ngbe_hwmon_buff.hwmon_list[i].dev_attr); + } + + kfree(adapter->ngbe_hwmon_buff.hwmon_list); + + if (adapter->ngbe_hwmon_buff.device) + hwmon_device_unregister(adapter->ngbe_hwmon_buff.device); +#endif /* CONFIG_HWMON */ +} + +/* called from ngbe_main.c */ +void ngbe_sysfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_sysfs_del_adapter(adapter); +} + +/* called from ngbe_main.c */ +int ngbe_sysfs_init(struct ngbe_adapter *adapter) +{ + int rc = 0; +#ifdef CONFIG_HWMON + struct hwmon_buff *ngbe_hwmon = &adapter->ngbe_hwmon_buff; + int n_attrs; + +#endif /* CONFIG_HWMON */ + if (!adapter) + goto err; + +#ifdef CONFIG_HWMON + + /* Don't create thermal hwmon interface if no sensors present */ + if (TCALL(&adapter->hw, mac.ops.init_thermal_sensor_thresh)) + goto no_thermal; + + /* Allocation space for max attributs + * max num sensors * values (temp, alamthresh, dalarmthresh) + */ + n_attrs = 3; + ngbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), + GFP_KERNEL); + if (!ngbe_hwmon->hwmon_list) { + rc = -ENOMEM; + goto err; + } + + ngbe_hwmon->device = + hwmon_device_register(pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(ngbe_hwmon->device)) { + rc = PTR_ERR(ngbe_hwmon->device); + goto err; + } + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_TEMP); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_ALARMTHRESH); + rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_DALARMTHRESH); + if (rc) + goto err; + +no_thermal: +#endif /* CONFIG_HWMON */ + goto exit; + +err: + ngbe_sysfs_del_adapter(adapter); +exit: + return rc; +} diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h new file mode 
100644 index 0000000000000000000000000000000000000000..9f74f67e40f3b682df1e5b8fc300a8dcec9cccb8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -0,0 +1,2033 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _NGBE_TYPE_H_ +#define _NGBE_TYPE_H_ + +#include +#include +#include + +/************ NGBE_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +/* Device IDs */ +#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 +#define NGBE_DEV_ID_EM_WX1860A2 0x0101 +#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 +#define NGBE_DEV_ID_EM_WX1860A4 0x0103 +#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 +#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 +#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 +#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 +#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 +#define NGBE_DEV_ID_EM_WX1860LC 0x0109 +#define NGBE_DEV_ID_EM_WX1860A1 0x010a +#define NGBE_DEV_ID_EM_WX1860A1L 0x010b + +/* Subsystem ID */ +#define NGBE_SUBID_M88E1512_SFP 0x0003 +#define NGBE_SUBID_OCP_CARD 0x0040 +#define NGBE_SUBID_LY_M88E1512_SFP 0x0050 +#define NGBE_SUBID_M88E1512_RJ45 0x0051 +#define NGBE_SUBID_M88E1512_MIX 0x0052 +#define NGBE_SUBID_YT8521S_SFP 0x0060 +#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061 +#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062 +#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064 +#define NGBE_SUBID_LY_YT8521S_SFP 0x0070 +#define NGBE_SUBID_RGMII_FPGA 0x0080 + +#define NGBE_OEM_MASK 0x00FF + +#define NGBE_NCSI_SUP 0x8000 +#define NGBE_NCSI_MASK 0x8000 +#define NGBE_WOL_SUP 0x4000 +#define NGBE_WOL_MASK 0x4000 + +/* Error Codes */ +#define NGBE_OK 0 +#define NGBE_ERR 100 +#define NGBE_NOT_IMPLEMENTED 0x7FFFFFFF +/* (-NGBE_ERR, NGBE_ERR): reserved for non-ngbe defined error code */ +/* (-NGBE_ERR, NGBE_ERR): reserved for non-ngbe defined error code */ +#define NGBE_ERR_NOSUPP -(NGBE_ERR + 0) +#define NGBE_ERR_EEPROM -(NGBE_ERR + 1) +#define 
NGBE_ERR_EEPROM_CHECKSUM -(NGBE_ERR + 2)
+#define NGBE_ERR_PHY -(NGBE_ERR + 3)
+#define NGBE_ERR_CONFIG -(NGBE_ERR + 4)
+#define NGBE_ERR_PARAM -(NGBE_ERR + 5)
+#define NGBE_ERR_MAC_TYPE -(NGBE_ERR + 6)
+#define NGBE_ERR_UNKNOWN_PHY -(NGBE_ERR + 7)
+#define NGBE_ERR_LINK_SETUP -(NGBE_ERR + 8)
+/* "09" would be an invalid octal constant (digit 9); use decimal 9 */
+#define NGBE_ERR_ADAPTER_STOPPED -(NGBE_ERR + 9)
+#define NGBE_ERR_INVALID_MAC_ADDR -(NGBE_ERR + 10)
+#define NGBE_ERR_DEVICE_NOT_SUPPORTED -(NGBE_ERR + 11)
+#define NGBE_ERR_MASTER_REQUESTS_PENDING -(NGBE_ERR + 12)
+#define NGBE_ERR_INVALID_LINK_SETTINGS -(NGBE_ERR + 13)
+#define NGBE_ERR_AUTONEG_NOT_COMPLETE -(NGBE_ERR + 14)
+#define NGBE_ERR_RESET_FAILED -(NGBE_ERR + 15)
+#define NGBE_ERR_SWFW_SYNC -(NGBE_ERR + 16)
+#define NGBE_ERR_PHY_ADDR_INVALID -(NGBE_ERR + 17)
+#define NGBE_ERR_I2C -(NGBE_ERR + 18)
+#define NGBE_ERR_SFP_NOT_SUPPORTED -(NGBE_ERR + 19)
+#define NGBE_ERR_SFP_NOT_PRESENT -(NGBE_ERR + 20)
+#define NGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(NGBE_ERR + 21)
+#define NGBE_ERR_NO_SAN_ADDR_PTR -(NGBE_ERR + 22)
+#define NGBE_ERR_FDIR_REINIT_FAILED -(NGBE_ERR + 23)
+#define NGBE_ERR_EEPROM_VERSION -(NGBE_ERR + 24)
+#define NGBE_ERR_NO_SPACE -(NGBE_ERR + 25)
+#define NGBE_ERR_OVERTEMP -(NGBE_ERR + 26)
+#define NGBE_ERR_UNDERTEMP -(NGBE_ERR + 27)
+#define NGBE_ERR_FC_NOT_NEGOTIATED -(NGBE_ERR + 28)
+#define NGBE_ERR_FC_NOT_SUPPORTED -(NGBE_ERR + 29)
+#define NGBE_ERR_SFP_SETUP_NOT_COMPLETE -(NGBE_ERR + 30)
+#define NGBE_ERR_PBA_SECTION -(NGBE_ERR + 31)
+#define NGBE_ERR_INVALID_ARGUMENT -(NGBE_ERR + 32)
+#define NGBE_ERR_HOST_INTERFACE_COMMAND -(NGBE_ERR + 33)
+#define NGBE_ERR_OUT_OF_MEM -(NGBE_ERR + 34)
+#define NGBE_ERR_FEATURE_NOT_SUPPORTED -(NGBE_ERR + 36)
+#define NGBE_ERR_EEPROM_PROTECTED_REGION -(NGBE_ERR + 37)
+#define NGBE_ERR_FDIR_CMD_INCOMPLETE -(NGBE_ERR + 38)
+#define NGBE_ERR_FLASH_LOADING_FAILED -(NGBE_ERR + 39)
+#define NGBE_ERR_XPCS_POWER_UP_FAILED -(NGBE_ERR + 40)
+#define NGBE_ERR_FW_RESP_INVALID -(NGBE_ERR + 41)
+#define NGBE_ERR_PHY_INIT_NOT_DONE
-(NGBE_ERR + 42) +#define NGBE_ERR_TIMEOUT -(NGBE_ERR + 43) +#define NGBE_ERR_TOKEN_RETRY -(NGBE_ERR + 44) +#define NGBE_ERR_REGISTER -(NGBE_ERR + 45) +#define NGBE_ERR_MBX -(NGBE_ERR + 46) +#define NGBE_ERR_MNG_ACCESS_FAILED -(NGBE_ERR + 47) +#define NGBE_ERR_PHY_TYPE -(NGBE_ERR + 48) +#define NGBE_ERR_PHY_TIMEOUT -(NGBE_ERR + 49) + +/* internal phy reg_offset [0,31] */ +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) + +/* INTERNAL PHY CONTROL */ +#define NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET 31 +#define NGBE_INTERNAL_PHY_OFFSET_MAX 32 +#define NGBE_INTERNAL_PHY_ID 0x000732 +#define NGBE_INTPHY_INT_LSC 0x0010 +#define NGBE_INTPHY_INT_ANC 0x0008 + +/* PHY MDI STANDARD CONFIG */ +#define NGBE_MDI_PHY_ID1_OFFSET 2 +#define NGBE_MDI_PHY_ID2_OFFSET 3 +#define NGBE_MDI_PHY_ID_MASK 0xFFFFFC00U + +#define NGBE_M88E1512_PHY_ID 0x005043 +#define NGBE_MDI_PHY_RESET 0x8000 +#define NGBE_M88E1512_RGM_TTC 0x0010 +#define NGBE_M88E1512_RGM_RTC 0x0020 +#define NGBE_M88E1512_INT_EN 0x0080 +#define NGBE_M88E1512_INT_POL 0x0800 +#define NGBE_M88E1512_INT_LSC 0x0400 +#define NGBE_M88E1512_INT_ANC 0x0800 +#define NGBE_M88E1512_POWER 0x0800 + +#define NGBE_M88E1512_1000BASET_FULL 0x0200 +#define NGBE_M88E1512_1000BASET_HALF 0x0100 +#define NGBE_M88E1512_100BASET_FULL 0x0100 +#define NGBE_M88E1512_100BASET_HALF 0x0080 +#define NGBE_M88E1512_10BASET_FULL 0x0040 +#define NGBE_M88E1512_10BASET_HALF 0x0020 +#define NGBE_M88E1512_ANC 0x0800 +#define NGBE_M88E1512_LSC 0x0400 + +#define NGBE_PHY_RST_WAIT_PERIOD 50 + +/* yt8521s&yt8531s reg */ +#define NGBE_YT8521S_PHY_ID 0x011a +#define NGBE_YT8531S_PHY_ID 0xe91a +#define NGBE_YT_PHY_POWER 0x0800 +#define NGBE_YT8521S_SDS_LINK_UP 0x4 +#define NGBE_YT8521S_SDS_LINK_DOWN 0x8 +#define NGBE_YT8521S_UTP_LINK_UP 0x400 +#define NGBE_YT8521S_UTP_LINK_DOWN 0x800 + +/* Physical layer type */ +typedef u32 ngbe_physical_layer; +#define NGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define NGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define 
NGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 + +#define NGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define NGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define NGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define NGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +#define NGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define NGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +#define NGBE_MSCC_SADDR ((0x1U) << 18) +#define NGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define NGBE_MSCC_BUSY ((0x1U) << 22) +#define NGBE_MDIO_CLK(v) ((0x7 & (v)) << 19) + +#define NGBE_MSCA 0x11200 +#define NGBE_MSCC 0x11204 + +#define NGBE_MDIO_TIMEOUT 1000 + +/* Link speed */ +#define NGBE_LINK_SPEED_UNKNOWN 0 +#define NGBE_LINK_SPEED_100_FULL 1 +#define NGBE_LINK_SPEED_1GB_FULL 2 +#define NGBE_LINK_SPEED_10_FULL 8 +#define NGBE_LINK_SPEED_AUTONEG 0xB + +#define NGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define NGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define NGBE_MDI_PHY_DUPLEX 0x0100 +#define NGBE_MDI_PHY_RESTART_AN 0x0200 +#define NGBE_MDI_PHY_ANE 0x1000 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define NGBE_TS_CTL 0x10300 +#define NGBE_TS_EN 0x10304 +#define NGBE_TS_ST 0x10308 +#define NGBE_TS_ALARM_THRE 0x1030C +#define NGBE_TS_DALARM_THRE 0x10310 +#define NGBE_TS_INT_EN 0x10314 +#define NGBE_TS_ALARM_ST 0x10318 + +#define NGBE_TS_ALARM_ST_DALARM 0x00000002U +#define NGBE_TS_ALARM_ST_ALARM 0x00000001U +#define NGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define NGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U +#define NGBE_TS_EN_ENA 0x00000001U +#define NGBE_TS_ST_DATA_OUT_MASK 0x000003FFU + +/* read register */ +#define NGBE_FAILED_READ_REG 0xffffffffU + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define NGBE_MIS_PWR 0x10000 +#define NGBE_MIS_CTL 0x10004 +#define NGBE_MIS_PF_SM 0x10008 +#define NGBE_MIS_RST 0x1000C +#define NGBE_MIS_ST 0x10028 +#define NGBE_MIS_SWSM 0x1002C +#define NGBE_MIS_RST_ST 0x10030 + +#define NGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define NGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U 
+#define NGBE_MIS_RST_GLOBAL_RST 0x80000000U +#define NGBE_MIS_ST_MNG_VETO 0x00000100U +#define NGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define NGBE_MIS_RST_SW_RST 0x00000001U + +#define NGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define NGBE_MIS_SWSM_SMBI 1 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 + +#define NGBE_MIS_ST_LAN0_ECC 0x00010000U +#define NGBE_MIS_ST_LAN1_ECC 0x00020000U + +#define NGBE_MIS_RST_LAN0_RST 0x00000002U +#define NGBE_MIS_RST_LAN1_RST 0x00000004U +#define NGBE_MIS_RST_LAN2_RST 0x00000008U +#define NGBE_MIS_RST_LAN3_RST 0x00000010U + +#define NGBE_MIS_ST_GPHY_IN_RST(_r) (0x00000200U << (_r)) + +/* FMGR Registers */ +#define NGBE_SPI_CMD 0x10104 +#define NGBE_SPI_DATA 0x10108 +#define NGBE_SPI_STATUS 0x1010C +#define NGBE_SPI_USR_CMD 0x10110 +#define NGBE_SPI_CMDCFG0 0x10114 +#define NGBE_SPI_CMDCFG1 0x10118 +#define NGBE_SPI_ILDR_STATUS 0x10120 +#define NGBE_SPI_ILDR_SWPTR 0x10124 +#define NGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define NGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define NGBE_SPI_CMD_ADDR(_v) (((_v) & 0x7FFFFF)) + +#define NGBE_SPI_ILDR_STATUS_SW_RESET 0x00000800U /* software reset done */ +#define NGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define NGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset done */ + +#define NGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) +#define NGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define NGBE_SPI_STATUS_OPDONE ((0x1)) +#define NGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) +#define NGBE_SPI_TIMEOUT 1000 + +/* ETH MAC */ +#define NGBE_MAC_TX_CFG 0x11000 +#define NGBE_MAC_RX_CFG 0x11004 +#define NGBE_MAC_PKT_FLT 0x11008 +#define NGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define NGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define NGBE_MAC_WDG_TIMEOUT 0x1100C +#define NGBE_MAC_TX_FLOW_CTRL 0x11070 +#define 
NGBE_MAC_RX_FLOW_CTRL 0x11090 +#define NGBE_MAC_INT_ST 0x110B0 +#define NGBE_MAC_INT_EN 0x110B4 + +#define NGBE_MAC_TX_CFG_TE 0x00000001U +#define NGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define NGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define NGBE_MAC_RX_CFG_RE 0x00000001U +#define NGBE_MAC_RX_CFG_JE 0x00000100U +#define NGBE_MAC_RX_CFG_LM 0x00000400U +#define NGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ + +/* statistic */ +#define NGBE_MDIO_CLAUSE_SELECT 0x11220 +#define NGBE_MMC_CONTROL 0x11800 +#define NGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define NGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define NGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define NGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define NGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define NGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define NGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define NGBE_MAC_LXOFFRXC 0x11988 +#define NGBE_MAC_PXOFFRXC 0x119DC +#define NGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define NGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C + +#define NGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define NGBE_MMC_CONTROL_UP 0x700 + +/* GPIO Registers */ +#define NGBE_GPIO_DR 0x14800 +#define NGBE_GPIO_DDR 0x14804 +#define NGBE_GPIO_CTL 0x14808 +#define NGBE_GPIO_INTEN 0x14830 +#define NGBE_GPIO_INTMASK 0x14834 +#define NGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define NGBE_GPIO_POLARITY 0x1483C +#define NGBE_GPIO_INTSTATUS 0x14840 +#define NGBE_GPIO_EOI 0x1484C + +/*GPIO bit */ +#define NGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define NGBE_BME_CTL 0x12020 +#define NGBE_PX_MISC_IC 0x100 +#define NGBE_PX_MISC_ICS 0x104 +#define NGBE_PX_MISC_IEN 0x108 +#define NGBE_PX_INTA 0x110 +#define NGBE_PX_GPIE 0x118 +#define NGBE_PX_IC 0x120 +#define NGBE_PX_ICS 0x130 +#define NGBE_PX_IMS 0x140 +#define NGBE_PX_IMC 0x150 +#define NGBE_PX_ISB_ADDR_L 0x160 +#define NGBE_PX_ISB_ADDR_H 0x164 +#define NGBE_PX_TRANSACTION_PENDING 
0x168 +#define NGBE_PX_ITRSEL 0x180 +#define NGBE_PX_MISC_IVAR 0x4FC +#define NGBE_PX_ITR(_i) (0x200 + (_i) * 4) /* [0,8] */ +#define NGBE_PX_IVAR(_i) (0x500 + (_i) * 4) /* [0,3] */ +#define NGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) /* [0,7] */ + +#define NGBE_PX_GPRC 0x12504 +#define NGBE_PX_GORC_LSB 0x12508 +#define NGBE_PX_GORC_MSB 0x1250C + +#define NGBE_PX_GPTC 0x18308 +#define NGBE_PX_GOTC_LSB 0x1830C +#define NGBE_PX_GOTC_MSB 0x18310 + +#define NGBE_PX_RR_CFG_VLAN 0x80000000U +#define NGBE_PX_MISC_IC_OVER_HEAT 0x10000000U +#define NGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define NGBE_PX_MISC_IC_PHY 0x00040000U +#define NGBE_PX_MISC_IC_GPIO 0x04000000U +#define NGBE_PX_MISC_IC_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_IC_INT_ERR 0x00100000U +#define NGBE_PX_MISC_IC_DEV_RST 0x00000400U +#define NGBE_PX_MISC_IC_STALL 0x00001000U +#define NGBE_PX_MISC_IC_ETH_EVENT 0x00020000U + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define NGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ + +/* Extended Interrupt Enable Set */ +#define NGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define NGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_IEN_STALL 0x00001000U +#define NGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define NGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define NGBE_PX_MISC_IEN_I2C 0x00010000U +#define NGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define NGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define NGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define NGBE_PX_MISC_IEN_SPI 0x00200000U +#define NGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_IEN_GPIO 0x04000000U +#define NGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_IEN_TIMER 
0x80000000U + +#define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_ETH_LKDN | \ + NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_EVENT | \ + NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_ETH_AN | \ + NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IEN_VF_MBOX | \ + NGBE_PX_MISC_IEN_GPIO | \ + NGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ + NGBE_PX_MISC_IEN_STALL | \ + NGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + NGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define NGBE_PX_GPIE_MODEL 0x00000001U + +/* Interrupt Vector Allocation Registers */ +#define NGBE_PX_IVAR_REG_NUM 64 +#define NGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define NGBE_MAX_EITR 0x00007FFCU +#define NGBE_PX_ITR_CNT_WDIS 0x80000000U + +/* MSI-X capability fields masks */ +#define NGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/*********************** Transmit DMA registers **************************/ +/* transmit DMA Registers */ +#define NGBE_TDM_CTL 0x18000 +#define NGBE_TDM_POOL_TE 0x18004 +#define NGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define NGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define NGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define NGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) + +/* statistic */ +#define NGBE_TDM_DRP_CNT 0x18300 +#define NGBE_TDM_SEC_DRP 0x18304 +#define NGBE_TDM_PKT_CNT 0x18308 +#define NGBE_TDM_BYTE_CNT_L 0x1830C +#define NGBE_TDM_BYTE_CNT_H 0x18310 +#define NGBE_TDM_OS2BMC_CNT 0x18314 + +/* TDM CTL BIT */ +#define NGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ + +/* Transmit Config masks */ +#define NGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define NGBE_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. 
wr-bk flushing */ +#define NGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define NGBE_PX_TR_CFG_THRE_SHIFT 8 +#define NGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ + +/**************************** Receive DMA registers **************************/ +/* Receive DMA Registers */ +#define NGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define NGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define NGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define NGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) + +/* receive control */ +#define NGBE_RDM_ARB_CTL 0x12000 +#define NGBE_RDM_POOL_RE 0x12004 + +#define NGBE_RDM_PF_QDE 0x12080 +#define NGBE_RDM_PF_HIDE 0x12090 + +/* statistic */ +#define NGBE_RDM_DRP_PKT 0x12500 +#define NGBE_RDM_PKT_CNT 0x12504 +#define NGBE_RDM_BYTE_CNT_L 0x12508 +#define NGBE_RDM_BYTE_CNT_H 0x1250C +#define NGBE_RDM_BMC2OS_CNT 0x12510 + +/* PX_RR_CFG bit definitions */ +#define NGBE_PX_RR_CFG_RR_EN 0x00000001U +#define NGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define NGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define NGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define NGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define NGBE_PX_RR_CFG_RR_THER_SHIFT 16 +#define NGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution */ + +/************************* Port Registers ************************************/ +/* port cfg Registers */ +#define NGBE_CFG_PORT_CTL 0x14400 +#define NGBE_CFG_PORT_ST 0x14404 +#define NGBE_CFG_EX_VTYPE 0x14408 +#define NGBE_CFG_TCP_TIME 0x14420 +#define NGBE_CFG_LED_CTL 0x14424 +#define NGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) /* [0,3] */ +#define NGBE_CFG_LAN_SPEED 0x14440 + +/* TPH registers */ +#define NGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define NGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define NGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header 
*/ +#define NGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ + +/* Status Bit */ +#define NGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000300U & (_r)) >> 8) +#define NGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00001000U /* number of TVs */ + +/* port cfg bit */ +#define NGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define NGBE_CFG_PORT_CTL_NUM_VT_8 0x00001000U +#define NGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define NGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define NGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define NGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ + +/* LED CTL Bit */ +#define NGBE_CFG_LED_CTL_LINK_10M_SEL 0x00000008U +#define NGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000004U +#define NGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000002U +#define NGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 + +/* LED modes */ +#define NGBE_LED_LINK_10M NGBE_CFG_LED_CTL_LINK_10M_SEL +#define NGBE_LED_LINK_1G NGBE_CFG_LED_CTL_LINK_1G_SEL +#define NGBE_LED_LINK_100M NGBE_CFG_LED_CTL_LINK_100M_SEL + +/************************************** MNG ********************************/ +#define NGBE_MNG_FW_SM 0x1E000 +#define NGBE_MNG_SW_SM 0x1E004 +#define NGBE_MNG_SWFW_SYNC 0x1E008 +#define NGBE_MNG_MBOX 0x1E100 +#define NGBE_MNG_MBOX_CTL 0x1E044 +#define NGBE_MNG_OS2BMC_CNT 0x1E094 +#define NGBE_MNG_BMC2OS_CNT 0x1E090 + +/* SW_FW_SYNC definitions */ +#define NGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define NGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define NGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define NGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define NGBE_MNG_MBOX_CTL_SWACK 0x2 +#define NGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define NGBE_MNG_MBOX_CTL_FWACK 0x8 + +/* SW Semaphore Register bitmasks */ +#define NGBE_MNG_SW_SM_SM 0x00000001U /* software Semaphore */ + +/********************************* RSEC **************************************/ +/* general rsec */ +#define NGBE_RSEC_CTL 0x17000 +#define NGBE_RSEC_ST 0x17004 +/* general rsec fields */ +#define NGBE_RSEC_CTL_RX_DIS 0x00000002U +#define NGBE_RSEC_ST_RSEC_RDY 
0x00000001U +#define NGBE_RSEC_CTL_SAVE_MAC_ERR 0x00000040U +#define NGBE_RSEC_CTL_CRC_STRIP 0x00000004U + +/***************************** RDB registers *********************************/ +#define NGBE_RDB_PB_CTL 0x19000 + +/* Flow Control Registers */ +#define NGBE_RDB_RFCV 0x19200 +#define NGBE_RDB_PFCMACDAL 0x19210 +#define NGBE_RDB_PFCMACDAH 0x19214 +#define NGBE_RDB_LXOFFTXC 0x19218 +#define NGBE_RDB_LXONTXC 0x1921C +#define NGBE_RDB_RFCL 0x19220 +#define NGBE_RDB_RFCH 0x19260 +#define NGBE_RDB_RFCRT 0x192A0 +#define NGBE_RDB_RFCC 0x192A4 + +/* receive packet buffer */ +#define NGBE_RDB_PB_WRAP 0x19004 +#define NGBE_RDB_PB_SZ 0x19020 + +/* lli interrupt */ +#define NGBE_RDB_LLI_THRE 0x19080 + +#define NGBE_RDB_RFCH 0x19260 +#define NGBE_RDB_RFCRT 0x192A0 +#define NGBE_RDB_RFCC 0x192A4 + +/* statistic */ +#define NGBE_RDB_MPCNT 0x19040 +#define NGBE_RDB_PKT_CNT 0x19060 +#define NGBE_RDB_REPLI_CNT 0x19064 +#define NGBE_RDB_DRP_CNT 0x19068 +#define NGBE_RDB_LXONTXC 0x1921C +#define NGBE_RDB_LXOFFTXC 0x19218 +#define NGBE_RDB_PFCMACDAL 0x19210 +#define NGBE_RDB_PFCMACDAH 0x19214 +#define NGBE_RDB_TXSWERR 0x1906C +#define NGBE_RDB_TXSWERR_TB_FREE 0x3FF + +/* ring assignment */ +#define NGBE_RDB_SYN_CLS 0x19130 +#define NGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define NGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) /* [0,7] */ +#define NGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) /* [0,31] */ +#define NGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) /* [0,9] */ +#define NGBE_RDB_RA_CTL 0x194F4 +#define NGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define NGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define NGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*8 of these (0-7)*/ + +/* Receive Config masks */ +#define NGBE_RDB_PB_CTL_PBEN (0x80000000) /* Enable Receiver */ + +#define NGBE_RDB_PB_SZ_SHIFT 10 + +/* FCCFG Bit Masks */ +#define NGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ + +/* 
rdb_pl_cfg reg mask */ +#define NGBE_RDB_PL_CFG_L4HDR 0x2 +#define NGBE_RDB_PL_CFG_L3HDR 0x4 +#define NGBE_RDB_PL_CFG_L2HDR 0x8 +#define NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define NGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 + +#define NGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define NGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define NGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define NGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +/* FCRTL Bit Masks */ +#define NGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define NGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ + +/* FCCFG Bit Masks */ +#define NGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ + +/* Packet Buffer Initialization */ +#define NGBE_MAX_PACKET_BUFFERS 8 + +/****************************** TDB ******************************************/ +#define NGBE_TDB_RFCS 0x1CE00 +#define NGBE_TDB_PB_SZ 0x1CC00 +#define NGBE_TDB_PBRARB_CTL 0x1CD00 + +#define NGBE_TDB_PB_SZ_MAX 0x00005000U /* 20KB Packet Buffer */ +#define NGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ + +/* statistic */ +#define NGBE_TDB_OUT_PKT_CNT 0x1CF00 +#define NGBE_TDB_MNG_PKT_CNT 0x1CF04 +#define NGBE_TDB_LB_PKT_CNT 0x1CF08 +#define NGBE_TDB_MNG_LARGE_DOP_CNT 0x1CF0C + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define NGBE_TSEC_CTL 0x1D000 +#define NGBE_TSEC_ST 0x1D004 +#define NGBE_TSEC_BUF_AF 0x1D008 +#define NGBE_TSEC_BUF_AE 0x1D00C +#define NGBE_TSEC_MIN_IFG 0x1D020 + +/* 1588 */ +#define NGBE_TSEC_1588_CTL 0x11F00 /* Tx Time Sync Control reg */ +#define NGBE_TSEC_1588_STMPL 0x11F04 /* Tx timestamp value Low */ +#define NGBE_TSEC_1588_STMPH 0x11F08 /* Tx timestamp value High */ +#define NGBE_TSEC_1588_SYSTIML 0x11F0C /* System time register Low */ +#define 
NGBE_TSEC_1588_SYSTIMH 0x11F10 /* System time register High */ +#define NGBE_TSEC_1588_INC 0x11F14 /* Increment attributes reg */ +#define NGBE_TSEC_1588_INC_IV(v) ((v) & 0x7FFFFFF) + +#define NGBE_TSEC_1588_ADJL 0x11F18 /* Time Adjustment Offset reg Low */ +#define NGBE_TSEC_1588_ADJH 0x11F1C /* Time Adjustment Offset reg High*/ + +#define NGBE_TSEC_1588_INT_ST 0x11F20 +#define NGBE_TSEC_1588_INT_EN 0x11F24 + +/* 1588 fields */ +#define NGBE_TSEC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define NGBE_TSEC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define NGBE_TSEC_1588_AUX_CTL 0x11F28 +#define NGBE_TSEC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_L(i) (0x11F3C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_H(i) (0x11F40 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_L(i) (0x11F4C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_H(i) (0x11F50 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_SDP(n) (0x11F5C + ((n) * 4)) /* [0,3] */ + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define NGBE_PSR_CTL 0x15000 +#define NGBE_PSR_MAX_SZ 0x15020 +#define NGBE_PSR_VLAN_CTL 0x15088 +#define NGBE_PSR_VM_CTL 0x151B0 +#define NGBE_PSR_PKT_CNT 0x151B8 +#define NGBE_PSR_MNG_PKT_CNT 0x151BC +#define NGBE_PSR_DBG_DOP_CNT 0x151C0 +#define NGBE_PSR_MNG_DOP_CNT 0x151C4 +#define NGBE_PSR_VM_FLP_L 0x151C8 + +/* mcasst/ucast overflow tbl */ +#define NGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define NGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* Wake up registers */ +#define NGBE_PSR_WKUP_CTL 0x15B80 +#define NGBE_PSR_WKUP_IPV 0x15B84 + +/* vlan tbl */ +#define NGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* vlan switch */ +#define NGBE_PSR_VLAN_SWC 0x16220 +#define NGBE_PSR_VLAN_SWC_VM_L 0x16224 +#define NGBE_PSR_VLAN_SWC_IDX 0x16230 /* 32 vlan entries */ + +/* 
Manangement */ +#define NGBE_PSR_MNG_FLEX_SEL 0x1582C +#define NGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define NGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define NGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define NGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) + +/* Wake up registers */ +#define NGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define NGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define NGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_CTL 0x15CFC + +/* mac switcher */ +#define NGBE_PSR_MAC_SWC_AD_L 0x16200 +#define NGBE_PSR_MAC_SWC_AD_H 0x16204 +#define NGBE_PSR_MAC_SWC_VM 0x16208 +#define NGBE_PSR_MAC_SWC_IDX 0x16210 + +/* mac switcher */ +#define NGBE_PSR_MAC_SWC_AD_L 0x16200 +#define NGBE_PSR_MAC_SWC_AD_H 0x16204 +#define NGBE_PSR_MAC_SWC_VM 0x16208 +#define NGBE_PSR_MAC_SWC_IDX 0x16210 + +/* RAH */ +#define NGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) +#define NGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define NGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U +#define NGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* VLAN pool filtering masks */ +#define NGBE_PSR_VLAN_SWC_ENTRIES 32 +#define NGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */ + +/* Header split receive */ +#define NGBE_PSR_CTL_UPE 0x00000200U +#define NGBE_PSR_CTL_MO 0x00000060U +#define NGBE_PSR_CTL_MFE 0x00000080U +#define NGBE_PSR_CTL_MPE 0x00000100U +#define NGBE_PSR_CTL_SW_EN 0x00040000U +#define NGBE_PSR_CTL_BAM 0x00000400U +#define NGBE_PSR_CTL_PCSD 0x00002000U +#define NGBE_PSR_CTL_TPE 0x00000010U +#define NGBE_PSR_CTL_MO_SHIFT 5 + +/* VLAN Control Bit Masks */ +#define NGBE_PSR_VLAN_CTL_VET 
0x0000FFFFU /* bits 0-15 */ +#define NGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define NGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define NGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 contorl */ +#define NGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) + +/* VMOLR bitmasks */ +#define NGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define NGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define NGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define NGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define NGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define NGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define NGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define NGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define NGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define NGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define NGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* Wake Up Filter Control Bit */ +#define NGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define NGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define 
NGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset assertion */ +#define NGBE_PSR_MAX_SZ 0x15020 + +/* 1588 fields */ +#define NGBE_TSEC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define NGBE_TSEC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +/* 1588 */ +#define NGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define NGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define NGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define NGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define NGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define NGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define NGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define NGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define NGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define NGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define NGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define NGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define NGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define NGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define NGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define 
NGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define NGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define NGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define NGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define NGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define NGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define NGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* etype switcher 1st stage */ +#define NGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define NGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define NGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define NGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define NGBE_PSR_ETYPE_SWC_POOL_ENABLE BIT(26) /* bit 26 */ +#define NGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 + +/* ETQF filter list */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0 +#define NGBE_PSR_ETYPE_SWC_FILTER_FCOE 2 +#define NGBE_PSR_ETYPE_SWC_FILTER_1588 3 +#define NGBE_PSR_ETYPE_SWC_FILTER_FIP 4 +#define NGBE_PSR_ETYPE_SWC_FILTER_LLDP 5 +#define NGBE_PSR_ETYPE_SWC_FILTER_LACP 6 +#define NGBE_PSR_ETYPE_SWC_FILTER_FC 7 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define NGBE_TDM_PB_THRE 0x18020 +#define NGBE_TDM_LLQ 0x18040 +#define NGBE_TDM_ETYPE_LB_L 0x18050 +#define NGBE_TDM_ETYPE_AS_L 0x18058 +#define NGBE_TDM_MAC_AS_L 0x18060 +#define NGBE_TDM_VLAN_AS_L 0x18070 +#define NGBE_TDM_TCP_FLG_L 0x18078 +#define NGBE_TDM_TCP_FLG_H 0x1807C +#define NGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +/* qos */ +#define 
NGBE_TDM_PBWARB_CTL 0x18200 + +/* etag */ +#define NGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 8 of these 0 - 7 */ + + +/* PCI Bus Info */ +#define NGBE_PCI_LINK_STATUS 0xB2 +#define NGBE_PCI_LINK_WIDTH 0x3F0 +#define NGBE_PCI_LINK_WIDTH_1 0x10 +#define NGBE_PCI_LINK_WIDTH_2 0x20 +#define NGBE_PCI_LINK_WIDTH_4 0x40 +#define NGBE_PCI_LINK_WIDTH_8 0x80 +#define NGBE_PCI_LINK_SPEED 0xF +#define NGBE_PCI_LINK_SPEED_2500 0x1 +#define NGBE_PCI_LINK_SPEED_5000 0x2 +#define NGBE_PCI_LINK_SPEED_8000 0x3 + +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 +#define NGBE_MAX_MTA 128 +#define NGBE_MAX_VFTA_ENTRIES 128 + +/****************** Manageablility Host Interface defines ********************/ +#define NGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define NGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ +#define NGBE_HI_CMD_TIMEOUT 5000 /* Process HI command limit */ +#define NGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ +#define NGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define NGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ +#define NGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/*************************** Flash region definition *************************/ +/* Checksum and EEPROM pointers */ +#define NGBE_CALSUM_CAP_STATUS 0x10224 +#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C + +#define NGBE_DEVICE_CAPS 0x1C +#define NGBE_EEPROM_VERSION_L 0x1D +#define NGBE_EEPROM_VERSION_H 0x1E +#define NGBE_EEPROM_LAST_WORD 0x800 +#define NGBE_EEPROM_CHECKSUM 0x2F +#define NGBE_EEPROM_SUM 0xBABA +#define NGBE_CHECKSUM_CAP_ST_PASS 0x80658383 +#define NGBE_CALSUM_COMMAND 0xE9 + +#define NGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define NGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define NGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define NGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define NGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define NGBE_FW_LESM_STATE_1 0x1 +#define NGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM 
Enable bit */ +#define NGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define NGBE_FW_PATCH_VERSION_4 0x7 +#define NGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define NGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define NGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define NGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. SAN MAC block */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define NGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define NGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define NGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define NGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define NGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define NGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define NGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define NGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define NGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define NGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define NGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define NGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define NGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define NGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ 
+#define NGBE_TXD_RS 0x08000000U /* Report Status */ +#define NGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define NGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define NGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define NGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define NGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define NGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define NGBE_TXD_CC 0x00000080U /* Check Context */ +#define NGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define NGBE_TXD_IIPCS 0x00000400U +#define NGBE_TXD_EIPCS 0x00000800U +#define NGBE_TXD_L4CS 0x00000200U +#define NGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define NGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define NGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define NGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define NGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define NGBE_TXD_ENC_SHIFT 15 + +#define NGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define NGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define NGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define NGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define NGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define NGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define NGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define NGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define NGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define NGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define NGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define NGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define NGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define NGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define NGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define NGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define NGBE_TXD_TUNNEL_TYPE_SHIFT 
11 /* Adv Tx Desc Tunnel Type shift */ +#define NGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define NGBE_TXD_TUNNEL_UDP (0x0ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) +#define NGBE_TXD_TUNNEL_GRE (0x1ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) + +#define NGBE_MAX_TXD_PWR 14 +#define NGBE_MAX_DATA_PER_TXD BIT(NGBE_MAX_TXD_PWR) +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), NGBE_MAX_DATA_PER_TXD) +#define NGBE_TXD_CMD (NGBE_TXD_EOP | NGBE_TXD_RS) + +/******************* Receive Descriptor bit definitions **********************/ +#define NGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define NGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define NGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define NGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define NGBE_RXD_NEXTP_SHIFT 0x00000004U +#define NGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define NGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define NGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define NGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define NGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define NGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define NGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define NGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define NGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define NGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define NGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define NGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define NGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define NGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define NGBE_RXD_STAT_VEXT 0x00000800U /* 1st VLAN found */ +#define NGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency Int */ +#define NGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define NGBE_RXD_STAT_SECP 0x00008000U /* Security Processing 
*/ +#define NGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define NGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define NGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define NGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define NGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define NGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define NGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define NGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define NGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define NGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define NGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define NGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define NGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define NGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define NGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define NGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define NGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define NGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define NGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define NGBE_RXD_TPID_MASK 0x000001C0U +#define NGBE_RXD_TPID_SHIFT 6 +#define NGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define NGBE_RXD_RSCCNT_MASK 0x001E0000U +#define NGBE_RXD_RSCCNT_SHIFT 17 +#define NGBE_RXD_HDRBUFLEN_SHIFT 5 +#define NGBE_RXD_SPLITHEADER_EN 0x00001000U +#define NGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define NGBE_RXD_RSSTYPE_NONE 0x00000000U +#define NGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define NGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define NGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define NGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define NGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define NGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define NGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define NGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define 
NGBE_RXD_ERR_FRAME_ERR_MASK NGBE_RXD_ERR_RXE + +#define NGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) + +#define NGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define NGBE_PTYPE_TUN_IPV4 (0x80) +#define NGBE_PTYPE_TUN_IPV6 (0xC0) +#define NGBE_MAX_TUNNEL_HDR_LEN (0x50) + +/* PKT for TUN */ +#define NGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define NGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define NGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define NGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define NGBE_PTYPE_PKT_MAC (0x10) +#define NGBE_PTYPE_PKT_IP (0x20) +#define NGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define NGBE_PTYPE_TYP_MAC (0x01) +#define NGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define NGBE_PTYPE_TYP_FIP (0x03) +#define NGBE_PTYPE_TYP_LLDP (0x04) +#define NGBE_PTYPE_TYP_CNM (0x05) +#define NGBE_PTYPE_TYP_EAPOL (0x06) +#define NGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define NGBE_PTYPE_PKT_IPV6 (0x08) +#define NGBE_PTYPE_TYP_IPFRAG (0x01) +#define NGBE_PTYPE_TYP_IP (0x02) +#define NGBE_PTYPE_TYP_UDP (0x03) +#define NGBE_PTYPE_TYP_TCP (0x04) +#define NGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define NGBE_PTYPE_PKT_VFT (0x08) +#define NGBE_PTYPE_TYP_FCOE (0x00) +#define NGBE_PTYPE_TYP_FCDATA (0x01) +#define NGBE_PTYPE_TYP_FCRDY (0x02) +#define NGBE_PTYPE_TYP_FCRSP (0x03) +#define NGBE_PTYPE_TYP_FCOTHER (0x04) + +#define NGBE_RSS_L4_TYPES_MASK \ + ((1ul << NGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_SCTP)) + +/* Packet type non-ip values */ +enum ngbe_l2_ptypes { + NGBE_PTYPE_L2_ABORTED = (NGBE_PTYPE_PKT_MAC), + NGBE_PTYPE_L2_MAC = (NGBE_PTYPE_PKT_MAC | 
NGBE_PTYPE_TYP_MAC), + NGBE_PTYPE_L2_TS = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_TS), + NGBE_PTYPE_L2_FIP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_FIP), + NGBE_PTYPE_L2_LLDP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_LLDP), + NGBE_PTYPE_L2_CNM = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_CNM), + NGBE_PTYPE_L2_EAPOL = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_EAPOL), + NGBE_PTYPE_L2_ARP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_ARP), + + NGBE_PTYPE_L2_IPV4_FRAG = (NGBE_PTYPE_PKT_IP | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV4 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV4_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV4_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV4_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_SCTP), + NGBE_PTYPE_L2_IPV6_FRAG = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV6 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV6_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV6_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV6_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_SCTP), + + NGBE_PTYPE_L2_FCOE = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_TYP_FCOE), + NGBE_PTYPE_L2_FCOE_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCOTHER), + NGBE_PTYPE_L2_FCOE_VFT = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_PKT_VFT), + NGBE_PTYPE_L2_FCOE_VFT_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_VFT_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_VFT_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRSP), + 
NGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCOTHER), + + NGBE_PTYPE_L2_TUN4_MAC = (NGBE_PTYPE_TUN_IPV4 | NGBE_PTYPE_PKT_IGM), + NGBE_PTYPE_L2_TUN6_MAC = (NGBE_PTYPE_TUN_IPV6 | NGBE_PTYPE_PKT_IGM), +}; + +#define NGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/* default to trying for four seconds */ +#define NGBE_TRY_LINK_TIMEOUT (4 * HZ) + +#define NGBE_LINK_UP_TIME 90 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define NGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* transfer units */ +#define NGBE_KB_TO_B 1024 + +#define TCALL(hw, func, args...) (((hw)->func) \ + ? (hw)->func((hw), ##args) : NGBE_NOT_IMPLEMENTED) + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define NGBE_IS_MULTICAST(address) \ + ((bool)(((u8 *)(address))[0] & ((u8)0x01))) + +/* Check whether an address is broadcast. */ +#define NGBE_IS_BROADCAST(address) \ + ((((u8 *)(address))[0] == ((u8)0xff)) && \ + (((u8 *)(address))[1] == ((u8)0xff))) + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_SETUP_MAC_LINK_CMD 0xE0 +#define FW_SETUP_MAC_LINK_LEN 0x2 +#define FW_FLASH_UPGRADE_START_CMD 0xE3 +#define 
FW_FLASH_UPGRADE_START_LEN 0x1 +#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4 +#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5 +#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 +#define FW_EEPROM_CHECK_STATUS 0xE9 +#define FW_PHY_LED_CONF 0xF1 +#define FW_PHY_SIGNAL 0xF0 + +/* ETH PHY Registers */ +#define NGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define NGBE_SR_PCS_CTL2 0x30007 +#define NGBE_SR_PMA_MMD_CTL1 0x10000 +#define NGBE_SR_MII_MMD_CTL 0x1F0000 +#define NGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define NGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define NGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define NGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define NGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define NGBE_SR_AN_MMD_CTL 0x70000 +#define NGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define NGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define NGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define NGBE_VR_AN_KR_MODE_CL 0x78003 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 + +/* read register */ +#define NGBE_DEAD_READ_RETRIES 10 +#define NGBE_DEAD_READ_REG 0xdeadbeefU +#define NGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL + +#define NGBE_FAILED_READ_REG 0xffffffffU +#define NGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +/* BitTimes (BT) conversion */ +#define NGBE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define NGBE_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define NGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define NGBE_CABLE_DC 5556 /* Delay Copper */ +#define NGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay */ +#define NGBE_PHY_D 12800 +#define NGBE_MAC_D 4096 +#define NGBE_XAUI_D (2 * 1024) + +#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define NGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ 
+#define NGBE_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define NGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define NGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * NGBE_B2BT(_max_frame_tc) + \ + (36 * NGBE_PCI_DELAY / 25) + 1) + +#define NGBE_LOW_DV(_max_frame_tc) \ + (2 * NGBE_LOW_DV_X540(_max_frame_tc)) + +struct ngbe_hw; +typedef u8* (*ngbe_mc_itr) (struct ngbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq); + +enum ngbe_phy_type { + ngbe_phy_unknown = 0, + ngbe_phy_none, + ngbe_phy_internal, + ngbe_phy_m88e1512, + ngbe_phy_m88e1512_sfi, + ngbe_phy_m88e1512_unknown, + ngbe_phy_yt8521s, + ngbe_phy_yt8521s_sfi, + ngbe_phy_sfp_passive_tyco, + ngbe_phy_sfp_passive_unknown, + ngbe_phy_sfp_active_unknown, + ngbe_phy_sfp_avago, + ngbe_phy_sfp_ftl, + ngbe_phy_sfp_ftl_active, + ngbe_phy_sfp_unknown, + ngbe_phy_sfp_intel, + ngbe_phy_internal_yt8521s_sfi, + ngbe_phy_generic, + ngbe_phy_sfp_unsupported +}; + +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; + +enum ngbe_media_type { + ngbe_media_type_unknown = 0, + ngbe_media_type_fiber, + ngbe_media_type_copper, + ngbe_media_type_backplane, + ngbe_media_type_virtual +}; + +enum ngbe_reset_type { + NGBE_LAN_RESET = 0, + NGBE_SW_RESET, + NGBE_GLOBAL_RESET +}; + +/* PCI bus types */ +enum ngbe_bus_type { + ngbe_bus_type_unknown = 0, + ngbe_bus_type_pci, + ngbe_bus_type_pcix, + ngbe_bus_type_pci_express, + ngbe_bus_type_internal, + ngbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ngbe_bus_speed { + ngbe_bus_speed_unknown = 0, + ngbe_bus_speed_33 = 33, + ngbe_bus_speed_66 = 66, + ngbe_bus_speed_100 = 100, + ngbe_bus_speed_120 = 120, + ngbe_bus_speed_133 = 133, + ngbe_bus_speed_2500 = 2500, + ngbe_bus_speed_5000 = 5000, + 
ngbe_bus_speed_8000 = 8000, + ngbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ngbe_bus_width { + ngbe_bus_width_unknown = 0, + ngbe_bus_width_pcie_x1 = 1, + ngbe_bus_width_pcie_x2 = 2, + ngbe_bus_width_pcie_x4 = 4, + ngbe_bus_width_pcie_x8 = 8, + ngbe_bus_width_32 = 32, + ngbe_bus_width_64 = 64, + ngbe_bus_width_reserved +}; + +enum ngbe_eeprom_type { + ngbe_eeprom_uninitialized = 0, + ngbe_eeprom_spi, + ngbe_flash, + ngbe_eeprom_none /* No NVM support */ +}; + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Flow Control Settings */ +enum ngbe_fc_mode { + ngbe_fc_none = 0, + ngbe_fc_rx_pause, + ngbe_fc_tx_pause, + ngbe_fc_full, + ngbe_fc_default +}; + +/* SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum ngbe_sfp_type { + ngbe_sfp_type_da_cu = 0, + ngbe_sfp_type_sr = 1, + ngbe_sfp_type_lr = 2, + ngbe_sfp_type_da_cu_core0 = 3, + ngbe_sfp_type_da_cu_core1 = 4, + ngbe_sfp_type_srlr_core0 = 5, + ngbe_sfp_type_srlr_core1 = 6, + ngbe_sfp_type_da_act_lmt_core0 = 7, + ngbe_sfp_type_da_act_lmt_core1 = 8, + ngbe_sfp_type_1g_cu_core0 = 9, + ngbe_sfp_type_1g_cu_core1 = 10, + ngbe_sfp_type_1g_sx_core0 = 11, + ngbe_sfp_type_1g_sx_core1 = 12, + ngbe_sfp_type_1g_lx_core0 = 13, + ngbe_sfp_type_1g_lx_core1 = 14, + ngbe_sfp_type_not_present = 0xFFFE, + ngbe_sfp_type_unknown = 0xFFFF +}; + +enum ngbe_module_id { + NGBE_MODULE_EEPROM = 0, + NGBE_MODULE_FIRMWARE, + NGBE_MODULE_HARDWARE, + NGBE_MODULE_PCIE +}; + +struct ngbe_phy_operations { + s32 (*init)(struct ngbe_hw *hw); + s32 (*reset)(struct ngbe_hw *hw); + s32 (*read_reg)(struct ngbe_hw *hw, u32 reg_offset, u32 page, u16 *phy_data); + s32 
(*write_reg)(struct ngbe_hw *hw, u32 reg_offset, u32 page, u16 phy_data); + s32 (*read_reg_mdi)(struct ngbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); + s32 (*write_reg_mdi)(struct ngbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); + u32 (*setup_link)(struct ngbe_hw *hw, u32 speed, bool need_restart_AN); + u32 (*phy_led_ctrl)(struct ngbe_hw *hw); + s32 (*check_overtemp)(struct ngbe_hw *hw); + s32 (*identify)(struct ngbe_hw *hw); + s32 (*check_event)(struct ngbe_hw *hw); + s32 (*get_adv_pause)(struct ngbe_hw *hw, u8 *pause_bit); + s32 (*get_lp_adv_pause)(struct ngbe_hw *hw, u8 *pause_bit); + s32 (*set_adv_pause)(struct ngbe_hw *hw, u16 pause_bit); + s32 (*setup_once)(struct ngbe_hw *hw); +}; + +struct ngbe_mac_operations { + s32 (*init_hw)(struct ngbe_hw *hw); + s32 (*reset_hw)(struct ngbe_hw *hw); + s32 (*start_hw)(struct ngbe_hw *hw); + s32 (*clear_hw_cntrs)(struct ngbe_hw *hw); + enum ngbe_media_type (*get_media_type)(struct ngbe_hw *hw); + s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr); + s32 (*get_device_caps)(struct ngbe_hw *hw, u16 *device_caps); + s32 (*stop_adapter)(struct ngbe_hw *hw); + s32 (*get_bus_info)(struct ngbe_hw *hw); + void (*set_lan_id)(struct ngbe_hw *hw); + s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval); + s32 (*disable_sec_rx_path)(struct ngbe_hw *hw); + s32 (*enable_sec_rx_path)(struct ngbe_hw *hw); + s32 (*acquire_swfw_sync)(struct ngbe_hw *hw, u32 mask); + void (*release_swfw_sync)(struct ngbe_hw *hw, u32 mask); + + /* Link */ + void (*disable_tx_laser)(struct ngbe_hw *hw); + void (*enable_tx_laser)(struct ngbe_hw *hw); + void (*flap_tx_laser)(struct ngbe_hw *hw); + s32 (*setup_link)(struct ngbe_hw *hw, u32 speed, bool need_restart_AN); + s32 (*check_link)(struct ngbe_hw *hw, u32 *speed, bool *link_up, bool wait); + s32 (*get_link_capabilities)(struct ngbe_hw *hw, u32 *speed, + bool *autoneg); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ngbe_hw *hw, int setup_rxpba, u32 headroom, int 
strategy); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, u32 enable_addr); + s32 (*clear_rar)(struct ngbe_hw *hw, u32 index); + s32 (*insert_mac_addr)(struct ngbe_hw *hw, u8 *ngbe_insert_mac_addr, u32 vmdq); + s32 (*init_rx_addrs)(struct ngbe_hw *hw); + s32 (*update_uc_addr_list)(struct ngbe_hw *hw, u8 *list, + u32 count, ngbe_mc_itr itr); + s32 (*update_mc_addr_list)(struct ngbe_hw *hw, u8 *list, + u32 count, ngbe_mc_itr itr, + bool clear); + s32 (*enable_mc)(struct ngbe_hw *hw); + s32 (*disable_mc)(struct ngbe_hw *hw); + s32 (*clear_vfta)(struct ngbe_hw *hw); + s32 (*set_vfta)(struct ngbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); + s32 (*set_vlvf)(struct ngbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool *vfta_changed); + s32 (*init_uta_tables)(struct ngbe_hw *hw); + void (*set_mac_anti_spoofing)(struct ngbe_hw *hw, bool enable, int pf); + void (*set_vlan_anti_spoofing)(struct ngbe_hw *hw, bool enable, int pf); + s32 (*dmac_config)(struct ngbe_hw *hw); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ngbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub); + s32 (*get_thermal_sensor_data)(struct ngbe_hw *hw); + s32 (*init_thermal_sensor_thresh)(struct ngbe_hw *hw); + + void (*disable_rx)(struct ngbe_hw *hw); + void (*enable_rx)(struct ngbe_hw *hw); + void (*set_ethertype_anti_spoofing)(struct ngbe_hw *hw, bool enable, int vf); + + /* Flow Control */ + s32 (*fc_enable)(struct ngbe_hw *hw); + s32 (*setup_fc)(struct ngbe_hw *hw); + + /* LED */ + s32 (*led_on)(struct ngbe_hw *hw, u32 index); + s32 (*led_off)(struct ngbe_hw *hw, u32 index); +}; + +/* Function pointer table */ +struct ngbe_eeprom_operations { + s32 (*init_params)(struct ngbe_hw *hw); + s32 (*read)(struct ngbe_hw *hw, u16 offset, u16 *data); + s32 (*read_buffer)(struct ngbe_hw *hw, u16 offset, u16 words, u16 *data); + s32 (*read32)(struct ngbe_hw *hw, u16 offset, u32 *data); + s32 (*write)(struct ngbe_hw *hw, u16 offset, u16 data); + s32 
(*write_buffer)(struct ngbe_hw *hw, u16 offset, u16 words, u16 *data);
+	s32 (*validate_checksum)(struct ngbe_hw *hw, u16 *checksum_val);
+	s32 (*update_checksum)(struct ngbe_hw *hw);
+	s32 (*calc_checksum)(struct ngbe_hw *hw);
+	s32 (*eeprom_chksum_cap_st)(struct ngbe_hw *hw, u16 offset, u32 *data);
+};
+
+/* One thermal diode sample and its alarm thresholds.
+ * NOTE(review): units/scale are device-specific — confirm against datasheet.
+ */
+struct ngbe_thermal_diode_data {
+	s16 temp;
+	s16 alarm_thresh;
+	s16 dalarm_thresh;
+};
+
+/* Container for the (single) on-die thermal sensor. */
+struct ngbe_thermal_sensor_data {
+	struct ngbe_thermal_diode_data sensor;
+};
+
+/* DMA Coalescing configuration */
+struct ngbe_dmac_config {
+	u16 watchdog_timer; /* usec units */
+	bool fcoe_en;
+	u32 link_speed;
+	u8 fcoe_tc;
+	u8 num_tcs;
+};
+
+/* MAC-layer state: ops vtable, saved original link-register values
+ * (orig_*), software shadows of the MTA/VFTA filter tables, and
+ * queue/MSI-X capacity limits.
+ */
+struct ngbe_mac_info {
+	struct ngbe_mac_operations ops;
+	struct ngbe_thermal_sensor_data thermal_sensor_data;
+	struct ngbe_dmac_config dmac_config;
+	u32 num_rar_entries;
+	bool autoneg;
+
+	/* Original link/PCS register settings, saved so they can be
+	 * restored later; valid only when orig_link_settings_stored is set.
+	 */
+	u32 orig_link_settings_stored;
+	u32 orig_sr_pcs_ctl2;
+	u32 orig_sr_pma_mmd_ctl1;
+	u32 orig_sr_an_mmd_ctl;
+	u32 orig_sr_an_mmd_adv_reg2;
+	u32 orig_vr_xs_or_pcs_mmd_digi_ctl1;
+	u8 perm_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; /* factory MAC address */
+	u32 mta_shadow[NGBE_MAX_MTA];             /* multicast table shadow */
+	u32 vft_shadow[NGBE_MAX_VFTA_ENTRIES];    /* VLAN filter table shadow */
+	u8 addr[NGBE_ETH_LENGTH_OF_ADDRESS];      /* current MAC address */
+	u32 rar_highwater;
+	u32 mcft_size;
+	s32 mc_filter_type;
+	bool set_lben;
+	u32 vft_size;
+	u32 rx_pb_size;
+	bool autotry_restart;
+
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+
+	u16 max_msix_vectors;
+};
+
+/* Physical-layer capability bitmap (bit meanings defined elsewhere). */
+typedef u32 ngbe_physical_layer;
+
+/* Autonegotiation advertised speeds */
+typedef u32 ngbe_autoneg_advertised;
+
+/* PHY state: identification, media/SFP type, and advertised/forced speeds. */
+struct ngbe_phy_info {
+	enum ngbe_phy_type type;
+	struct ngbe_phy_operations ops;
+	ngbe_physical_layer link_mode;
+	ngbe_autoneg_advertised autoneg_advertised;
+	enum ngbe_media_type media_type;
+	enum ngbe_sfp_type sfp_type;
+
+	u32 id;
+	u32 addr; /* PHY address on the MDIO bus — TODO confirm */
+	bool reset_if_overtemp;
+	u32 force_speed;
+	u32 phy_semaphore_mask;
+	bool multispeed_fiber;
+};
+
+/* Bus parameters */
+struct ngbe_bus_info {
+	enum ngbe_bus_type type;
+	enum pci_bus_speed speed;
+	enum pcie_link_width width;
+
+	u16 lan_id; /* LAN port index of this PCI function */
+	u16 func;   /* PCI function number */
+};
+
+/* EEPROM access state: ops vtable plus geometry of the shadow region. */
+struct ngbe_eeprom_info {
+	struct ngbe_eeprom_operations ops;
+	enum ngbe_eeprom_type type;
+
+	u16 sw_region_offset;
+	u32 semaphore_delay;
+	u16 word_size;
+};
+
+/* Bookkeeping for the receive address (RAR/MTA) filters. */
+struct ngbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool user_set_promisc;
+};
+
+/* Flash access vtable; buffers are dword (u32) granular, unlike the
+ * word (u16) granular EEPROM ops above.
+ */
+struct ngbe_flash_operations {
+	s32 (*init_params)(struct ngbe_hw *hw);
+	s32 (*read_buffer)(struct ngbe_hw *hw, u32 offset, u32 dwords, u32 *data);
+	s32 (*write_buffer)(struct ngbe_hw *hw, u32 offset, u32 dwords, u32 *data);
+};
+
+/* Flow control parameters */
+struct ngbe_fc_info {
+	u32 high_water; /* Flow Ctrl High-water */
+	u32 low_water; /* Flow Ctrl Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum ngbe_fc_mode current_mode; /* FC mode in effect */
+	enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Flash device state: ops vtable plus part geometry. */
+struct ngbe_flash_info {
+	struct ngbe_flash_operations ops;
+	u32 semaphore_delay;
+	u32 dword_size;
+	u16 address_bits;
+};
+
+/* Top-level hardware handle. hw_addr is the MMIO base used by the
+ * rd32()/wr32() accessors below; a NULL hw_addr marks the device as
+ * gone (see NGBE_REMOVED()). 'back' is an opaque pointer to the
+ * owning driver-private structure — TODO confirm against callers.
+ */
+struct ngbe_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+	struct ngbe_mac_info mac;
+	struct ngbe_phy_info phy;
+	struct ngbe_bus_info bus;
+	struct ngbe_eeprom_info eeprom;
+	struct ngbe_addr_filter_info addr_ctrl;
+	struct ngbe_fc_info fc;
+	struct ngbe_flash_info flash;
+
+	/* PCI identity of this function */
+	u16 device_id;
+	u16 vendor_id;
+	u8 revision_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u16 oem_ssid;
+	u16 oem_svid;
+
+	bool gpio_ctl;
+	bool autoneg;
+	bool force_full_reset;
+	bool adapter_stopped;
+	bool wol_enabled;
+
+	ngbe_physical_layer link_mode;
+	enum ngbe_reset_type reset_type;
+	u16 tpid[8];
+
+	spinlock_t phy_lock; /* Used to protect phy registers. */
+};
+
+/* Host Interface Command Structures
+ *
+ * These define the wire format exchanged with firmware; field sizes
+ * and ordering are fixed by the device — do not reorder.
+ */
+struct ngbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct ngbe_hic_reset {
+	struct ngbe_hic_hdr hdr;
+	u16 lan_id;
+	u16 reset_type;
+};
+
+struct ngbe_hic_hdr2_req {
+	u8 cmd;
+	u8 buf_lenh;
+	u8 buf_lenl;
+	u8 checksum;
+};
+
+struct ngbe_hic_hdr2_rsp {
+	u8 cmd;
+	u8 buf_lenl;
+	u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+	u8 checksum;
+};
+
+union ngbe_hic_hdr2 {
+	struct ngbe_hic_hdr2_req req;
+	struct ngbe_hic_hdr2_rsp rsp;
+};
+
+/* Driver version report sent to firmware. */
+struct ngbe_hic_drv_info {
+	struct ngbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* These need to be dword aligned */
+struct ngbe_hic_read_shadow_ram {
+	union ngbe_hic_hdr2 hdr;
+	u32 address;
+	u16 length;
+	u16 pad2;
+	u16 data;
+	u16 pad3;
+};
+
+struct ngbe_hic_write_shadow_ram {
+	union ngbe_hic_hdr2 hdr;
+	u32 address;
+	u16 length;
+	u16 pad2;
+	u16 data;
+	u16 pad3;
+};
+
+/* Transmit Descriptor */
+union ngbe_tx_desc {
+	struct {
+		__le64 buffer_addr; /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd; /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Receive Descriptor */
+union ngbe_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS, Pkt type */
+					__le16 hdr_info; /* Splithdr, hdrlen */
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				struct {
+					__le16 ip_id; /* IP id */
+					__le16 csum; /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* ext status/error */
+			__le16 length; /* Packet length */
+			__le16 vlan; /* VLAN tag */
+		} upper;
+	} wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ngbe_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* Statistics counters collected by the MAC */
+struct ngbe_hw_stats {
+	u64 crcerrs;
+	u64 illerrc;
+	u64 errbc;
+	u64 mspdc;
+	u64 mpctotal;
+	u64 mpc[8];
+	u64 mlfc;
+	u64 mrfc;
+	u64 rlec;
+	u64 lxontxc;
+	u64 lxonrxc;
+	u64 lxofftxc;
+	u64 lxoffrxc;
+	u64 pxontxc[8];
+	u64 pxonrxc[8];
+	u64 pxofftxc[8];
+	u64 pxoffrxc[8];
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc[8];
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mngprc;
+	u64 mngpdc;
+	u64 mngptc;
+	u64 tor;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 xec;
+	u64 qprc[16];
+	u64 qptc[16];
+	u64 qbrc[16];
+	u64 qbtc[16];
+	u64 qprdc[16];
+	u64 pxon2offc[8];
+	u64 fccrc;
+	u64 fclast;
+	u64 fcoerpdc;
+	u64 fcoeprc;
+	u64 fcoeptc;
+	u64 fcoedwrc;
+	u64 fcoedwtc;
+	u64 fcoe_noddp;
+	u64 fcoe_noddp_ext_buff;
+	u64 ldpcec;
+	u64 pcrc8ec;
+	u64 b2ospc;
+	u64 b2ogprc;
+	u64 o2bgptc;
+	u64 o2bspc;
+};
+
+/* Firmware-upgrade host interface commands: start, data write, verify. */
+struct ngbe_hic_upg_start {
+	struct ngbe_hic_hdr hdr;
+	u8 module_id;
+	u8 pad2;
+	u16 pad3;
+};
+
+struct ngbe_hic_upg_write {
+	struct ngbe_hic_hdr hdr;
+	u8 data_len;
+	u8 eof_flag;
+	u16 check_sum;
+	u32 data[62];
+};
+
+/* Action requested after a firmware upgrade completes. */
+enum ngbe_upg_flag {
+	NGBE_RESET_NONE = 0,
+	NGBE_RESET_FIRMWARE,
+	NGBE_RELOAD_EEPROM,
+	NGBE_RESET_LAN
+};
+
+struct ngbe_hic_upg_verify {
+	struct ngbe_hic_hdr hdr;
+	u32 action_flag;
+};
+
+/* Raw MMIO read; callers must pass a valid mapped address. */
+static inline u32
+ngbe_rd32(u8 __iomem *base)
+{
+	return readl(base);
+}
+
+/* Read register 'reg'; returns NGBE_FAILED_READ_REG if the mapping
+ * is gone (hw_addr cleared, e.g. after surprise removal).
+ */
+static inline u32
+rd32(struct ngbe_hw *hw, u32 reg)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val = NGBE_FAILED_READ_REG;
+
+	if (unlikely(!base))
+		return val;
+
+	val = ngbe_rd32(base + reg);
+
+	return val;
+}
+
+/* Read register 'reg' and apply 'mask'; the failed-read sentinel is
+ * returned unmasked so callers can still detect it.
+ */
+static inline u32
+rd32m(struct ngbe_hw *hw, u32 reg, u32 mask)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val = NGBE_FAILED_READ_REG;
+
+	if (unlikely(!base))
+		return val;
+
+	val = ngbe_rd32(base + reg);
+	if (unlikely(val == NGBE_FAILED_READ_REG))
+		return val;
+
+	return val & mask;
+}
+
+/* write register */
+static inline void
+ngbe_wr32(u8 __iomem *base, u32 val)
+{
+	writel(val, base);
+}
+
+/* Write 'val' to register 'reg'; silently dropped if the mapping is gone. */
+static inline void
+wr32(struct ngbe_hw *hw, u32 reg, u32 val)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+
+	if (unlikely(!base))
+		return;
+
+	ngbe_wr32(base + reg, val);
+}
+
+/* Indexed (array) register access: element 'off' of a register array
+ * starting at 'reg', i.e. reg + 4 * off.
+ */
+#define wr32a(a, reg, off, val) \
+	wr32((a), (reg) + ((off) << 2), (val))
+#define rd32a(a, reg, offset) ( \
+	rd32((a), (reg) + ((offset) << 2)))
+
+/* Read-modify-write: replace the 'mask' bits of 'reg' with 'field'.
+ * Not atomic — callers needing atomicity must hold their own lock.
+ */
+static inline void
+wr32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 field)
+{
+	u8 __iomem *base = READ_ONCE(hw->hw_addr);
+	u32 val;
+
+	if (unlikely(!base))
+		return;
+
+	val = ngbe_rd32(base + reg);
+	if (unlikely(val == NGBE_FAILED_READ_REG))
+		return;
+
+	val = ((val & ~mask) | (field & mask));
+	ngbe_wr32(base + reg, val);
+}
+
+/* Poll 'reg' until (value & mask) == (field & mask).
+ * If 'count' is zero, the iteration count is derived from 'usecs'
+ * assuming ~10us steps; the total budget 'usecs' is then spread evenly
+ * across the iterations (rounded up). Returns 0 on match,
+ * NGBE_ERR_TIMEOUT otherwise.
+ */
+static inline s32
+po32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 field, int usecs, int count)
+{
+	int loop;
+
+	loop = (count ? count : (usecs + 9) / 10);
+	usecs = (loop ? (usecs + loop - 1) / loop : 0);
+
+	count = loop;
+	do {
+		u32 value = rd32(hw, reg);
+
+		if ((value & mask) == (field & mask))
+			break;
+
+		if (loop-- <= 0)
+			break;
+
+		udelay(usecs);
+	} while (true);
+
+	/* 'count - loop <= count' is equivalent to 'loop >= 0' here:
+	 * loop only goes negative (-1) when the budget was exhausted.
+	 */
+	return (count - loop <= count ? 0 : NGBE_ERR_TIMEOUT);
+}
+
+/* True once the MMIO mapping has been torn down (device removed). */
+static inline bool NGBE_REMOVED(void __iomem *addr)
+{
+	return unlikely(!addr);
+}
+
+/* Read-back of a harmless register to flush posted PCI writes. */
+#define NGBE_WRITE_FLUSH(H) rd32(H, NGBE_MIS_PWR)
+
+#endif /* _NGBE_TYPE_H_ */