diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 83838054c762126db7afc9cf4518b57648f5377b..681602fa3a07fc5efcb3096094ddcd3fae14726a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_TXGBE) += txgbe.o txgbe-objs := txgbe_main.o txgbe_ethtool.o \ txgbe_hw.o txgbe_phy.o \ txgbe_lib.o txgbe_ptp.o \ - txgbe_mbx.o txgbe_sriov.o + txgbe_mbx.o txgbe_sriov.o \ + txgbe_pcierr.o txgbe-$(CONFIG_DEBUG_FS) += txgbe_debugfs.o txgbe-${CONFIG_SYSFS} += txgbe_sysfs.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 79367867ea99085e3bca8dce14a7dc70dedd1801..605a07ea933f09b49b4e0bd63cf591ce491db5b1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -801,8 +801,8 @@ static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) int txgbe_wol_supported(struct txgbe_adapter *adapter); int txgbe_write_uc_addr_list(struct net_device *netdev, int pool); -int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool); -int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool); +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool); +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool); int txgbe_available_rars(struct txgbe_adapter *adapter); void txgbe_vlan_mode(struct net_device *netdev, u32 features); u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 06ab4268965410b91decab6d9160c60a3c250010..9917d2fdd76ee5f8703a7f180709f0f68e873310 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -1094,7 +1094,7 @@ static void 
txgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL); /* TDB */ - regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_RFCS); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_TFCS); regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PB_SZ(0)); regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_UP2TC); regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CTL); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index fea1b0957834e375ee4b10574d38d63b2fac5def..4282a544d438675ffbec7e9d452b9d6e271fe3ed 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -23,6 +23,7 @@ #include "txgbe_hw.h" #include "txgbe_phy.h" #include "txgbe_sriov.h" +#include "txgbe_pcierr.h" char txgbe_driver_name[] = "txgbe"; #define DRV_VERSION __stringify(1.3.5.1-k) @@ -123,6 +124,55 @@ static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter) return physfns; } +void txgbe_print_tx_hang_status(struct txgbe_adapter *adapter) +{ + int pos; + u32 value; + struct pci_dev *pdev = adapter->pdev; + u16 devctl2; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &value); + e_info(probe, "AER Uncorrectable Error Status: 0x%08x\n", value); + txgbe_aer_print_error(adapter, TXGBE_AER_UNCORRECTABLE, value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &value); + e_info(probe, "AER Uncorrectable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &value); + e_info(probe, "AER Uncorrectable Error Severity: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_STATUS, &value); + e_info(probe, "AER Correctable Error Status: 0x%08x\n", value); + txgbe_aer_print_error(adapter, TXGBE_AER_CORRECTABLE, value); + pci_read_config_dword(pdev, pos + PCI_ERR_COR_MASK, &value); + 
e_info(probe, "AER Correctable Error Mask: 0x%08x\n", value); + pci_read_config_dword(pdev, pos + PCI_ERR_CAP, &value); + e_info(probe, "AER Capabilities and Control Register: 0x%08x\n", value); + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2); + e_info(probe, "Device Control2 Register: 0x%04x\n", devctl2); + + e_info(probe, "Tx flow control Status[TDB_TFCS 0xCE00]: 0x%x\n", + rd32(&adapter->hw, TXGBE_TDB_TFCS)); + + e_info(tx_err, "tdm_desc_fatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180d0)); + e_info(tx_err, "tdm_desc_fatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180d4)); + e_info(tx_err, "tdm_desc_fatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180d8)); + e_info(tx_err, "tdm_desc_fatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180dc)); + e_info(tx_err, "tdm_desc_nonfatal_0: 0x%x\n", + rd32(&adapter->hw, 0x180c0)); + e_info(tx_err, "tdm_desc_nonfatal_1: 0x%x\n", + rd32(&adapter->hw, 0x180c4)); + e_info(tx_err, "tdm_desc_nonfatal_2: 0x%x\n", + rd32(&adapter->hw, 0x180c8)); + e_info(tx_err, "tdm_desc_nonfatal_3: 0x%x\n", + rd32(&adapter->hw, 0x180cc)); +} + void txgbe_service_event_schedule(struct txgbe_adapter *adapter) { if (!test_bit(__TXGBE_DOWN, &adapter->state) && @@ -354,6 +404,29 @@ static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) return false; } +static void txgbe_tx_timeout_dorecovery(struct txgbe_adapter *adapter) +{ + /* schedule immediate reset if we believe we hung */ + if (adapter->hw.bus.lan_id == 0) + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + txgbe_service_event_schedule(adapter); +} + +/** + * txgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void txgbe_tx_timeout_reset(struct txgbe_adapter *adapter) +{ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + txgbe_service_event_schedule(adapter); + } +} + /** * 
txgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure @@ -362,27 +435,18 @@ static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) static void txgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct txgbe_adapter *adapter = netdev_priv(netdev); - bool real_tx_hang = false; - int i; - u16 value = 0; + bool tdm_desc_fatal = false; u32 value2 = 0, value3 = 0; + u16 pci_cmd = 0; u32 head, tail; - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct txgbe_ring *tx_ring = adapter->tx_ring[i]; - - if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) - real_tx_hang = true; - } - - if (real_tx_hang) - netif_warn(adapter, drv, netdev, "Real Tx hang.\n"); + u16 vid = 0; + int i; /* Dump the relevant registers to determine the cause of a timeout event. */ - pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); - netif_warn(adapter, drv, netdev, "pci vendor id: 0x%x\n", value); - pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); - netif_warn(adapter, drv, netdev, "pci command reg: 0x%x.\n", value); + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &vid); + netif_warn(adapter, drv, netdev, "pci vendor id: 0x%x\n", vid); + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + netif_warn(adapter, drv, netdev, "pci command reg: 0x%x.\n", pci_cmd); value2 = rd32(&adapter->hw, 0x10000); netif_warn(adapter, drv, netdev, "reg mis_pwr: 0x%08x\n", value2); @@ -414,12 +478,19 @@ static void txgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n", value2, value3); - if (value2 || value3) { - netif_warn(adapter, drv, netdev, "clear interrupt mask.\n"); - wr32(&adapter->hw, TXGBE_PX_ICS(0), value2); - wr32(&adapter->hw, TXGBE_PX_IMC(0), value2); - wr32(&adapter->hw, TXGBE_PX_ICS(1), value3); - wr32(&adapter->hw, TXGBE_PX_IMC(1), value3); + /* only check pf queue tdm desc error */ + if ((rd32(&adapter->hw, 
TXGBE_TDM_DESC_FATAL(0)) & 0xffffffff) || + (rd32(&adapter->hw, TXGBE_TDM_DESC_FATAL(1)) & 0xffffffff)) + tdm_desc_fatal = true; + + /* PCIe link loss, tdm desc fatal error or memory space can't access */ + if (vid == TXGBE_FAILED_READ_CFG_WORD || + tdm_desc_fatal || + !(pci_cmd & 0x2)) { + txgbe_tx_timeout_dorecovery(adapter); + } else { + txgbe_print_tx_hang_status(adapter); + txgbe_tx_timeout_reset(adapter); } } @@ -1629,6 +1700,18 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) txgbe_service_event_schedule(adapter); } + + if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) { + ERROR_REPORT1(hw, TXGBE_ERROR_POLLING, + "lan id %d, PCIe request error founded.\n", hw->bus.lan_id); + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + } else { + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + } + } + if (eicr & TXGBE_PX_MISC_IC_DEV_RST) { adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; txgbe_service_event_schedule(adapter); @@ -2906,7 +2989,7 @@ static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, TXGBE_PSR_MAC_SWC_AD_H_AV); } -int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) { struct txgbe_hw *hw = &adapter->hw; u32 i; @@ -2952,7 +3035,7 @@ static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) txgbe_sync_mac_table(adapter); } -int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, const u8 *addr, u16 pool) { /* search table for addr, if found, set to 0 and sync */ u32 i; @@ -2987,6 +3070,25 @@ int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) return -ENOMEM; } +static int txgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = 
txgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int txgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + /** * txgbe_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure @@ -3091,10 +3193,11 @@ void txgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = txgbe_write_uc_addr_list(netdev, VMDQ_P(0)); - if (count < 0) { + if (__dev_uc_sync(netdev, txgbe_uc_sync, txgbe_uc_unsync)) { vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; - vmolr |= TXGBE_PSR_VM_L2CTL_UPE; + fctrl |= TXGBE_PSR_CTL_UPE; + e_dev_warn("uc count is %d, available mac entry is %d, enable promisc mode\n", + netdev_uc_count(netdev), txgbe_available_rars(adapter)); } /* Write addresses to the MTA, if the attempt fails @@ -5365,6 +5468,7 @@ static void txgbe_service_timer(struct timer_list *t) struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer); unsigned long next_event_offset; struct txgbe_hw *hw = &adapter->hw; + u32 val = 0; /* poll faster when waiting for link */ if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { @@ -5376,6 +5480,24 @@ static void txgbe_service_timer(struct timer_list *t) next_event_offset = HZ * 2; } + /* record which func to provoke PCIE recovery */ + if (rd32(&adapter->hw, TXGBE_MIS_PF_SM) == 1) { + val = rd32m(&adapter->hw, TXGBE_MIS_PRB_CTL, + TXGBE_MIS_PRB_CTL_LAN0_UP | + TXGBE_MIS_PRB_CTL_LAN1_UP); + if (val & TXGBE_MIS_PRB_CTL_LAN0_UP) { + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "set recover on Lan0\n"); + } + } else if (val & TXGBE_MIS_PRB_CTL_LAN1_UP) { + if (hw->bus.lan_id == 1) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + e_info(probe, "set recover on Lan1\n"); + } + } + } + /* 
Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); @@ -5466,6 +5588,30 @@ static void txgbe_reset_subtask(struct txgbe_adapter *adapter) rtnl_unlock(); } +static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +{ + bool status; + + if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + txgbe_print_tx_hang_status(adapter); + + wr32m(&adapter->hw, TXGBE_MIS_PF_SM, TXGBE_MIS_PF_SM_SM, 0); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) { + status = txgbe_check_recovery_capability(adapter->pdev); + if (status) { + e_info(probe, "do recovery\n"); + txgbe_pcie_do_recovery(adapter->pdev); + } else { + e_err(drv, "This platform can't support pcie recovery, skip it\n"); + } + } + + adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; +} + /** * txgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data @@ -5485,6 +5631,7 @@ static void txgbe_service_task(struct work_struct *work) return; } + txgbe_check_pcie_subtask(adapter); txgbe_reset_subtask(adapter); txgbe_sfp_detection_subtask(adapter); txgbe_sfp_link_config_subtask(adapter); @@ -5502,36 +5649,6 @@ static void txgbe_service_task(struct work_struct *work) txgbe_service_event_complete(adapter); } -static u8 get_ipv6_proto(struct sk_buff *skb, int offset) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - u8 nexthdr = hdr->nexthdr; - - offset += sizeof(struct ipv6hdr); - - while (ipv6_ext_hdr(nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (nexthdr == NEXTHDR_NONE) - break; - - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - break; - - if (nexthdr == NEXTHDR_FRAGMENT) - break; - else if (nexthdr == NEXTHDR_AUTH) - offset += ipv6_authlen(hp); - else - offset += ipv6_optlen(hp); - - nexthdr = hp->nexthdr; - } - - return nexthdr; -} - union network_header { struct iphdr *ipv4; struct ipv6hdr *ipv6; @@ -5544,6 +5661,9 @@ static struct txgbe_dptype encode_tx_desc_ptype(const 
struct txgbe_tx_buffer *fi u8 tun_prot = 0; u8 l4_prot = 0; u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; if (skb->encapsulation) { union network_header hdr; @@ -5556,7 +5676,12 @@ static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *fi ptype = TXGBE_PTYPE_TUN_IPV4; break; case htons(ETH_P_IPV6): - tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); if (tun_prot == NEXTHDR_FRAGMENT) goto encap_frag; ptype = TXGBE_PTYPE_TUN_IPV6; @@ -5565,7 +5690,8 @@ static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *fi goto exit; } - if (tun_prot == IPPROTO_IPIP) { + if (tun_prot == IPPROTO_IPIP || + tun_prot == IPPROTO_IPV6) { hdr.raw = (void *)inner_ip_hdr(skb); ptype |= TXGBE_PTYPE_PKT_IPIP; } else if (tun_prot == IPPROTO_UDP) { @@ -5610,8 +5736,13 @@ static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *fi } break; case 6: - l4_prot = get_ipv6_proto(skb, - skb_inner_network_offset(skb)); + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype |= TXGBE_PTYPE_PKT_IPV6; if (l4_prot == NEXTHDR_FRAGMENT) { ptype |= TXGBE_PTYPE_TYP_IPFRAG; @@ -5623,7 +5754,6 @@ static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *fi } } else { encap_frag: - switch (first->protocol) { case htons(ETH_P_IP): l4_prot = ip_hdr(skb)->protocol; @@ -5633,16 +5763,22 @@ static struct txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *fi goto exit; } break; -#ifdef NETIF_F_IPV6_CSUM + case htons(ETH_P_IPV6): - l4_prot = 
get_ipv6_proto(skb, skb_network_offset(skb)); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; if (l4_prot == NEXTHDR_FRAGMENT) { ptype |= TXGBE_PTYPE_TYP_IPFRAG; goto exit; } break; -#endif /* NETIF_F_IPV6_CSUM */ + case htons(ETH_P_1588): ptype = TXGBE_PTYPE_L2_TS; goto exit; @@ -5698,6 +5834,9 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, u32 tunhdr_eiplen_tunlen = 0; u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; bool enc = skb->encapsulation; struct ipv6hdr *ipv6h; @@ -5764,7 +5903,12 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: break; @@ -5789,6 +5933,7 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, TXGBE_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << TXGBE_TXD_OUTER_IPLEN_SHIFT; @@ -5806,6 +5951,12 @@ static int txgbe_tso(struct txgbe_ring *tx_ring, vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, type_tucmd, mss_l4len_idx); @@ -5826,6 +5977,7 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, u32 type_tucmd; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if 
(!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) && !(first->tx_flags & TXGBE_TX_FLAGS_CC)) return; @@ -5833,6 +5985,9 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_MACLEN_SHIFT; } else { u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; union { struct iphdr *ipv4; @@ -5854,7 +6009,13 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, tun_prot = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; default: if (unlikely(net_ratelimit())) { @@ -5884,6 +6045,7 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << @@ -5909,7 +6071,11 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, case 6: vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); break; default: break; @@ -5930,7 +6096,8 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, TXGBE_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; } /* update TX checksum flag */ @@ -5941,6 +6108,12 @@ static void txgbe_tx_csum(struct txgbe_ring *tx_ring, vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_HW_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, type_tucmd, mss_l4len_idx); } @@ -7297,6 +7470,245 @@ u16 
txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg) return value; } +#ifdef CONFIG_PCI_IOV +static u32 txgbe_read_pci_cfg_dword(struct txgbe_hw *hw, u32 reg) +{ + struct txgbe_adapter *adapter = hw->back; + u32 value; + + if (TXGBE_REMOVED(hw->hw_addr)) + return TXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == TXGBE_FAILED_READ_CFG_DWORD && + txgbe_check_cfg_remove(hw, adapter->pdev)) + return TXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ + +void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value) +{ + struct txgbe_adapter *adapter = hw->back; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +static inline void txgbe_issue_vf_flr(struct txgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + +/** + * txgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t txgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct txgbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = txgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 4); + dw2 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 8); + dw3 = txgbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 12); + if (TXGBE_REMOVED(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; + /* if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_WANGXUN, + TXGBE_VF_DEVICE_ID, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_WANGXUN, + TXGBE_VF_DEVICE_ID, vfdev); + } + /* There's a slim chance the VF could have been hot + * plugged, so if it is no longer present we don't need + * to issue the VFLR.Just clean up the AER in that case. 
+ */ + if (vfdev) { + txgbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_aer_clear_nonfatal_status(pdev); + } + + /* Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. + */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + +skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + + if (!test_bit(__TXGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + txgbe_close(netdev); + + if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * txgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t txgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + /* atomic_inc() before lock. */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + + /* After second error pci->state_saved is false, this + * resets it so EEH doesn't break. 
+ */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + txgbe_reset(adapter); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_aer_clear_nonfatal_status(pdev); + + return result; +} + +/** + * txgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void txgbe_io_resume(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } +#endif + rtnl_lock(); + if (netif_running(netdev)) + txgbe_open(netdev); + + netif_device_attach(netdev); + rtnl_unlock(); +} + +static const struct pci_error_handlers txgbe_err_handler = { + .error_detected = txgbe_io_error_detected, + .slot_reset = txgbe_io_slot_reset, + .resume = txgbe_io_resume, +}; + static struct pci_driver txgbe_driver = { .name = txgbe_driver_name, .id_table = txgbe_pci_tbl, @@ -7308,6 +7720,7 @@ static struct pci_driver txgbe_driver = { #endif .shutdown = txgbe_shutdown, .sriov_configure = txgbe_pci_sriov_configure, + .err_handler = &txgbe_err_handler }; /** diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c new file mode 100644 index 0000000000000000000000000000000000000000..73fbcff2cf4290acf76596650cb1261576b44b2d --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include + +#include "txgbe_pcierr.h" +#include "txgbe.h" +#define TXGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN | \ + PCI_ERR_ROOT_CMD_NONFATAL_EN | \ + PCI_ERR_ROOT_CMD_FATAL_EN) + +#ifndef PCI_ERS_RESULT_NO_AER_DRIVER +/* No AER capabilities registered for the driver */ +#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t)6) +#endif + +static const char *aer_correctable_error_string[16] = { + "RxErr", /* Bit Position 0 */ + NULL, + NULL, + NULL, + NULL, + NULL, + "BadTLP", /* Bit Position 6 */ + "BadDLLP", /* Bit Position 7 */ + "Rollover", /* Bit Position 8 */ + NULL, + NULL, + NULL, + "Timeout", /* Bit Position 12 */ + "NonFatalErr", /* Bit Position 13 */ + "CorrIntErr", /* Bit Position 14 */ + "HeaderOF", /* Bit Position 15 */ +}; + +static const char *aer_uncorrectable_error_string[27] = { + "Undefined", /* Bit Position 0 */ + NULL, + NULL, + NULL, + "DLP", /* Bit Position 4 */ + "SDES", /* Bit Position 5 */ + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "TLP", /* Bit Position 12 */ + "FCP", /* Bit Position 13 */ + "CmpltTO", /* Bit Position 14 */ + "CmpltAbrt", /* Bit Position 15 */ + "UnxCmplt", /* Bit Position 16 */ + "RxOF", /* Bit Position 17 */ + "MalfTLP", /* Bit Position 18 */ + "ECRC", /* Bit Position 19 */ + "UnsupReq", /* Bit Position 20 */ + "ACSViol", /* Bit Position 21 */ + "UncorrIntErr", /* Bit Position 22 */ + "BlockedTLP", /* Bit Position 23 */ + "AtomicOpBlocked", /* Bit Position 24 */ + "TLPBlockedErr", /* Bit Position 25 */ + "PoisonTLPBlocked", /* Bit Position 26 */ +}; + +static pci_ers_result_t merge_result(enum pci_ers_result orig, + enum pci_ers_result new) +{ + if (new == PCI_ERS_RESULT_NO_AER_DRIVER) + return PCI_ERS_RESULT_NO_AER_DRIVER; + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { + case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_RECOVERED: + orig = new; + break; + case PCI_ERS_RESULT_DISCONNECT: + if (new == PCI_ERS_RESULT_NEED_RESET) + orig = 
PCI_ERS_RESULT_NEED_RESET;
+		break;
+	default:
+		break;
+	}
+	return orig;
+}
+
+static int txgbe_report_error_detected(struct pci_dev *dev,
+				       pci_channel_state_t state,
+				       enum pci_ers_result *result)
+{
+	pci_ers_result_t vote;
+	const struct pci_error_handlers *err_handler;
+
+	device_lock(&dev->dev);
+	if (!dev->driver ||
+	    !dev->driver->err_handler ||
+	    !dev->driver->err_handler->error_detected) {
+		/* If any device in the subtree does not have an error_detected
+		 * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
+		 * error callbacks of "any" device in the subtree, and will
+		 * exit in the disconnected error state.
+		 */
+		if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+		else
+			vote = PCI_ERS_RESULT_NONE;
+	} else {
+		err_handler = dev->driver->err_handler;
+		vote = err_handler->error_detected(dev, state);
+	}
+
+	*result = merge_result(*result, vote);
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int txgbe_report_frozen_detected(struct pci_dev *dev, void *data)
+{
+	return txgbe_report_error_detected(dev, pci_channel_io_frozen, data);
+}
+
+static int txgbe_report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+	pci_ers_result_t vote, *result = data;
+	const struct pci_error_handlers *err_handler;
+
+	device_lock(&dev->dev);
+	if (!dev->driver ||
+	    !dev->driver->err_handler ||
+	    !dev->driver->err_handler->mmio_enabled)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	vote = err_handler->mmio_enabled(dev);
+	*result = merge_result(*result, vote);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int txgbe_report_slot_reset(struct pci_dev *dev, void *data)
+{
+	pci_ers_result_t vote, *result = data;
+	const struct pci_error_handlers *err_handler;
+
+	device_lock(&dev->dev);
+	if (!dev->driver ||
+	    !dev->driver->err_handler ||
+	    !dev->driver->err_handler->slot_reset)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	vote = err_handler->slot_reset(dev);
+	*result = merge_result(*result, vote);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+static int txgbe_report_resume(struct pci_dev *dev, void *data)
+{
+	const struct pci_error_handlers *err_handler;
+
+	device_lock(&dev->dev);
+	dev->error_state = pci_channel_io_normal;
+	if (!dev->driver ||
+	    !dev->driver->err_handler ||
+	    !dev->driver->err_handler->resume)
+		goto out;
+
+	err_handler = dev->driver->err_handler;
+	err_handler->resume(dev);
+out:
+	device_unlock(&dev->dev);
+	return 0;
+}
+
+void txgbe_pcie_do_recovery(struct pci_dev *dev)
+{
+	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
+	struct pci_bus *bus;
+	u32 reg32;
+	int pos;
+	int delay = 1;
+	u32 id;
+	u16 ctrl;
+
+	/* Error recovery runs on all subordinates of the first downstream port.
+	 * If the downstream port detected the error, it is cleared at the end.
+	 */
+	if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+	      pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
+		dev = dev->bus->self;
+	bus = dev->subordinate;
+
+	pci_walk_bus(bus, txgbe_report_frozen_detected, &status);
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+	if (pos) {
+		/* Disable Root's interrupt in response to error messages */
+		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+		reg32 &= ~TXGBE_ROOT_PORT_INTR_ON_MESG_MASK;
+		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+	}
+
+	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+	usleep_range(2000, 4000);
+	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+	ssleep(1);
+
+	pci_read_config_dword(dev, PCI_COMMAND, &id);
+	while (id == ~0) {
+		if (delay > 60000) {
+			pci_warn(dev, "not ready %dms after %s; giving up\n",
+				 delay - 1, "bus_reset");
+			return;
+		}
+
+		if (delay > 1000)
+			pci_info(dev, "not ready %dms after %s; waiting\n",
+				 delay - 1, "bus_reset");
+
+		msleep(delay);
+		delay *= 2;
+		pci_read_config_dword(dev, PCI_COMMAND, &id);
+	}
+
+	if (delay > 1000)
+		pci_info(dev, "ready %dms after %s\n", delay - 1,
+			 "bus_reset");
+
+	pci_info(dev, "Root Port link has been reset\n");
+
+	if (pos) {
+		/* Clear Root Error Status */
+		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+
+		/* Enable Root Port's interrupt in response to error messages */
+		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+		reg32 |= TXGBE_ROOT_PORT_INTR_ON_MESG_MASK;
+		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+	}
+
+	if (status == PCI_ERS_RESULT_CAN_RECOVER) {
+		status = PCI_ERS_RESULT_RECOVERED;
+		pci_dbg(dev, "broadcast mmio_enabled message\n");
+		pci_walk_bus(bus, txgbe_report_mmio_enabled, &status);
+	}
+
+	if (status == PCI_ERS_RESULT_NEED_RESET) {
+		status = PCI_ERS_RESULT_RECOVERED;
+		pci_dbg(dev, "broadcast slot_reset message\n");
+		pci_walk_bus(bus, txgbe_report_slot_reset, &status);
+	}
+
+	if (status != PCI_ERS_RESULT_RECOVERED)
+		goto failed;
+
+	pci_dbg(dev, "broadcast resume message\n");
+	pci_walk_bus(bus, txgbe_report_resume, &status);
+
+failed:
+	;
+}
+
+void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status)
+{
+	unsigned long i;
+	const char *errmsg = NULL;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long val = status;
+
+	for_each_set_bit(i, &val, 32) {
+		if (severity == TXGBE_AER_CORRECTABLE) {
+			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
+				aer_correctable_error_string[i] : NULL;
+		} else {
+			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
+				aer_uncorrectable_error_string[i] : NULL;
+		}
+		if (errmsg)
+			dev_info(&pdev->dev, " [%2ld] %-22s\n", i, errmsg);
+	}
+}
+
+bool txgbe_check_recovery_capability(struct pci_dev *dev)
+{
+	if (IS_ENABLED(CONFIG_X86)) {
+		goto out;
+	} else {
+		/* check upstream bridge is root or PLX bridge,
+		 * or cpu is kupeng 920 or not
+		 */
+		if (dev->bus->self->vendor == 0x10b5 ||
+		    dev->bus->self->vendor == 0x19e5)
+			goto out;
+		else
+			return false;
+	}
+out:
+	return true;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h
new file mode 100644
index 0000000000000000000000000000000000000000..1143c82a84b4d937d05fbf9650061453e4fdc843
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_pcierr.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_PCIERR_H_
+#define _TXGBE_PCIERR_H_
+
+#include "txgbe.h"
+
+#define TXGBE_AER_UNCORRECTABLE 1
+#define TXGBE_AER_CORRECTABLE 2
+
+void txgbe_pcie_do_recovery(struct pci_dev *dev);
+void txgbe_aer_print_error(struct txgbe_adapter *adapter, u32 severity, u32 status);
+bool txgbe_check_recovery_capability(struct pci_dev *dev);
+
+#endif
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 5e1f9891d9bed383706cdc414656ef112d56e443..6a72db4ff96a78d80d7382f32d956eebc32fc3cb 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -16,6 +16,7 @@
 /* Device IDs */
 #define TXGBE_DEV_ID_SP1000 0x1001
 #define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_VF_DEVICE_ID 0x1000
 
 /* Subsystem IDs */
 /* SFP */
@@ -497,6 +498,7 @@ struct txgbe_thermal_sensor_data {
 #define TXGBE_TDM_TCP_FLG_H 0x1807C
 #define TXGBE_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
 #define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */
+#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/
 /* TDM CTL BIT */
 #define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */
 #define TXGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */
@@ -1048,7 +1050,7 @@ enum txgbe_fdir_pballoc_type {
 #define TXGBE_PSR_MAX_SZ 0x15020
 
 /****************************** TDB ******************************************/
-#define TXGBE_TDB_RFCS 0x1CE00
+#define TXGBE_TDB_TFCS 0x1CE00
 #define TXGBE_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) /* 8 of these */
 #define TXGBE_TDB_MNG_TC 0x1CD10
 #define TXGBE_TDB_PRB_CTL 0x17010