diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst b/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
new file mode 100644
index 0000000000000000000000000000000000000000..07935a1dc38c2352ef4a4170445b78cfcaf1bcfe
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
@@ -0,0 +1,40 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================================================
+Linux Base Virtual Function Driver for WangXun(R) 10G Ethernet
+================================================================
+
+WangXun 10 Gigabit Virtual Function Linux driver.
+Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd.
+
+
+Contents
+========
+
+- Identifying Your Adapter
+- Known Issues
+- Support
+
+
+Identifying Your Adapter
+========================
+The driver is compatible with WangXun Sapphire dual-port Ethernet adapters.
+
+Known Issues/Troubleshooting
+============================
+
+SR-IOV requires the correct platform and OS support.
+
+The guest OS loading this driver must support MSI-X interrupts.
+
+This driver is only supported as a loadable module at this time. WangXun is
+not supplying patches against the kernel source to allow for static linking
+of the drivers.
+
+VLANs: There is a limit of a total of 64 shared VLANs to 1 or more VFs.
+
+
+Support
+=======
+If you have any problems, contact the WangXun support team via
+support@trustnetic.com and Cc: netdev.
diff --git a/MAINTAINERS b/MAINTAINERS
index 444c07155994fe63da27a6ff106e493e5d953820..431c465180758b624c49ec9b2a6d6e5339a852fb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -18979,16 +18979,12 @@ F:	drivers/input/tablet/wacom_serial4.c
 
 WANGXUN ETHERNET DRIVER
 M:	Jiawen Wu
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
-F:	drivers/net/ethernet/wangxun/
-
-WANGXUN ETHERNET DRIVER
 M:	Duanqiang Wen
 L:	netdev@vger.kernel.org
 S:	Maintained
+F:	Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
 F:	Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst
+F:	Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst
 F:	drivers/net/ethernet/wangxun/
 
 WATCHDOG DEVICE DRIVERS
diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_TXGBEVF b/anolis/configs/L0-MANDATORY/arm64/CONFIG_TXGBEVF
new file mode 100644
index 0000000000000000000000000000000000000000..50cf5de61e180d10df5ed3e6849732079c069b08
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_TXGBEVF
@@ -0,0 +1 @@
+CONFIG_TXGBEVF=m
diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_TXGBEVF b/anolis/configs/L0-MANDATORY/x86/CONFIG_TXGBEVF
new file mode 100644
index 0000000000000000000000000000000000000000..50cf5de61e180d10df5ed3e6849732079c069b08
--- /dev/null
+++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_TXGBEVF
@@ -0,0 +1 @@
+CONFIG_TXGBEVF=m
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 3590d7ad59f43a368dc54dea3523e25017b14227..65446e10e00f23ef21b08e4c7e3c8f690f6b5dea 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -42,4 +42,16 @@ config NGBE
 	  To compile this driver as a module, choose M here. The module
 	  will be called ngbe.
 
+config TXGBEVF
+	tristate "Netswift PCI-Express 10GbE Virtual Function support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports Netswift 10gigabit ethernet adapters
+	  virtual functions.
For more information on how to identify + your adapter, go to + + To compile this driver as a module, choose M here. The module + will be called txgbevf. + endif # NET_VENDOR_WANGXUN diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile index ac3fb06b233ca4ff9e5164f84fdfc13bcd80ff9c..1163c5f126ae2e868dec4a40ba72dd469a9cf3c9 100644 --- a/drivers/net/ethernet/wangxun/Makefile +++ b/drivers/net/ethernet/wangxun/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_TXGBE) += txgbe/ obj-$(CONFIG_NGBE) += ngbe/ +obj-$(CONFIG_TXGBEVF) += txgbevf/ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h index 6301cf3533187bdc89f5ea450829cae2031e11c4..b4a4cb44b88388fb404ece5a66f016c8edbaa6b9 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h @@ -718,6 +718,7 @@ struct ngbe_msg { }; extern char ngbe_driver_name[]; +extern const char ngbe_driver_version[]; static inline struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw) { diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index befbcd367087a7e461c2afb3519302d846f344bd..13d81085894db3614d8740316c5bfdca35e0c7d8 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -1016,6 +1016,8 @@ static void ngbe_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->driver, ngbe_driver_name, sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ngbe_driver_version, + sizeof(drvinfo->version) - 1); strncpy(drvinfo->fw_version, adapter->eeprom_id, sizeof(drvinfo->fw_version)); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 9624018dde0f00c2e742a1746faf408df4575029..52d89e6d716c85638641d6f0e64fd46f39f77219 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -19,6 +19,8 @@ #include "ngbe_sriov.h" char ngbe_driver_name[] = "ngbe"; +#define DRV_VERSION __stringify(1.2.5-k) +const char ngbe_driver_version[32] = DRV_VERSION; static struct workqueue_struct *ngbe_wq; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 88c816a6468eb49688e3e330579f16214ffdeda9..666cf79e9cae974119b0f2464320d3e6350c9dee 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -243,6 +243,13 @@ enum txgbe_ring_f_enum { #define TXGBE_VMDQ_4Q_MASK 0x7C #define TXGBE_VMDQ_2Q_MASK 0x7E +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) @@ -445,6 +452,7 @@ struct txgbe_mac_addr { #define TXGBE_FLAG2_EEE_CAPABLE BIT(14) #define TXGBE_FLAG2_EEE_ENABLED BIT(15) #define TXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(16) +#define TXGBE_FLAG2_VLAN_PROMISC BIT(17) #define TXGBE_FLAG2_DEV_RESET_REQUESTED BIT(18) #define TXGBE_FLAG2_RESET_INTR_RECEIVED BIT(19) #define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED BIT(20) @@ -594,6 +602,7 @@ struct txgbe_adapter { struct timer_list service_timer; struct work_struct service_task; + struct work_struct sfp_sta_task; struct hlist_head fdir_filter_list; unsigned long fdir_overflow; /* number of times 
ATR was backed off */ union txgbe_atr_input fdir_mask; @@ -602,6 +611,8 @@ struct txgbe_adapter { u32 atr_sample_rate; spinlock_t fdir_perfect_lock; /*spinlock for FDIR */ + struct txgbe_etype_filter_info etype_filter_info; + u32 wol; char eeprom_id[32]; @@ -641,7 +652,7 @@ struct txgbe_adapter { #define TXGBE_MAX_RETA_ENTRIES 128 u8 rss_indir_tbl[TXGBE_MAX_RETA_ENTRIES]; #define TXGBE_RSS_KEY_SIZE 40 - u32 *rss_key; + u32 rss_key[TXGBE_RSS_KEY_SIZE / sizeof(u32)]; /* misc interrupt status block */ dma_addr_t isb_dma; @@ -652,7 +663,6 @@ struct txgbe_adapter { struct vf_data_storage *vfinfo; struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; - u8 vf_mode; #ifdef CONFIG_PCI_IOV u32 timer_event_accumulator; u32 vferr_refcount; @@ -687,7 +697,7 @@ struct txgbe_fdir_filter { struct hlist_node fdir_node; union txgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; enum txgbe_state_t { @@ -782,6 +792,8 @@ static inline void txgbe_dbg_init(void) {} static inline void txgbe_dbg_exit(void) {} #endif +void txgbe_setup_reta(struct txgbe_adapter *adapter); + static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); @@ -874,6 +886,7 @@ static inline void txgbe_intr_trigger(struct txgbe_hw *hw, u64 qmask) #define usec_delay(_x) udelay(_x) extern char txgbe_driver_name[]; +extern const char txgbe_driver_version[]; struct txgbe_msg { u16 msg_enable; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 25b71e47efe9e5d45108da78e5186c40c0741e4d..725caf0197f3d6d73d6b3cefd1029674971c4568 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -624,10 +624,7 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); curr_autoneg = !!(curr_autoneg & (0x1 << 12)); if (old == advertised && curr_autoneg == adapter->an37) - return err; - } else { - if (old == advertised) - return err; + return -EINVAL; } /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) @@ -661,7 +658,7 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, 10000baseKR_Full)) { err = txgbe_set_link_to_kr(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - return err; + } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKX4_Full)) { err = txgbe_set_link_to_kx4(hw, 1); @@ -1278,6 +1275,8 @@ static void txgbe_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->driver, txgbe_driver_name, sizeof(drvinfo->driver) - 1); + strscpy(drvinfo->version, txgbe_driver_version, + sizeof(drvinfo->version)); strncpy(drvinfo->fw_version, adapter->eeprom_id, sizeof(drvinfo->fw_version)); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), @@ -2185,6 +2184,10 @@ static int txgbe_run_loopback_test(struct txgbe_adapter *adapter) static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data) { + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); + *data = txgbe_setup_desc_rings(adapter); if (*data) goto out; @@ -2205,6 +2208,10 @@ static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data) err_loopback: txgbe_free_desc_rings(adapter); out: + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + 
TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); + return *data; } @@ -2421,7 +2428,8 @@ static int txgbe_set_phys_id(struct net_device *netdev, return 2; case ETHTOOL_ID_ON: - if (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) { + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) { if (adapter->link_up) { switch (adapter->link_speed) { case TXGBE_LINK_SPEED_10GB_FULL: @@ -2445,7 +2453,8 @@ static int txgbe_set_phys_id(struct net_device *netdev, break; case ETHTOOL_ID_OFF: - if (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) { + if ((hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1bd4) || + (hw->oem_ssid != 0x0085 && hw->oem_svid == 0x1ff9)) { if (adapter->link_up) { switch (adapter->link_speed) { case TXGBE_LINK_SPEED_10GB_FULL: @@ -2543,6 +2552,13 @@ static int txgbe_set_coalesce(struct net_device *netdev, u16 tx_itr_prev; bool need_reset = false; + if (ec->tx_max_coalesced_frames_irq == adapter->tx_work_limit && + adapter->rx_itr_setting <= 1 ? ec->rx_coalesce_usecs == adapter->rx_itr_setting : + ec->rx_coalesce_usecs == adapter->rx_itr_setting >> 2) { + e_info(probe, "no coalesce parameters changed, aborting\n"); + return -EINVAL; + } + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { /* reject Tx specific changes in case of mixed RxTx vectors */ if (ec->tx_coalesce_usecs) @@ -2621,6 +2637,38 @@ static int txgbe_set_coalesce(struct net_device *netdev, return 0; } +static int txgbe_match_etype_entry(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].rule_idx == sw_idx) + break; + } + + return i; +} + +static int txgbe_get_etype_rule(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, int ef_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + u8 mask[6] = {0, 0, 0, 0, 0, 0}; + u8 mac[6] = {0, 0, 0, 0, 0, 0}; + + fsp->flow_type = ETHER_FLOW; + ether_addr_copy(fsp->h_u.ether_spec.h_dest, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_dest, mask); + ether_addr_copy(fsp->h_u.ether_spec.h_source, mac); + ether_addr_copy(fsp->m_u.ether_spec.h_source, mask); + fsp->h_u.ether_spec.h_proto = htons(ef_info->etype_filters[ef_idx].ethertype); + fsp->m_u.ether_spec.h_proto = 0xFFFF; + fsp->ring_cookie = ef_info->etype_filters[ef_idx].action; + + return 0; +} + static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2630,6 +2678,15 @@ static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct hlist_node *node; struct txgbe_fdir_filter *rule = NULL; + if (adapter->etype_filter_info.count > 0) { + int ef_idx; + + ef_idx = txgbe_match_etype_entry(adapter, fsp->location); + if (ef_idx < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return txgbe_get_etype_rule(adapter, fsp, ef_idx); + } + + /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; @@ -2692,9 +2749,10 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; struct hlist_node *node; struct txgbe_fdir_filter *rule; - int cnt = 0; + int cnt = 0, i; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; @@ -2707,6 +2765,13 @@ static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, cnt++; } + for (i = 0; i < 
TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) { + rule_locs[cnt] = ef_info->etype_filters[i].rule_idx; + cnt++; + } + } + cmd->rule_cnt = cnt; return 0; @@ -2766,7 +2831,8 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = adapter->fdir_filter_count; + cmd->rule_cnt = adapter->fdir_filter_count + + adapter->etype_filter_info.count; ret = 0; break; case ETHTOOL_GRXCLSRULE: @@ -2786,6 +2852,160 @@ static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +static int +txgbe_ethertype_filter_lookup(struct txgbe_etype_filter_info *ef_info, + u16 ethertype) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->etype_filters[i].ethertype == ethertype && + (ef_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static int +txgbe_ethertype_filter_insert(struct txgbe_etype_filter_info *ef_info, + struct txgbe_ethertype_filter *etype_filter) +{ + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (ef_info->ethertype_mask & (1 << i)) + continue; + + ef_info->ethertype_mask |= 1 << i; + ef_info->etype_filters[i].ethertype = etype_filter->ethertype; + ef_info->etype_filters[i].etqf = etype_filter->etqf; + ef_info->etype_filters[i].etqs = etype_filter->etqs; + ef_info->etype_filters[i].rule_idx = etype_filter->rule_idx; + ef_info->etype_filters[i].action = etype_filter->action; + break; + } + + return (i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS ? i : -1); +} + +static int txgbe_add_ethertype_filter(struct txgbe_adapter *adapter, + struct ethtool_rx_flow_spec *fsp) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_ethertype_filter etype_filter; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + u32 etqf = 0; + u32 etqs = 0; + u8 queue, vf; + u32 ring; + int ret; + + ethertype = ntohs(fsp->h_u.ether_spec.h_proto); + if (!ethertype) { + e_err(drv, "protocol number is missing for ethertype filter\n"); + return -EINVAL; + } + if (ethertype == ETH_P_IP || ethertype == ETH_P_IPV6) { + e_err(drv, "unsupported ether_type(0x%04x) in ethertype filter\n", + ethertype); + return -EINVAL; + } + + ret = txgbe_ethertype_filter_lookup(ef_info, ethertype); + if (ret >= 0) { + e_err(drv, "ethertype (0x%04x) filter exists.", ethertype); + return -EEXIST; + } + + /* ring_cookie is a masked into a set of queues and txgbe pools */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + e_err(drv, "drop option is unsupported."); + return -EINVAL; + } + + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!vf && ring >= adapter->num_rx_queues) + return -EINVAL; + else if (vf && ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; + + etqs |= queue << TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT; + etqs |= TXGBE_RDB_ETYPE_CLS_QUEUE_EN; + etqf = TXGBE_PSR_ETYPE_SWC_FILTER_EN | ethertype; + if (adapter->num_vfs) { + u8 pool; + + if (!vf) + pool = adapter->num_vfs; + else + pool = vf - 1; + + etqf |= TXGBE_PSR_ETYPE_SWC_POOL_ENABLE; + etqf |= pool << TXGBE_PSR_ETYPE_SWC_POOL_SHIFT; + } + + etype_filter.ethertype = ethertype; + etype_filter.etqf = etqf; + etype_filter.etqs = etqs; + etype_filter.rule_idx = 
fsp->location; + etype_filter.action = fsp->ring_cookie; + ret = txgbe_ethertype_filter_insert(ef_info, &etype_filter); + if (ret < 0) { + e_err(drv, "ethertype filters are full."); + return -ENOSPC; + } + + wr32(hw, TXGBE_PSR_ETYPE_SWC(ret), etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(ret), etqs); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count++; + + return 0; +} + +static int txgbe_del_ethertype_filter(struct txgbe_adapter *adapter, u16 sw_idx) +{ + struct txgbe_etype_filter_info *ef_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + u16 ethertype; + int idx; + + idx = txgbe_match_etype_entry(adapter, sw_idx); + if (idx == TXGBE_MAX_PSR_ETYPE_SWC_FILTERS) + return -EINVAL; + + ethertype = ef_info->etype_filters[idx].ethertype; + if (!ethertype) { + e_err(drv, "ethertype filter doesn't exist."); + return -ENOENT; + } + + ef_info->ethertype_mask &= ~(1 << idx); + ef_info->etype_filters[idx].ethertype = 0; + ef_info->etype_filters[idx].etqf = 0; + ef_info->etype_filters[idx].etqs = 0; + ef_info->etype_filters[idx].etqs = false; + ef_info->etype_filters[idx].rule_idx = 0; + + wr32(hw, TXGBE_PSR_ETYPE_SWC(idx), 0); + wr32(hw, TXGBE_RDB_ETYPE_CLS(idx), 0); + TXGBE_WRITE_FLUSH(hw); + + ef_info->count--; + + return 0; +} + static int txgbe_update_ethtool_fdir_entry(struct txgbe_adapter *adapter, struct txgbe_fdir_filter *input, u16 sw_idx) @@ -2924,6 +3144,9 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter, int err; u16 ptype = 0; + if ((fsp->flow_type & ~FLOW_EXT) == ETHER_FLOW) + return txgbe_add_ethertype_filter(adapter, fsp); + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; @@ -2934,12 +3157,16 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter, queue = TXGBE_RDB_FDIR_DROP_QUEUE; } else { u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); if (ring >= adapter->num_rx_queues) return -EINVAL; /* Map the ring onto the absolute queue index */ - queue = adapter->rx_ring[ring]->reg_idx; + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; } /* Don't allow indexes to exist outside of available space */ @@ -3089,6 +3316,12 @@ static int txgbe_del_ethtool_fdir_entry(struct txgbe_adapter *adapter, (struct ethtool_rx_flow_spec *)&cmd->fs; int err; + if (adapter->etype_filter_info.count > 0) { + err = txgbe_del_ethertype_filter(adapter, fsp->location); + if (!err) + return 0; + } + spin_lock(&adapter->fdir_perfect_lock); err = txgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); spin_unlock(&adapter->fdir_perfect_lock); @@ -3267,6 +3500,7 @@ static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, struct txgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = 128; + struct txgbe_hw *hw = &adapter->hw; if (hfunc) return -EINVAL; @@ -3276,6 +3510,11 @@ static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, int max_queues = min_t(int, adapter->num_rx_queues, TXGBE_RSS_INDIR_TBL_MAX); + /*Allow at least 2 queues w/ SR-IOV.*/ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED && + max_queues < 2) + max_queues = 2; + /* Verify user input. 
*/ for (i = 0; i < reta_entries; i++) if (indir[i] >= max_queues) @@ -3283,12 +3522,18 @@ static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, for (i = 0; i < reta_entries; i++) adapter->rss_indir_tbl[i] = indir[i]; + + txgbe_store_reta(adapter); } /* Fill out the rss hash key */ - if (key) + if (key) { memcpy(adapter->rss_key, key, txgbe_get_rxfh_key_size(netdev)); + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + } txgbe_store_reta(adapter); return 0; @@ -3342,6 +3587,9 @@ static unsigned int txgbe_max_channels(struct txgbe_adapter *adapter) if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) { /* We only support one q_vector without MSI-X */ max_combined = 1; + } else if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; } else if (adapter->atr_sample_rate) { /* support up to 64 queues with ATR */ max_combined = TXGBE_MAX_FDIR_INDICES; @@ -3370,6 +3618,10 @@ static void txgbe_get_channels(struct net_device *dev, /* record RSS queues */ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return; + /* nothing else to report if RSS is disabled */ if (ch->combined_count == 1) return; @@ -3421,20 +3673,27 @@ static int txgbe_get_module_info(struct net_device *dev, u32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask) != 0) + return -EBUSY; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); /* Check whether we support SFF-8472 or not */ status = TCALL(hw, phy.ops.read_i2c_eeprom, TXGBE_SFF_SFF_8472_COMP, &sff8472_rev); if (status != 0) - return -EIO; + goto ERROR_IO; /* addressing mode is not supported */ status = TCALL(hw, phy.ops.read_i2c_eeprom, TXGBE_SFF_SFF_8472_SWAP, &addr_mode); if (status != 0) - return -EIO; + goto ERROR_IO; if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { netif_err(adapter, drv, dev, @@ -3442,7 +3701,8 @@ static int txgbe_get_module_info(struct net_device *dev, page_swap = true; } - if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) { + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & TXGBE_SFF_DDM_IMPLEMENTED)) { /* We have a SFP, but it does not support SFF-8472 */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; @@ -3452,7 +3712,12 @@ static int txgbe_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return 0; +ERROR_IO: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EIO; } static int txgbe_get_module_eeprom(struct net_device *dev, @@ -3464,14 +3729,21 @@ static int txgbe_get_module_eeprom(struct net_device *dev, u32 status = TXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask) != 0) + return -EBUSY; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + cancel_work_sync(&adapter->sfp_sta_task); if (ee->len == 0) - return -EINVAL; + goto ERROR_INVAL; for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) - return -EBUSY; + goto ERROR_BUSY; if 
(i < ETH_MODULE_SFF_8079_LEN) status = TCALL(hw, phy.ops.read_i2c_eeprom, i, @@ -3481,12 +3753,24 @@ static int txgbe_get_module_eeprom(struct net_device *dev, &databyte); if (status != 0) - return -EIO; + goto ERROR_IO; data[i - ee->offset] = databyte; } + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return 0; + +ERROR_INVAL: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EINVAL; +ERROR_BUSY: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EBUSY; +ERROR_IO: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return -EIO; } static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 98477aaa2551cfa16bfcf5b0a1850756ec4ee4c8..5d519f4ddab0695a5550be9096ccb24219cb163a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -603,7 +603,7 @@ s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) static s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) { s32 status = TXGBE_ERR_EEPROM; - u32 timeout = 2000; + u32 timeout = 4000; u32 i; u32 swsm; @@ -3143,6 +3143,7 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) phy->ops.read_i2c_byte = txgbe_read_i2c_byte; phy->ops.read_i2c_sff8472 = txgbe_read_i2c_sff8472; phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom; + phy->ops.read_i2c_sfp_phy = txgbe_read_i2c_sfp_phy; phy->ops.identify_sfp = txgbe_identify_module; phy->ops.check_overtemp = txgbe_check_overtemp; phy->ops.identify = txgbe_identify_phy; @@ -5855,6 +5856,22 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, *link_up = false; } + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + *link_up = hw->f2c_mod_status; + + if (*link_up) { + /* recover led configure when link up */ + wr32(hw, TXGBE_CFG_LED_CTL, 0); + } else { + /* over write led when link down */ + hw->mac.ops.led_off(hw, TXGBE_LED_LINK_UP | + TXGBE_LED_LINK_10G | + TXGBE_LED_LINK_1G | + TXGBE_LED_LINK_ACTIVE); + } + } + if (*link_up) { if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) == TXGBE_CFG_PORT_ST_LINK_10G) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c index 2ab7547a5172c7cfbdc5fedea47840716c4e3052..9643719304419059561fd26d1e72c3a0710554e7 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c @@ -107,7 +107,7 @@ static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) /* 64 pool mode with 2 queues per pool, or * 16/32/64 pool mode with 1 queue per pool */ - if (vmdq_i > 32 || rss_i < 4 || adapter->vf_mode == 63) { + if (vmdq_i > 32) { vmdq_m = TXGBE_VMDQ_2Q_MASK; rss_m = TXGBE_RSS_2Q_MASK; rss_i = min_t(u16, rss_i, 2); @@ -115,7 +115,8 @@ static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) } else { vmdq_m = TXGBE_VMDQ_4Q_MASK; rss_m = TXGBE_RSS_4Q_MASK; - rss_i = 4; + /* We can support 4, 2, or 1 queues */ + rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 
2 : 1; } /* remove the starting offset from the pool count */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index e880d6a46e1e5a3e345ae1bc653f49d497b11f83..de84ffaa14a01987ba6adfbbb2dba6caa5cead66 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -25,6 +25,8 @@ #include "txgbe_sriov.h" char txgbe_driver_name[] = "txgbe"; +#define DRV_VERSION __stringify(1.3.5.1-k) +const char txgbe_driver_version[32] = DRV_VERSION; static const char txgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated." @@ -2136,28 +2138,6 @@ static void txgbe_configure_srrctl(struct txgbe_adapter *adapter, wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); } -/** - * txgbe_init_rss_key - Initialize adapter RSS key - * @adapter: device handle - * - * Allocates and initializes the RSS key if it is not allocated. - **/ -static inline int txgbe_init_rss_key(struct txgbe_adapter *adapter) -{ - u32 *rss_key; - - if (!adapter->rss_key) { - rss_key = kzalloc(TXGBE_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; - - netdev_rss_key_fill(rss_key, TXGBE_RSS_KEY_SIZE); - adapter->rss_key = rss_key; - } - - return 0; -} - /** * Write the RETA table to HW * @@ -2186,7 +2166,25 @@ void txgbe_store_reta(struct txgbe_adapter *adapter) } } -static void txgbe_setup_reta(struct txgbe_adapter *adapter) +static void txgbe_store_vfreta(struct txgbe_adapter *adapter) +{ + unsigned int pf_pool = adapter->num_vfs; + u8 *indir_tbl = adapter->rss_indir_tbl; + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u32 i; + + /* Write redirection table to HW */ + for (i = 0; i < 64; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_VMRSSTBL(i >> 2, pf_pool), reta); + reta = 0; + } + } +} + +void txgbe_setup_reta(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; u32 i, j; @@ -2213,6 +2211,27 @@ static void txgbe_setup_reta(struct txgbe_adapter *adapter) txgbe_store_reta(adapter); } +static void txgbe_setup_vfreta(struct txgbe_adapter *adapter) +{ + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_VMRSSRK(i, pf_pool), *(adapter->rss_key + i)); + + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_vfreta(adapter); +} + static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -2235,7 +2254,23 @@ static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); - txgbe_setup_reta(adapter); + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + unsigned int pool = adapter->num_vfs; + u32 vfmrqc; + + /* Setup RSS through the VF registers */ + txgbe_setup_vfreta(adapter); + + vfmrqc = rd32(hw, TXGBE_RDB_PL_CFG(pool)); + vfmrqc &= ~TXGBE_RDB_PL_CFG_RSS_MASK; + vfmrqc |= rss_field | TXGBE_RDB_PL_CFG_RSS_EN; + wr32(hw, TXGBE_RDB_PL_CFG(pool), vfmrqc); + + /* Enable VF RSS mode */ + rss_field |= TXGBE_RDB_RA_CTL_MULTI_RSS; + } else { + txgbe_setup_reta(adapter); + } if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) rss_field |= TXGBE_RDB_RA_CTL_RSS_EN; @@ -2364,6 +2399,8 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, u64 rdba = ring->dma; u32 rxdctl; u16 reg_idx = 
ring->reg_idx; + struct net_device *netdev = adapter->netdev; + netdev_features_t features = netdev->features; /* disable queue to avoid issues while updating state */ rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); @@ -2377,6 +2414,12 @@ void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, else rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rxdctl |= TXGBE_PX_RR_CFG_VLAN; + else + rxdctl &= ~TXGBE_PX_RR_CFG_VLAN; + rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT; wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); @@ -2509,7 +2552,7 @@ static int txgbe_vlan_rx_add_vid(struct net_device *netdev, int pool_ndx = VMDQ_P(0); /* add VID to filter table */ - if (hw->mac.ops.set_vfta) + if (!vid || !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, true); if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { @@ -2535,7 +2578,8 @@ static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, /* remove VID from filter table */ if (hw->mac.ops.set_vfta) { - TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, false); + if (vid && !(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, false); if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { int i; /* remove vlan id from all pools */ @@ -2551,6 +2595,128 @@ static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, return 0; } +static void txgbe_vlan_promisc_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl, i; + u32 vlvfb; + u32 vind; + u32 bits; + + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + return; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 |= TXGBE_FLAG2_VLAN_PROMISC; + + /* Add PF to all active pools */ + vind = VMDQ_P(0); + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + vlvfb = rd32(hw, TXGBE_PSR_VLAN_SWC_IDX); + + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } + + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < hw->mac.vft_size; i++) + wr32(hw, TXGBE_PSR_VLAN_TBL(i), ~0U); +} + +static void txgbe_scrub_vfta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, vid, bits; + u32 vfta; + u32 vind; + u32 vlvf; + + for (i = TXGBE_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC_IDX); + + /* pull VLAN ID from VLVF */ + vid = vlvf & ~TXGBE_PSR_VLAN_SWC_VIEN; + + if (vlvf & TXGBE_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + vind = VMDQ_P(0); + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + 
bits); + } + } + + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < hw->mac.vft_size; i++) { + vfta = hw->mac.vft_shadow[i]; + wr32(hw, TXGBE_PSR_VLAN_TBL(i), vfta); + } +} + +static void txgbe_vlan_promisc_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(hw, TXGBE_PSR_VLAN_CTL); + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags2 & TXGBE_FLAG2_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + + txgbe_scrub_vfta(adapter); +} + /** * txgbe_vlan_strip_disable - helper to disable vlan tag stripping * @adapter: driver data @@ -2858,6 +3024,7 @@ void txgbe_set_rx_mode(struct net_device *netdev) struct txgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr, vlnctrl; int count; + netdev_features_t features = netdev->features; /* Check for Promiscuous and All Multicast modes */ fctrl = rd32m(hw, TXGBE_PSR_CTL, @@ -2876,7 +3043,6 @@ void txgbe_set_rx_mode(struct net_device *netdev) vmolr |= TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_AUPE | TXGBE_PSR_VM_L2CTL_VACC; - vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; hw->addr_ctrl.user_set_promisc = false; if (netdev->flags & IFF_PROMISC) { @@ -2885,6 +3051,12 @@ void txgbe_set_rx_mode(struct net_device *netdev) /* pf don't want packets routing to vf, so clear UPE */ vmolr |= TXGBE_PSR_VM_L2CTL_MPE; vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + + if ((adapter->flags & (TXGBE_FLAG_VMDQ_ENABLED | + TXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; } if (netdev->flags & IFF_ALLMULTI) { @@ -2900,6 +3072,8 @@ void txgbe_set_rx_mode(struct net_device *netdev) wr32m(hw, TXGBE_RSC_CTL, TXGBE_RSC_CTL_SAVE_MAC_ERR, TXGBE_RSC_CTL_SAVE_MAC_ERR); + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; } else { vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | TXGBE_PSR_VM_L2CTL_ROMPE; } @@ -2924,14 +3098,17 @@ void txgbe_set_rx_mode(struct net_device *netdev) vmolr |= TXGBE_PSR_VM_L2CTL_MPE; } - wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); wr32(hw, TXGBE_PSR_CTL, fctrl); wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) - txgbe_vlan_strip_enable(adapter); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + txgbe_vlan_promisc_disable(adapter); else - txgbe_vlan_strip_disable(adapter); + txgbe_vlan_promisc_enable(adapter); + if (features & NETIF_F_HW_VLAN_STAG_FILTER) + txgbe_vlan_promisc_disable(adapter); + else + txgbe_vlan_promisc_enable(adapter); } static void txgbe_napi_enable_all(struct txgbe_adapter *adapter) @@ -3080,6 +3257,23 @@ static void txgbe_configure_pb(struct txgbe_adapter *adapter) txgbe_pbthresh_setup(adapter); } +static void txgbe_ethertype_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_etype_filter_info *filter_info = &adapter->etype_filter_info; + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < TXGBE_MAX_PSR_ETYPE_SWC_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + wr32(hw, TXGBE_PSR_ETYPE_SWC(i), + filter_info->etype_filters[i].etqf); + wr32(hw, TXGBE_RDB_ETYPE_CLS(i), + filter_info->etype_filters[i].etqs); + TXGBE_WRITE_FLUSH(hw); + } + } +} + static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; @@ -3195,6 
+3389,8 @@ static void txgbe_configure_virtualization(struct txgbe_adapter *adapter) wr32(hw, TXGBE_TDM_VF_TE(reg_offset), (~0) << vf_shift); wr32(hw, TXGBE_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + adapter->flags2 &= ~TXGBE_FLAG2_VLAN_PROMISC; + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) return; @@ -3245,7 +3441,7 @@ void txgbe_configure_port(struct txgbe_adapter *adapter) u32 i; if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { - if (adapter->ring_feature[RING_F_RSS].indices == 4) + if (adapter->ring_feature[RING_F_RSS].mask == TXGBE_RSS_4Q_MASK) value = TXGBE_CFG_PORT_CTL_NUM_VT_32; else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ value = TXGBE_CFG_PORT_CTL_NUM_VT_64; @@ -3287,6 +3483,8 @@ static void txgbe_configure(struct txgbe_adapter *adapter) TCALL(hw, mac.ops.disable_sec_rx_path); + txgbe_ethertype_filter_restore(adapter); + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { txgbe_init_fdir_signature(&adapter->hw, adapter->fdir_pballoc); @@ -3441,7 +3639,7 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) */ adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; - + hw->f2c_mod_status = false; mod_timer(&adapter->service_timer, jiffies); if (hw->bus.lan_id == 0) { @@ -3708,6 +3906,9 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) del_timer_sync(&adapter->service_timer); + hw->f2c_mod_status = false; + cancel_work_sync(&adapter->sfp_sta_task); + if (hw->bus.lan_id == 0) wr32m(hw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN0_UP, 0); else if (hw->bus.lan_id == 1) @@ -3781,6 +3982,12 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw) return status; } +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + /** * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) * @adapter: board private structure to initialize @@ -3829,8 +4036,7 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) return err; } - if (txgbe_init_rss_key(adapter)) - return -ENOMEM; + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); /* Set common capability flags and settings */ rss = min_t(int, TXGBE_MAX_RSS_INDICES, @@ -3843,7 +4049,6 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) adapter->tx_itr_setting = 1; adapter->atr_sample_rate = 20; - adapter->vf_mode = 63; adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; @@ -5099,6 +5304,53 @@ static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } +static void txgbe_sfp_phy_status_work(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + sfp_sta_task); + struct txgbe_hw *hw = &adapter->hw; + u16 data = 0; + bool status = false; + s32 i2c_status; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask) != 0) + return; + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + i2c_status = TCALL(hw, phy.ops.read_i2c_sfp_phy, + 0x0a, + &data); + + if (i2c_status != 0) + goto RELEASE_SEM; + + /* Avoid read module info and read f2c module internal phy + * may cause i2c controller read reg data err + */ + if ((data & 0x83ff) != 0 || data == 0) + goto RELEASE_SEM; + + if ((data & TXGBE_I2C_PHY_LOCAL_RX_STATUS) && + (data & TXGBE_I2C_PHY_REMOTE_RX_STATUS)) + status = true; + 
else + status = false; + } + +RELEASE_SEM: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + /* sync sfp status to firmware */ + wr32(hw, TXGBE_TSC_LSEC_PKTNUM0, data | 0x80000000); + + if (hw->f2c_mod_status != status) { + hw->f2c_mod_status = status; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } +} + static void txgbe_service_timer(struct timer_list *t) { struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer); @@ -5119,6 +5371,12 @@ static void txgbe_service_timer(struct timer_list *t) mod_timer(&adapter->service_timer, next_event_offset + jiffies); txgbe_service_event_schedule(adapter); + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + next_event_offset = HZ / 10; + queue_work(txgbe_wq, &adapter->sfp_sta_task); + } } static void txgbe_reset_subtask(struct txgbe_adapter *adapter) @@ -6392,16 +6650,10 @@ txgbe_features_check(struct sk_buff *skb, struct net_device *dev, static netdev_features_t txgbe_fix_features(struct net_device *netdev, netdev_features_t features) { - struct txgbe_adapter *adapter = netdev_priv(netdev); - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; - /* Turn off LRO if not RSC capable */ - if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) - features &= ~NETIF_F_LRO; - if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) features &= ~NETIF_F_HW_VLAN_STAG_RX; else @@ -6410,6 +6662,11 @@ static netdev_features_t txgbe_fix_features(struct net_device *netdev, netdev_fe features &= ~NETIF_F_HW_VLAN_STAG_TX; else features |= NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + else + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; return features; } @@ -6418,6 +6675,7 @@ static int txgbe_set_features(struct net_device *netdev, netdev_features_t featu { struct txgbe_adapter *adapter = netdev_priv(netdev); bool need_reset = false; + netdev_features_t changed = netdev->features ^ features; /* Make sure RSC matches LRO, reset if change */ if (!(features & NETIF_F_LRO)) { @@ -6474,11 +6732,6 @@ static int txgbe_set_features(struct net_device *netdev, netdev_features_t featu break; } - if (features & NETIF_F_HW_VLAN_CTAG_RX && features & NETIF_F_HW_VLAN_STAG_RX) - txgbe_vlan_strip_enable(adapter); - else - txgbe_vlan_strip_disable(adapter); - if (features & NETIF_F_RXHASH) { if (!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) { wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, @@ -6493,8 +6746,15 @@ static int txgbe_set_features(struct net_device *netdev, netdev_features_t featu } } + netdev->features = features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + need_reset = true; + if (need_reset) txgbe_do_reset(netdev); + else if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + txgbe_set_rx_mode(netdev); return 0; } @@ -6580,7 +6840,6 @@ static int txgbe_probe(struct pci_dev *pdev, char *info_string, *i_s_var; u8 part_str[TXGBE_PBANUM_LENGTH]; bool disable_dev = false; - u32 match; err = pci_enable_device_mem(pdev); if (err) @@ -6689,8 +6948,7 @@ static int txgbe_probe(struct pci_dev *pdev, ); } - match = min_t(u32, adapter->vf_mode, TXGBE_MAX_VFS_DRV_LIMIT); - pci_sriov_set_totalvfs(pdev, match); + pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT); txgbe_enable_sriov(adapter); #endif /* CONFIG_PCI_IOV */ @@ -6712,7 +6970,8 @@ static int txgbe_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_RXALL; + NETIF_F_RXALL 
| + NETIF_F_NTUPLE; netdev->features |= NETIF_F_NTUPLE; adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -6725,6 +6984,7 @@ static int txgbe_probe(struct pci_dev *pdev, /* set this bit last since it cannot be part of vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; @@ -6757,6 +7017,8 @@ static int txgbe_probe(struct pci_dev *pdev, } txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + memset(&adapter->etype_filter_info, 0, + sizeof(struct txgbe_etype_filter_info)); timer_setup(&adapter->service_timer, txgbe_service_timer, 0); @@ -6765,6 +7027,7 @@ static int txgbe_probe(struct pci_dev *pdev, goto err_free_mac_table; } INIT_WORK(&adapter->service_task, txgbe_service_task); + INIT_WORK(&adapter->sfp_sta_task, txgbe_sfp_phy_status_work); set_bit(__TXGBE_SERVICE_INITED, &adapter->state); clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); @@ -7083,3 +7346,4 @@ MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h index 47ff20284eda707eda4460770e742a7716cfbbb3..24ac69d945022c59bdae1a10a419ea7dda659c06 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h @@ -77,6 +77,7 @@ enum txgbe_pfvf_api_rev { #define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ #define TXGBE_VF_UPDATE_XCAST_MODE 0x0c #define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define TXGBE_VF_GET_FW_VERSION 0x11 /* get fw version */ #define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */ /* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 2ca537ef1a28d9c9a55783e637048c867773a0b2..6d829bcac03390855cf6ebf643ddebef23c8cb43 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -63,6 +63,10 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) u8 oui_bytes[3] = {0, 0, 0}; u8 cable_tech = 0; u8 cable_spec = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask) != 0) + return TXGBE_ERR_SWFW_SYNC; if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber) { hw->phy.sfp_type = txgbe_sfp_type_not_present; @@ -267,9 +271,13 @@ s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) } out: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return status; err_read_i2c_eeprom: + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + hw->phy.sfp_type = txgbe_sfp_type_not_present; if (hw->phy.type != txgbe_phy_nl) hw->phy.type = txgbe_phy_unknown; @@ -288,15 +296,41 @@ s32 txgbe_init_i2c(struct txgbe_hw *hw) TXGBE_I2C_CON_SLAVE_DISABLE)); /* Default addr is 0xA0 ,bit 0 is configure for read/write! 
*/ wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 780); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 780); + wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); + + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; +} + +static s32 txgbe_init_i2c_sfp_phy(struct txgbe_hw *hw) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! */ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 600); wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 600); - wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ + + wr32(hw, TXGBE_I2C_RX_TL, 1); /* 2bytes for rx full signal */ wr32(hw, TXGBE_I2C_TX_TL, 4); + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); wr32(hw, TXGBE_I2C_INTR_MASK, 0); wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; } @@ -311,6 +345,7 @@ s32 txgbe_init_i2c(struct txgbe_hw *hw) s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { + txgbe_init_i2c(hw); return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, TXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); @@ -327,6 +362,7 @@ s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { + txgbe_init_i2c(hw); return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, TXGBE_I2C_EEPROM_DEV_ADDR2, sff8472_data); @@ -347,10 +383,6 @@ static s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, u8 __maybe_unused dev_addr, u8 *data, bool lock) { s32 status = 0; - u32 swfw_mask = hw->phy.phy_semaphore_mask; - - if (lock && 0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) - return TXGBE_ERR_SWFW_SYNC; /* wait tx empty */ status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT, @@ -376,8 +408,6 @@ static s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); out: - if (lock) - TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); return status; } @@ -414,6 +444,83 @@ s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, data, true); } +/** + * txgbe_read_i2c_word_int - Reads 16 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +s32 txgbe_read_i2c_word_int(struct txgbe_hw *hw, u16 byte_offset, + u8 __always_unused dev_addr, u16 *data, bool __always_unused lock) +{ + s32 status = 0; + + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1) { + /* reg offset format 0x000yyyyy */ + byte_offset &= 0x1f; + + /* wait tx empty */ + status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* write reg_offset */ + wr32(hw, TXGBE_I2C_DATA_CMD, (u8)byte_offset | TXGBE_I2C_DATA_CMD_STOP); + + usec_delay(TXGBE_I2C_TIMEOUT); + /* wait tx empty */ + status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ | TXGBE_I2C_DATA_CMD_STOP); + + /* wait for read complete */ + status = txgbe_po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + *data <<= 8; + *data += 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + } + +out: + return status; +} + +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data) +{ + txgbe_switch_i2c_slave_addr(hw, dev_addr); + + return txgbe_read_i2c_word_int(hw, byte_offset, dev_addr, + data, true); +} + +s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, + u16 *data) +{ + txgbe_init_i2c_sfp_phy(hw); + + return txgbe_read_i2c_word(hw, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR3, + data); +} + /** * txgbe_tn_check_overtemp - Checks if an overtemp occurred. 
* @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h index b119e2c0c1030d0d97af607a79620f011c5d62c5..bfd9fd1bbddd544dec0db3e53c13f5a6294fb062 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h @@ -9,6 +9,12 @@ #define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0 #define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +/*fiber to copper module inter reg i2c addr */ +#define TXGBE_I2C_EEPROM_DEV_ADDR3 0xAC +#define TXGBE_I2C_10G_SFP_LINK_STATUS BIT(10) +#define TXGBE_I2C_PHY_LOCAL_RX_STATUS BIT(12) +#define TXGBE_I2C_PHY_REMOTE_RX_STATUS BIT(13) + /* EEPROM byte offsets */ #define TXGBE_SFF_IDENTIFIER 0x0 #define TXGBE_SFF_IDENTIFIER_SFP 0x3 @@ -19,6 +25,7 @@ #define TXGBE_SFF_10GBE_COMP_CODES 0x3 #define TXGBE_SFF_CABLE_TECHNOLOGY 0x8 #define TXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define TXGBE_SFF_DDM_IMPLEMENTED 0x40 #define TXGBE_SFF_SFF_8472_SWAP 0x5C #define TXGBE_SFF_SFF_8472_COMP 0x5E @@ -60,5 +67,9 @@ s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); +s32 txgbe_read_i2c_word(struct txgbe_hw *hw, u16 byte_offset, + u8 dev_addr, u16 *data); +s32 txgbe_read_i2c_sfp_phy(struct txgbe_hw *hw, u16 byte_offset, + u16 *data); #endif /* _TXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c index 053fa8bab40f8b9936b2d6bde06c80a47502ba2c..ea11948a3806426a23706c370e3fbc9eddfd995a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -213,15 +213,14 @@ void txgbe_enable_sriov(struct txgbe_adapter *adapter) "Virtual Functions already enabled for this device\n"); } else { int err; - int match; /* The sapphire supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the * physical function. If the user requests greater thn * 63 VFs then it is an error - reset to default of zero. */ - match = min_t(unsigned int, adapter->vf_mode, TXGBE_MAX_VFS_DRV_LIMIT); - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, match); + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, + TXGBE_MAX_VFS_DRV_LIMIT); err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); if (err) { @@ -602,12 +601,7 @@ static int txgbe_get_vf_queues(struct txgbe_adapter *adapter, msgbuf[TXGBE_VF_TRANS_VLAN] = 0; /* notify VF of default queue */ - if (adapter->vf_mode == 63) - msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; - else if (adapter->vf_mode == 31) - msgbuf[TXGBE_VF_DEF_QUEUE] = 4; - else - msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; + msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; return 0; } @@ -809,7 +803,7 @@ static int txgbe_set_vf_mac_addr(struct txgbe_adapter *adapter, return -1; } - if (adapter->vfinfo[vf].pf_set_mac && + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { e_warn(drv, @@ -911,7 +905,9 @@ static int txgbe_set_vf_vlan_msg(struct txgbe_adapter *adapter, * is cleared if the PF only added itself to the pool * because the PF is in promiscuous mode. 
*/ - if ((vlvf & VLAN_VID_MASK) == vid && !bits) + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && + !bits) txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); } @@ -928,7 +924,7 @@ static int txgbe_set_vf_macvlan_msg(struct txgbe_adapter *adapter, TXGBE_VT_MSGINFO_SHIFT; int err; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); @@ -978,6 +974,12 @@ static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, default: return -EOPNOTSUPP; } + + if (xcast_mode > TXGBEVF_XCAST_MODE_MULTI && + !adapter->vfinfo[vf].trusted) { + xcast_mode = TXGBEVF_XCAST_MODE_MULTI; + } + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) goto out; @@ -1036,6 +1038,31 @@ static int txgbe_get_vf_link_state(struct txgbe_adapter *adapter, return 0; } +static int txgbe_get_fw_version(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + unsigned long *fw_version = (unsigned long *)&msgbuf[1]; + int ret; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } + + ret = kstrtoul(adapter->eeprom_id, 16, fw_version); + if (ret < 0) + return ret; + + if (*fw_version == 0) + return -EOPNOTSUPP; + + return 0; +} + static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) { u16 mbx_size = TXGBE_VXMAILBOX_SIZE; @@ -1102,6 +1129,9 @@ static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) case TXGBE_VF_GET_LINK_STATE: retval = txgbe_get_vf_link_state(adapter, msgbuf, vf); break; + case TXGBE_VF_GET_FW_VERSION: + retval = txgbe_get_fw_version(adapter, msgbuf, vf); + break; case TXGBE_VF_BACKUP: #ifdef CONFIG_PCI_IOV retval = txgbe_vf_backup(adapter, vf); @@ -1268,13 +1298,14 @@ static int txgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, for (i = 0; i < adapter->num_vfs; i++) txgbe_vf_configuration(dev, (i | 0x10000000)); + txgbe_sriov_reinit(adapter); + err = pci_enable_sriov(dev, num_vfs); if (err) { e_dev_warn("Failed to enable PCI sriov: %d\n", err); goto err_out; } txgbe_get_vfs(adapter); - txgbe_sriov_reinit(adapter); out: return num_vfs; @@ -1317,25 +1348,47 @@ int txgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) s32 retval = 0; struct txgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) + if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; - dev_info(pci_dev_to_dev(adapter->pdev), - "setting MAC %pM on VF %d\n", mac, vf); - dev_info(pci_dev_to_dev(adapter->pdev), - "Reload the VF driver to make this change effective.\n"); - retval = txgbe_set_vf_mac(adapter, vf, mac); - if (retval >= 0) { - adapter->vfinfo[vf].pf_set_mac = true; - if (test_bit(__TXGBE_DOWN, &adapter->state)) { - dev_warn(pci_dev_to_dev(adapter->pdev), - "The VF MAC address has been set, but the PF device is not up.\n"); + if (is_valid_ether_addr(mac)) { + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = txgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF device is not up.\n"); 
+ dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { dev_warn(pci_dev_to_dev(adapter->pdev), - "Bring the PF device up before attempting to use the VF device.\n"); + "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + } + } else if (is_zero_ether_addr(mac)) { + unsigned char *vf_mac_addr = + adapter->vfinfo[vf].vf_mac_addresses; + + /* nothing to do */ + if (is_zero_ether_addr(vf_mac_addr)) + return 0; + + dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", + vf); + + retval = txgbe_del_mac_filter(adapter, vf_mac_addr, vf); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = false; + memcpy(vf_mac_addr, mac, ETH_ALEN); + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); } } else { - dev_warn(pci_dev_to_dev(adapter->pdev), - "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + retval = -EINVAL; } return retval; @@ -1404,7 +1457,7 @@ int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, if (vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || qos > 7) return -EINVAL; - if (vlan_proto != htons(ETH_P_8021Q)) + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) return -EPROTONOSUPPORT; if (vlan || qos) { diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 4a4d779dbc6163ca18d3180404927c280582c81e..5e1f9891d9bed383706cdc414656ef112d56e443 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -648,6 +648,9 @@ struct txgbe_thermal_sensor_data { #define TXGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 #define TXGBE_RDB_PL_CFG_RSS_PL_MASK 0x7 #define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT 29 +#define TXGBE_RDB_PL_CFG_RSS_EN 0x1000000 +#define TXGBE_RDB_PL_CFG_RSS_MASK 0xFF0000 + /* RQTC Bit Masks and Shifts */ #define TXGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) #define TXGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) @@ -717,6 +720,7 @@ enum { #define TXGBE_RDB_PB_CTL_DISABLED 0x1 #define TXGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define TXGBE_RDB_RA_CTL_MULTI_RSS 0x00000001U /* VF RSS Hash Rule Enable */ #define TXGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U #define TXGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U #define TXGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U @@ -846,7 +850,7 @@ enum txgbe_fdir_pballoc_type { /* etype switcher 1st stage */ #define TXGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ /* ETYPE Queue Filter/Select Bit Masks */ -#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 2 #define TXGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ #define TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ #define TXGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ @@ -1892,8 +1896,8 @@ enum txgbe_l2_ptypes { #define TXGBE_TXD_TUNNEL_GRE (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 128 +#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 128 /* Transmit Descriptor */ union txgbe_tx_desc { @@ -2027,6 +2031,22 @@ union txgbe_atr_hash_dword { __be32 dword; }; +struct txgbe_ethertype_filter { + u16 rule_idx; + u64 action; + u16 ethertype; + u32 etqf; + u32 etqs; +}; + +/* Structure to store ethertype filters' info. 
*/ +struct txgbe_etype_filter_info { + int count; + u8 ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters */ + struct txgbe_ethertype_filter etype_filters[TXGBE_MAX_PSR_ETYPE_SWC_FILTERS]; +}; + /****************** Manageablility Host Interface defines ********************/ #define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ #define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ @@ -2557,6 +2577,7 @@ struct txgbe_phy_operations { u8 *sff8472_data); s32 (*read_i2c_eeprom)(struct txgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); + s32 (*read_i2c_sfp_phy)(struct txgbe_hw *hw, u16 byte_offset, u16 *data); s32 (*check_overtemp)(struct txgbe_hw *hw); }; @@ -2678,6 +2699,7 @@ struct txgbe_hw { u16 tpid[8]; u16 oem_ssid; u16 oem_svid; + bool f2c_mod_status; }; struct txgbe_hic_write_lldp { diff --git a/drivers/net/ethernet/wangxun/txgbevf/Makefile b/drivers/net/ethernet/wangxun/txgbevf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d7216e1f979fb85719746f5d6306108d0844e42b --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Wangxun(R) 10GbE PCI Express ethernet driver +# virtual functions. +# + +obj-$(CONFIG_TXGBEVF) += txgbevf.o + +txgbevf-objs := txgbevf_main.o txgbe_vf.o \ + txgbe_mbx.o txgbe_txrx.o \ + txgbe_ethtool.o \ No newline at end of file diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbevf/txgbe_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..d6b3e80b95e24e153de827773a27453ffc67b711 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_ethtool.c @@ -0,0 +1,1240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ + +#include +#include +#include +#include +#include +#include +#include +#include "txgbe_vf.h" +#include "txgbe_mbx.h" + +static const char txgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)" +}; + +#define TXGBE_TEST_LEN (sizeof(txgbe_gstrings_test) / ETH_GSTRING_LEN) + +struct txgbe_stat { + char name[ETH_GSTRING_LEN]; + int size; + int offset; +}; + +struct txgbe_queue_stats { + u64 packets; + u64 bytes; +}; + +#define TXGBE_STAT(_name, _stru, _stat) { \ + .name = _name, \ + .size = sizeof_field(_stru, _stat), \ + .offset = offsetof(_stru, _stat), \ +} + +#define TXGBE_NET_STAT(_stat) \ + TXGBE_STAT("net."#_stat, txgbe_net_stats_t, _stat) +static const struct txgbe_stat txgbe_net_stats[] = { +// TXGBE_NET_STAT(rx_packets), +// TXGBE_NET_STAT(tx_packets), +// TXGBE_NET_STAT(rx_bytes), +// TXGBE_NET_STAT(tx_bytes), +// TXGBE_NET_STAT(multicast), +}; + +#define TXGBE_NET_STATS_LEN ARRAY_SIZE(txgbe_net_stats) + +#define TXGBE_SW_STAT(_name, _stat) \ + TXGBE_STAT("sw." 
_name, struct txgbe_adapter, _stat) +static struct txgbe_stat txgbe_sw_stats[] = { + TXGBE_SW_STAT("tx_busy", sw_stats.tx_busy), + TXGBE_SW_STAT("tx_restart_queue", sw_stats.tx_restart_queue), + TXGBE_SW_STAT("tx_timeout_count", sw_stats.tx_timeout_count), + + TXGBE_SW_STAT("rx_csum_bad", sw_stats.rx_csum_bad), + TXGBE_SW_STAT("rx_no_dma_resources", sw_stats.rx_no_dma_resources), + TXGBE_SW_STAT("rx_alloc_page_failed", sw_stats.rx_alloc_page_failed), + TXGBE_SW_STAT("rx_alloc_buff_failed", sw_stats.rx_alloc_buff_failed), +}; + +#define TXGBE_SW_STATS_LEN ARRAY_SIZE(txgbe_sw_stats) + +#define TXGBE_HW_STAT(_name, _stat) \ + TXGBE_STAT("hw." _name, struct txgbe_adapter, _stat) + +static struct txgbe_stat txgbe_hw_stats[] = { + TXGBE_HW_STAT("rx_packets", stats.gprc), + TXGBE_HW_STAT("tx_packets", stats.gptc), + TXGBE_HW_STAT("rx_bytes", stats.gorc), + TXGBE_HW_STAT("tx_bytes", stats.gotc), + TXGBE_HW_STAT("multicast", stats.mprc), + TXGBE_HW_STAT("last_gprc", last_stats.gprc), + TXGBE_HW_STAT("last_gptc", last_stats.gptc), + TXGBE_HW_STAT("last_gorc", last_stats.gorc), + TXGBE_HW_STAT("last_gotc", last_stats.gotc), + TXGBE_HW_STAT("last_mprc", last_stats.mprc), + TXGBE_HW_STAT("base_gprc", base_stats.gprc), + TXGBE_HW_STAT("base_gptc", base_stats.gptc), + TXGBE_HW_STAT("base_gorc", base_stats.gorc), + TXGBE_HW_STAT("base_gotc", base_stats.gotc), + TXGBE_HW_STAT("base_mprc", base_stats.mprc), + TXGBE_HW_STAT("reset_gprc", reset_stats.gprc), + TXGBE_HW_STAT("reset_gptc", reset_stats.gptc), + TXGBE_HW_STAT("reset_gorc", reset_stats.gorc), + TXGBE_HW_STAT("reset_gotc", reset_stats.gotc), + TXGBE_HW_STAT("reset_mprc", reset_stats.mprc), +}; + +#define TXGBE_HW_STATS_LEN ARRAY_SIZE(txgbe_hw_stats) + +#define TXGBE_QUEUE_STATS_LEN \ + ((((struct txgbe_adapter *)netdev_priv(netdev))->num_tx_queues \ + + ((struct txgbe_adapter *)netdev_priv(netdev))->num_rx_queues) \ + * (sizeof(struct txgbe_queue_stats) / sizeof(u64))) + +#define TXGBE_STATS_LEN (TXGBE_NET_STATS_LEN \ + + TXGBE_SW_STATS_LEN \ + + TXGBE_HW_STATS_LEN \ + + TXGBE_QUEUE_STATS_LEN) + +static int txgbevf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = 0; + bool link_up = false; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = -1; + + if (!in_interrupt()) { + hw->mac.get_link_status = 1; + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + if (link_up) { + __u32 speed = SPEED_10000; + + switch (link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + speed = SPEED_1000; + break; + case TXGBE_LINK_SPEED_100_FULL: + speed = SPEED_100; + break; + } + + cmd->base.speed = speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static void txgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, txgbevf_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, txgbevf_driver_version, + 
sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, txgbe_firmware_version,
+		sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static int txgbevf_set_link_ksettings(struct net_device __always_unused *netdev,
+				      const struct ethtool_link_ksettings __always_unused *cmd)
+{
+	return -EINVAL;
+}
+
+#define TXGBE_REGS_LEN 45
+static int txgbe_get_regs_len(struct net_device __always_unused *netdev)
+{
+	return TXGBE_REGS_LEN * sizeof(u32);
+}
+
+#define TXGBE_GET_STAT(_A_, _R_) ((_A_)->stats._R_)
+
+static void txgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+			   void *p)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	struct txgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u8 i;
+
+	memset(p, 0, TXGBE_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+	/* TXGBE_VFCTRL is a Write Only register, so just return 0 */
+	regs_buff[0] = 0x0;
+
+	/* General Registers */
+	regs_buff[1] = rd32(hw, TXGBE_VXSTATUS);
+	regs_buff[3] = rd32(hw, TXGBE_VXRXMEMWRAP);
+
+	/* Interrupt */
+	regs_buff[5] = rd32(hw, TXGBE_VXICR);
+	regs_buff[6] = rd32(hw, TXGBE_VXICS);
+	regs_buff[7] = rd32(hw, TXGBE_VXIMS);
+	regs_buff[8] = rd32(hw, TXGBE_VXIMC);
+	regs_buff[11] = rd32(hw, TXGBE_VXITR(0));
+	regs_buff[12] = rd32(hw, TXGBE_VXIVAR(0));
+	regs_buff[13] = rd32(hw, TXGBE_VXIVAR_MISC);
+
+	/* Receive DMA */
+	for (i = 0; i < 2; i++)
+		regs_buff[14 + i] = rd32(hw, TXGBE_VXRDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[16 + i] = rd32(hw, TXGBE_VXRDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[20 + i] = rd32(hw, TXGBE_VXRDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[22 + i] = rd32(hw, TXGBE_VXRDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[24 + i] = rd32(hw, TXGBE_VXRXDCTL(i));
+
+	/* Receive */
+	regs_buff[28] = rd32(hw, TXGBE_VXMRQC);
+
+	/* Transmit */
+	for (i = 0; i < 2; i++)
+		regs_buff[29 + i] = rd32(hw, TXGBE_VXTDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[31 + i] = rd32(hw, TXGBE_VXTDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[35 + i] = rd32(hw, TXGBE_VXTDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[37 + i] = rd32(hw, TXGBE_VXTDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[39 + i] = rd32(hw, TXGBE_VXTXDCTL(i));
+}
+
+static int txgbe_nway_reset(struct net_device *netdev)
+{
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		txgbevf_reinit_locked(adapter);
+
+	return 0;
+}
+
+static int txgbe_get_eeprom(struct net_device __always_unused *netdev,
+			    struct ethtool_eeprom __always_unused *eeprom,
+			    u8 __always_unused *bytes)
+{
+	return -EOPNOTSUPP;
+}
+
+static int txgbe_set_eeprom(struct net_device __always_unused *netdev,
+			    struct ethtool_eeprom __always_unused *eeprom,
+			    u8 __always_unused *bytes)
+{
+	return -EOPNOTSUPP;
+}
+
+static u32 txgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return TXGBE_VFRETA_SIZE;
+}
+
+static u32 txgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+	return TXGBE_RSS_HASH_KEY_SIZE;
+}
+
+static int txgbevf_get_reta_locked(struct txgbe_hw *hw, u32 *reta,
+				   int num_rx_queues)
+{
+	int err, i, j;
+	u32 msgbuf[TXGBE_VXMAILBOX_SIZE];
+	u32 *hw_reta = &msgbuf[1];
+	u32 mask = 0;
+
+	/* The RETA is queried from the PF through the mailbox.
+	 * The RETA has 128 entries and each entry is 2 bits wide (up to
+	 * 4 RSS queues), so the PF compresses 16 RETA entries into each
+	 * DWORD, giving 2 bits to each entry.
+	 */
+	int dwords = 128 / 16;
+
+	/* RETA querying is only available through mailbox API 1.2 and later.
+	 * Thus return an error if the negotiated API doesn't support RETA
+	 * querying.
+	 */
+	switch (hw->api_version) {
+	case txgbe_mbox_api_13:
+	case txgbe_mbox_api_12:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = TXGBE_VF_GET_RETA;
+
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 1, 0);
+
+	if (err)
+		return err;
+
+	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1, 0);
+
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+	/* If the operation has been refused by a PF return -EPERM */
+	if (msgbuf[0] == (TXGBE_VF_GET_RETA | TXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	/* If we didn't get an ACK there must have been
+	 * some sort of mailbox error so we should treat it
+	 * as such.
+	 */
+	if (msgbuf[0] != (TXGBE_VF_GET_RETA | TXGBE_VT_MSGTYPE_ACK))
+		return TXGBE_ERR_MBX;
+
+	/* the VF doesn't support more than 2 queues at the moment */
+	if (num_rx_queues > 1)
+		mask = 0x1;
+
+	for (i = 0; i < dwords; i++)
+		for (j = 0; j < 16; j++)
+			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
+
+	return 0;
+}
+
+/**
+ * txgbevf_get_rss_key_locked - get the RSS Random Key
+ * @hw: pointer to the HW structure
+ * @rss_key: buffer to fill with RSS Hash Key contents.
+ *
+ * The "rss_key" buffer should be big enough to contain 10 registers.
+ *
+ * Returns: 0 on success.
+ * If the API doesn't support this operation - (-EOPNOTSUPP).
+ */
+static int txgbevf_get_rss_key_locked(struct txgbe_hw *hw, u8 *rss_key)
+{
+	int err;
+	u32 msgbuf[TXGBE_VXMAILBOX_SIZE];
+
+	/* RSS Random Key retrieval is only available through mailbox API 1.2
+	 * and later.
+	 *
+	 * Thus return an error if the negotiated API doesn't support RSS
+	 * Random Key retrieval.
+	 */
+	switch (hw->api_version) {
+	case txgbe_mbox_api_13:
+	case txgbe_mbox_api_12:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = TXGBE_VF_GET_RSS_KEY;
+	err = hw->mbx.ops.write_posted(hw, msgbuf, 1, 0);
+
+	if (err)
+		return err;
+
+	err = hw->mbx.ops.read_posted(hw, msgbuf, 11, 0);
+
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+	/* If the operation has been refused by a PF return -EPERM */
+	if (msgbuf[0] == (TXGBE_VF_GET_RSS_KEY | TXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	/* If we didn't get an ACK there must have been
+	 * some sort of mailbox error so we should treat it
+	 * as such.
+ */ + if (msgbuf[0] != (TXGBE_VF_GET_RSS_KEY | TXGBE_VT_MSGTYPE_ACK)) + return TXGBE_ERR_MBX; + + memcpy(rss_key, msgbuf + 1, TXGBE_RSS_HASH_KEY_SIZE); + + return 0; +} + +static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (adapter->hw.mac.type == 1) { + if (key) + memcpy(key, adapter->rss_key, txgbe_get_rxfh_key_size(netdev)); + + if (indir) { + int i; + + for (i = 0; i < TXGBE_VFRETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + } + } else { + if (!indir && !key) + return 0; + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = txgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = txgbevf_get_rss_key_locked(&adapter->hw, key); + spin_unlock_bh(&adapter->mbx_lock); + } + return err; +} + +static void txgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = TXGBE_MAX_RXD; + ring->tx_max_pending = TXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *tx_ring = NULL, *rx_ring = NULL; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = max_t(u32, ring->rx_pending, (u32)TXGBE_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, (u32)TXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max_t(u32, ring->tx_pending, (u32)TXGBE_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, (u32)TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) + return 0; + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* If the adapter isn't up and running then just set the + * new parameters and scurry for the exits. 
+ */ + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (new_tx_count != adapter->tx_ring_count) { + tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring)); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->tx_ring[i]; + tx_ring[i].count = new_tx_count; + err = txgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + txgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } + } + + if (new_rx_count != adapter->rx_ring_count) { + rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring)); + if (!rx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + /* clone ring and setup updated count */ + rx_ring[i] = *adapter->rx_ring[i]; + rx_ring[i].count = new_rx_count; + err = txgbevf_setup_rx_resources(adapter, &rx_ring[i]); + if (err) { + while (i) { + i--; + txgbevf_free_rx_resources(&rx_ring[i]); + } + + vfree(rx_ring); + rx_ring = NULL; + + goto clear_reset; + } + } + } + + txgbevf_down(adapter); + txgbe_free_irq(adapter); + + /* Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbevf_free_tx_resources(adapter->tx_ring[i]); + *adapter->tx_ring[i] = tx_ring[i]; + } + adapter->tx_ring_count = new_tx_count; + + vfree(tx_ring); + tx_ring = NULL; + } + + /* Rx */ + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + txgbevf_free_rx_resources(adapter->rx_ring[i]); + *adapter->rx_ring[i] = rx_ring[i]; + } + adapter->rx_ring_count = new_rx_count; + + vfree(rx_ring); + rx_ring = NULL; + } + + txgbe_configure(adapter); + txgbe_request_irq(adapter); + txgbe_up_complete(adapter); + +clear_reset: + /* free Tx resources if Rx error is encountered */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) + txgbevf_free_tx_resources(&tx_ring[i]); + vfree(tx_ring); + } + + clear_bit(__TXGBE_RESETTING, &adapter->state); + return err; +} + +static u32 txgbe_get_msglevel(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void txgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int txgbe_link_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (!link_up) + *data = 1; + + return *data; +} + +/* ethtool register test data */ +struct txgbe_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default VF register test */ +static struct txgbe_reg_test reg_test_vf[] = { + { TXGBE_VXRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { TXGBE_VXRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_VXRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { TXGBE_VXRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { TXGBE_VXTDBAL(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_VXTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +static int +reg_pattern_test(struct txgbe_hw *hw, u32 r, u32 m, u32 w) +{ + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u32 pat, val, before; + struct txgbe_adapter *adapter; + + if (TXGBE_REMOVED(hw->hw_addr)) + return 1; + + adapter = hw->back; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + before = rd32(hw, r); + wr32(hw, r, _test[pat] & w); + val = rd32(hw, r); + if (val != (_test[pat] & w & m)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + r, val, _test[pat] & w & m); + wr32(hw, r, before); + return 1; + } + wr32(hw, r, before); + } + return 0; +} + +static int +reg_set_and_check(struct txgbe_hw *hw, u32 r, u32 m, u32 w) +{ + u32 val, before; + struct txgbe_adapter *adapter; + + if (TXGBE_REMOVED(hw->hw_addr)) + return 1; + + adapter = hw->back; + + before = rd32(hw, r); + wr32(hw, r, w & m); + val = rd32(hw, r); + if ((w & m) != (val & m)) { + e_err(hw, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + r, (val & m), (w & m)); + wr32(hw, r, before); + return 1; + } + wr32(hw, r, before); + + return 0; +} + +int txgbe_diag_reg_test(struct txgbe_hw *hw) +{ + struct txgbe_reg_test *test; + int rc; + u32 i; + struct txgbe_adapter *adapter = hw->back; + + if (TXGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - register test blocked\n"); + return 1; + } + + test = reg_test_vf; + + /* Perform the register test, looping through the test table + * until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + rc = 0; + switch (test->test_type) { + case PATTERN_TEST: + rc = reg_pattern_test(hw, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + rc = reg_set_and_check(hw, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + rc = reg_pattern_test(hw, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + rc = reg_pattern_test(hw, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + rc = reg_pattern_test(hw, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + } + if (rc) + return rc; + } + test++; + } + + return 0; +} + +static int txgbe_reg_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + + *data = txgbe_diag_reg_test(hw); + + return *data; +} + +static void txgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + data[1] = 1; + return; + } + set_bit(__TXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (txgbe_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + txgbevf_close(netdev); + else + txgbevf_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (txgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + txgbevf_reset(adapter); + + clear_bit(__TXGBE_TESTING, &adapter->state); + if (if_running) + txgbevf_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (txgbe_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + + clear_bit(__TXGBE_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int txgbe_get_sset_count(struct net_device *netdev, int stringset) +{ + switch (stringset) { + case ETH_SS_TEST: + return TXGBE_TEST_LEN; + case ETH_SS_STATS: + return TXGBE_STATS_LEN; + default: + return -EINVAL; + } +} + +static void txgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *txgbe_gstrings_test, + TXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + + for (i = 0; i < TXGBE_HW_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%-26s\t", + txgbe_hw_stats[i].name); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < TXGBE_SW_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%-26s\t", + txgbe_sw_stats[i].name); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_%-16s", i, "packets"); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_%-16s", i, "bytes"); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_%-16s", i, "packets"); + p += 
ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_%-16s", i, "bytes"); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < TXGBE_NET_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%-26s\t", + txgbe_net_stats[i].name); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void txgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + //txgbe_net_stats_t *net_stats = &adapter->net_stats; + struct txgbe_ring *ring; + int i = 0, j; + char *p; + unsigned int start; + + txgbevf_update_stats(adapter); + + for (j = 0; j < TXGBE_HW_STATS_LEN; j++) { + p = (char *)adapter + txgbe_hw_stats[j].offset; + data[i++] = (txgbe_hw_stats[j].size == sizeof(u64)) + ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < TXGBE_SW_STATS_LEN; j++) { + p = (char *)adapter + txgbe_sw_stats[j].offset; + data[i++] = (txgbe_sw_stats[j].size == sizeof(u64)) + ? *(u64 *)p : *(u32 *)p; + } + + /* populate Tx queue data */ + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + /* populate Rx queue data */ + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + i += 2; + } +} + +static int txgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + ec->rx_coalesce_usecs = adapter->rx_itr_setting > 1 + ? adapter->rx_itr_setting << 1 + : adapter->rx_itr_setting; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + ec->tx_coalesce_usecs = adapter->tx_itr_setting > 1 + ? adapter->tx_itr_setting << 1 + : adapter->tx_itr_setting; + + return 0; +} + +static int txgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + + /* don't accept tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && + ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->rx_coalesce_usecs > TXGBE_VXITR_INTERVAL(~0) >> 1) || + (ec->tx_coalesce_usecs > TXGBE_VXITR_INTERVAL(~0) >> 1)) + return -EINVAL; + + adapter->rx_itr_setting = ec->rx_coalesce_usecs > 1 + ? ec->rx_coalesce_usecs >> 1 + : ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = TXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + adapter->tx_itr_setting = ec->tx_coalesce_usecs > 1 + ? 
ec->tx_coalesce_usecs >> 1 + : ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = TXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + txgbevf_write_eitr(q_vector); + } + + return 0; +} + +static int txgbe_get_rss_hash_opts(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on txgbevf */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + goto TCP_V4_FLOW_CASE; + case UDP_V4_FLOW: + goto UDP_V4_FLOW_CASE; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + goto IPV4_FLOW_CASE; + case TCP_V6_FLOW: + goto TCP_V6_FLOW_CASE; + case UDP_V6_FLOW: + goto UDP_V6_FLOW_CASE; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + goto IPV6_FLOW_CASE; + default: + return -EINVAL; + } + +TCP_V4_FLOW_CASE: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +UDP_V4_FLOW_CASE: + if (adapter->flagsd & TXGBE_F_ENA_RSS_IPV4UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +IPV4_FLOW_CASE: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; + +TCP_V6_FLOW_CASE: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +UDP_V6_FLOW_CASE: + if (adapter->flagsd & TXGBE_F_ENA_RSS_IPV6UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +IPV6_FLOW_CASE: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; +} + +#define UDP_RSS_FLAGS (TXGBE_F_ENA_RSS_IPV4UDP | \ + TXGBE_F_ENA_RSS_IPV6UDP) + +static int txgbe_set_rss_hash_opt(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 flags = adapter->flagsd; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~TXGBE_F_ENA_RSS_IPV4UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= TXGBE_F_ENA_RSS_IPV4UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~TXGBE_F_ENA_RSS_IPV6UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= TXGBE_F_ENA_RSS_IPV6UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flagsd) { + u32 vfmrqc = 0; + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flagsd & UDP_RSS_FLAGS)) + e_info(drv, "enabling UDP RSS: 
fragmented packets may arrive out of order to the stack above\n"); + + adapter->flagsd = flags; + + vfmrqc = rd32(hw, TXGBE_VXMRQC) >> 16; + /* Perform hash on these packet types */ + vfmrqc |= TXGBE_VXMRQC_RSS_ALG_IPV4 | + TXGBE_VXMRQC_RSS_ALG_IPV4_TCP | + TXGBE_VXMRQC_RSS_ALG_IPV6 | + TXGBE_VXMRQC_RSS_ALG_IPV6_TCP; + + vfmrqc &= ~(TXGBE_VXMRQC_RSS_ALG_IPV4_UDP | + TXGBE_VXMRQC_RSS_ALG_IPV6_UDP); + + if (flags & TXGBE_F_ENA_RSS_IPV4UDP) + vfmrqc |= TXGBE_VXMRQC_RSS_ALG_IPV4_UDP; + + if (flags & TXGBE_F_ENA_RSS_IPV6UDP) + vfmrqc |= TXGBE_VXMRQC_RSS_ALG_IPV6_UDP; + + wr32m(hw, TXGBE_VXMRQC, TXGBE_VXMRQC_RSS(~0), + TXGBE_VXMRQC_RSS(vfmrqc)); + } + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + __always_unused u32 *rule_locs) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + break; + case ETHTOOL_GRXFH: + ret = txgbe_get_rss_hash_opts(adapter, info); + if (ret) + return ret; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (cmd->cmd == ETHTOOL_SRXFH) + ret = txgbe_set_rss_hash_opt(adapter, cmd); + + return ret; +} + +static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, + .get_link_ksettings = txgbevf_get_link_ksettings, + .set_link_ksettings = txgbevf_set_link_ksettings, + .get_drvinfo = txgbe_get_drvinfo, + .get_regs_len = txgbe_get_regs_len, + .get_regs = txgbe_get_regs, + .nway_reset = txgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom = txgbe_get_eeprom, + .set_eeprom = txgbe_set_eeprom, + .get_rxfh_indir_size = txgbe_get_rxfh_indir_size, + .get_rxfh_key_size = txgbe_get_rxfh_key_size, + .get_rxfh = txgbe_get_rxfh, + .get_ringparam = txgbe_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_msglevel = txgbe_get_msglevel, + .set_msglevel = txgbe_set_msglevel, + .self_test = txgbe_diag_test, + .get_sset_count = txgbe_get_sset_count, + .get_strings = txgbe_get_strings, + .get_ethtool_stats = txgbe_get_ethtool_stats, + .get_coalesce = txgbe_get_coalesce, + .set_coalesce = txgbe_set_coalesce, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, +}; + +void txgbevf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &txgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.c b/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..0278a0354d476b7947035b67073ec94af931d3d6 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/
+#include "txgbe_vf.h"
+#include "txgbe_mbx.h"
+
+u32 txgbevf_read_v2p_mailbox(struct txgbe_hw *hw)
+{
+	u32 v2p_mailbox = rd32(hw, TXGBE_VXMAILBOX);
+
+	v2p_mailbox |= hw->mbx.v2p_mailbox;
+	/* read and clear mirrored mailbox flags */
+	v2p_mailbox |= rd32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE);
+	wr32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE, 0);
+	hw->mbx.v2p_mailbox |= v2p_mailbox & TXGBE_VXMAILBOX_R2C_BITS;
+
+	return v2p_mailbox;
+}
+
+s32 txgbevf_check_for_bit_vf(struct txgbe_hw *hw, u32 mask)
+{
+	u32 mailbox = txgbevf_read_v2p_mailbox(hw);
+
+	hw->mbx.v2p_mailbox &= ~mask;
+
+	return (mailbox & mask ? 0 : TXGBE_ERR_MBX);
+}
+
+s32 txgbevf_obtain_mbx_lock_vf(struct txgbe_hw *hw)
+{
+	s32 err = TXGBE_ERR_MBX;
+	u32 mailbox;
+
+	/* Take ownership of the buffer */
+	wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_VFU);
+
+	/* reserve mailbox for vf use */
+	mailbox = txgbevf_read_v2p_mailbox(hw);
+	if (mailbox & TXGBE_VXMAILBOX_VFU)
+		err = 0;
+	else
+		txgbevf_dbg(hw,
+			    "Failed to obtain mailbox lock for VF");
+
+	return err;
+}
+
+/**
+ * txgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 txgbevf_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
+			u16 __always_unused mbx_id)
+{
+	s32 err = 0;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	err = txgbevf_obtain_mbx_lock_vf(hw);
+	if (err)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = rd32a(hw, TXGBE_VXMBMEM, i);
+
+	/* Acknowledge receipt and release mailbox, then we're done */
+	wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return err;
+}
+
+s32 txgbevf_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && hw->mbx.ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->udelay);
+	}
+
+	if (countdown == 0)
+		txgbevf_dbg(hw, "Polling for VF%d mailbox message timed out", mbx_id);
+
+out:
+	return countdown ? 0 : TXGBE_ERR_MBX;
+}
+
+s32 txgbevf_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+	struct txgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && hw->mbx.ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->udelay);
+	}
+
+	if (countdown == 0)
+		txgbevf_dbg(hw, "Polling for VF%d mailbox ack timed out", mbx_id);
+
+out:
+	return countdown ? 0 : TXGBE_ERR_MBX;
+}
+
+/**
+ * txgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/ +s32 txgbevf_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + s32 err = TXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = txgbevf_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = hw->mbx.ops.read(hw, msg, size, mbx_id); +out: + return err; +} + +/** + * txgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 txgbevf_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + s32 err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return TXGBE_ERR_MBX; + + /* send msg */ + err = hw->mbx.ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = txgbevf_poll_for_ack(hw, mbx_id); + + return err; +} + +/** + * txgbevf_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +s32 txgbevf_check_for_msg_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + s32 err = TXGBE_ERR_MBX; + + /* read clear the pf sts bit */ + if (!txgbevf_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbevf_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +s32 txgbevf_check_for_ack_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + s32 err = TXGBE_ERR_MBX; + + /* read clear the pf ack bit */ + if (!txgbevf_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbevf_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +s32 txgbevf_check_for_rst_vf(struct txgbe_hw *hw, u16 __always_unused mbx_id) +{ + s32 err = TXGBE_ERR_MBX; + + if (!txgbevf_check_for_bit_vf(hw, (TXGBE_VXMAILBOX_RSTD | + TXGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbevf_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 txgbevf_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 __always_unused mbx_id) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbevf_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbevf_check_for_msg_vf(hw, 0); + txgbevf_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF 
to tell it a message has been sent */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +void txgbevf_init_mbx_params_vf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbevf_read_mbx_vf; + mbx->ops.write = txgbevf_write_mbx_vf; + mbx->ops.read_posted = txgbevf_read_posted_mbx; + mbx->ops.write_posted = txgbevf_write_posted_mbx; + mbx->ops.check_for_msg = txgbevf_check_for_msg_vf; + mbx->ops.check_for_ack = txgbevf_check_for_ack_vf; + mbx->ops.check_for_rst = txgbevf_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..bd0d8b5b45696c92076a43ea0c12674ae23d7dba --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_mbx.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ +#ifndef _TXGBE_MBX_H_ +#define _TXGBE_MBX_H_ + +#define TXGBE_VF_GET_FW_VERSION 0x11 + +#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 +#define TXGBE_VT_MSGTYPE_ACK 0x80000000 + +/* mailbox API, legacy requests */ +#define TXGBE_VF_RESET 0x01 +#define TXGBE_VF_SET_MAC_ADDR 0x02 +#define TXGBE_VF_SET_MULTICAST 0x03 +#define TXGBE_VF_SET_VLAN 0x04 +#define TXGBE_VF_SET_LPE 0x05 +#define TXGBE_VF_SET_MACVLAN 0x06 +#define TXGBE_VF_API_NEGOTIATE 0x08 + +#define TXGBE_VF_GET_QUEUES 0x09 +#define TXGBE_VF_GET_RETA 0x0a +#define TXGBE_VF_GET_RSS_KEY 0x0b +#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define TXGBE_VF_GET_LINK_STATE 0x10 + +#define TXGBE_VF_BACKUP 0x8001 + +#define TXGBE_VT_MSGTYPE_ACK 0x80000000 +#define TXGBE_VT_MSGTYPE_NACK 0x40000000 +#define TXGBE_VT_MSGTYPE_CTS 0x20000000 +#define TXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT) + +#define TXGBE_VF_MC_TYPE_WORD 3 + +#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +/* GET_QUEUES return data indices within the mailbox */ +#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +enum txgbe_pfvf_api_rev { + txgbe_mbox_api_null, + txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + //txgbe_mbox_api_max, /* indicates that API version is not known */ + txgbe_mbox_api_unknown /* indicates that API version is not known */ +}; + +void txgbevf_init_mbx_params_vf(struct txgbe_hw *hw); + +#endif diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.c b/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..cc83bf252c759abb86edecfd276facccf533e1ee --- /dev/null +++ 
b/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ + +#include + +#include "txgbe_txrx.h" + +/* macro to make the table lines short */ +#define TXGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ TXGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ TXGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ TXGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ TXGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ TXGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ TXGBE_DEC_PTYPE_LAYER_##layer } + +#define TXGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +#define TXGBE_PTYPE_MAX (256) +struct txgbe_dec_ptype txgbevf_ptype_lookup[TXGBE_PTYPE_MAX] = { + TXGBE_UKN(0x00), + TXGBE_UKN(0x01), + TXGBE_UKN(0x02), + TXGBE_UKN(0x03), + TXGBE_UKN(0x04), + TXGBE_UKN(0x05), + TXGBE_UKN(0x06), + TXGBE_UKN(0x07), + TXGBE_UKN(0x08), + TXGBE_UKN(0x09), + TXGBE_UKN(0x0A), + TXGBE_UKN(0x0B), + TXGBE_UKN(0x0C), + TXGBE_UKN(0x0D), + TXGBE_UKN(0x0E), + TXGBE_UKN(0x0F), + + /* L2: mac */ + TXGBE_UKN(0x10), + TXGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + TXGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + TXGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + TXGBE_UKN(0x20), + TXGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + TXGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x26), + TXGBE_UKN(0x27), + TXGBE_UKN(0x28), + TXGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + TXGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x2E), + TXGBE_UKN(0x2F), + + /* L2: fcoe */ + TXGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x35, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x36, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x37, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x3A), + TXGBE_UKN(0x3B), + TXGBE_UKN(0x3C), + TXGBE_UKN(0x3D), + TXGBE_UKN(0x3E), + TXGBE_UKN(0x3F), + + TXGBE_UKN(0x40), + TXGBE_UKN(0x41), + TXGBE_UKN(0x42), + TXGBE_UKN(0x43), + TXGBE_UKN(0x44), + TXGBE_UKN(0x45), + 
TXGBE_UKN(0x46), + TXGBE_UKN(0x47), + TXGBE_UKN(0x48), + TXGBE_UKN(0x49), + TXGBE_UKN(0x4A), + TXGBE_UKN(0x4B), + TXGBE_UKN(0x4C), + TXGBE_UKN(0x4D), + TXGBE_UKN(0x4E), + TXGBE_UKN(0x4F), + TXGBE_UKN(0x50), + TXGBE_UKN(0x51), + TXGBE_UKN(0x52), + TXGBE_UKN(0x53), + TXGBE_UKN(0x54), + TXGBE_UKN(0x55), + TXGBE_UKN(0x56), + TXGBE_UKN(0x57), + TXGBE_UKN(0x58), + TXGBE_UKN(0x59), + TXGBE_UKN(0x5A), + TXGBE_UKN(0x5B), + TXGBE_UKN(0x5C), + TXGBE_UKN(0x5D), + TXGBE_UKN(0x5E), + TXGBE_UKN(0x5F), + TXGBE_UKN(0x60), + TXGBE_UKN(0x61), + TXGBE_UKN(0x62), + TXGBE_UKN(0x63), + TXGBE_UKN(0x64), + TXGBE_UKN(0x65), + TXGBE_UKN(0x66), + TXGBE_UKN(0x67), + TXGBE_UKN(0x68), + TXGBE_UKN(0x69), + TXGBE_UKN(0x6A), + TXGBE_UKN(0x6B), + TXGBE_UKN(0x6C), + TXGBE_UKN(0x6D), + TXGBE_UKN(0x6E), + TXGBE_UKN(0x6F), + TXGBE_UKN(0x70), + TXGBE_UKN(0x71), + TXGBE_UKN(0x72), + TXGBE_UKN(0x73), + TXGBE_UKN(0x74), + TXGBE_UKN(0x75), + TXGBE_UKN(0x76), + TXGBE_UKN(0x77), + TXGBE_UKN(0x78), + TXGBE_UKN(0x79), + TXGBE_UKN(0x7A), + TXGBE_UKN(0x7B), + TXGBE_UKN(0x7C), + TXGBE_UKN(0x7D), + TXGBE_UKN(0x7E), + TXGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + TXGBE_UKN(0x80), + TXGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0x86), + TXGBE_UKN(0x87), + TXGBE_UKN(0x88), + TXGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0x8E), + TXGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + TXGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0x96), + TXGBE_UKN(0x97), + TXGBE_UKN(0x98), + TXGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0x9E), + TXGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xA6), + TXGBE_UKN(0xA7), + TXGBE_UKN(0xA8), + TXGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xAE), + TXGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, 
PAY4), + TXGBE_UKN(0xB6), + TXGBE_UKN(0xB7), + TXGBE_UKN(0xB8), + TXGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xBE), + TXGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + TXGBE_UKN(0xC0), + TXGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0xC6), + TXGBE_UKN(0xC7), + TXGBE_UKN(0xC8), + TXGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0xCE), + TXGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + TXGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0xD6), + TXGBE_UKN(0xD7), + TXGBE_UKN(0xD8), + TXGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0xDE), + TXGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xE6), + TXGBE_UKN(0xE7), + TXGBE_UKN(0xE8), + TXGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xEE), + TXGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + TXGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xF6), + TXGBE_UKN(0xF7), + TXGBE_UKN(0xF8), + TXGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xFE), + TXGBE_UKN(0xFF), +}; + +static inline struct txgbe_dec_ptype txgbe_decode_ptype(const u8 ptype) +{ + return txgbevf_ptype_lookup[ptype]; +} + +static u8 get_ipv6_proto(struct sk_buff *skb, int offset) +{ + struct ipv6hdr *iphdr = (struct ipv6hdr *)(skb->data + offset); + u8 nexthdr = iphdr->nexthdr; + + offset += sizeof(struct ipv6hdr); + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr _hdr, 
*hp; + + if (nexthdr == NEXTHDR_NONE) + break; + + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + break; + + if (nexthdr == NEXTHDR_FRAGMENT) + break; + else if (nexthdr == NEXTHDR_AUTH) + offset += ipv6_authlen(hp); + else + offset += ipv6_optlen(hp); + + nexthdr = hp->nexthdr; + } + + return nexthdr; +} + +struct txgbe_dec_ptype txgbe_rx_decode_ptype(const union txgbe_rx_desc *rx_desc) +{ + return txgbe_decode_ptype(TXGBE_RXD_PKTTYPE(rx_desc)); +} + +#ifndef ETH_P_TEB +#define ETH_P_TEB 0x6558 +#endif +struct txgbe_dec_ptype txgbe_tx_encode_ptype(const struct txgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + u8 l4_prot = 0; + u8 ptype = 0; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= TXGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + /* fixme: VXLAN-GPE neither ETHER nor IP */ + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_prot = get_ipv6_proto(skb, + skb_inner_network_offset(skb)); + ptype |= TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: + switch (first->protocol) { + case htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = TXGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case htons(ETH_P_IPV6): + l4_prot = get_ipv6_proto(skb, skb_network_offset(skb)); + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case htons(ETH_P_1588): + ptype = TXGBE_PTYPE_L2_TS; + goto exit; + case htons(ETH_P_FIP): + ptype = TXGBE_PTYPE_L2_FIP; + goto exit; + case htons(0x88cc): + ptype = TXGBE_PTYPE_L2_LLDP; + goto exit; + case htons(0x22e7): + ptype = TXGBE_PTYPE_L2_CNM; + goto exit; + case htons(ETH_P_PAE): + ptype = TXGBE_PTYPE_L2_EAPOL; + goto exit; + case htons(ETH_P_ARP): + ptype = TXGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = TXGBE_PTYPE_L2_MAC; + goto exit; + } + } + + 
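+	/* Fold the L4 protocol into the packet type; protocols other than
+	 * TCP, UDP and SCTP fall back to plain IP data (TXGBE_PTYPE_TYP_IPDATA).
+	 */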
switch (l4_prot) { + case IPPROTO_TCP: + ptype |= TXGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= TXGBE_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= TXGBE_PTYPE_TYP_SCTP; + break; + default: + ptype |= 0x2; + break; + } + +exit: + return txgbe_decode_ptype(ptype); +} diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.h b/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..8cf86f251f33a0f1e112b84278f48eacf96341c4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_txrx.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_TXRX_H_ +#define _TXGBE_TXRX_H_ + +#include +#include "txgbe_vf.h" + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +/** + * Packet Type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +#define TXGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +/* TUN */ +#define TXGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define TXGBE_PTYPE_TUN_IPV4 (0x80) +#define TXGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define TXGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define TXGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define TXGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define TXGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define TXGBE_PTYPE_PKT_MAC (0x10) +#define TXGBE_PTYPE_PKT_IP (0x20) +#define TXGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define TXGBE_PTYPE_TYP_MAC (0x01) +#define TXGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define TXGBE_PTYPE_TYP_FIP (0x03) +#define TXGBE_PTYPE_TYP_LLDP (0x04) +#define TXGBE_PTYPE_TYP_CNM (0x05) +#define TXGBE_PTYPE_TYP_EAPOL (0x06) +#define TXGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define TXGBE_PTYPE_PKT_IPV6 (0x08) +#define TXGBE_PTYPE_TYP_IPFRAG (0x01) +#define TXGBE_PTYPE_TYP_IPDATA (0x02) +#define TXGBE_PTYPE_TYP_UDP (0x03) +#define TXGBE_PTYPE_TYP_TCP (0x04) +#define TXGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define TXGBE_PTYPE_PKT_VFT (0x08) +#define TXGBE_PTYPE_TYP_FCOE (0x00) +#define TXGBE_PTYPE_TYP_FCDATA (0x01) +#define TXGBE_PTYPE_TYP_FCRDY (0x02) +#define TXGBE_PTYPE_TYP_FCRSP (0x03) +#define TXGBE_PTYPE_TYP_FCOTHER (0x04) + +/* packet type non-ip values */ +enum txgbe_l2_ptypes { + TXGBE_PTYPE_L2_ABORTED = (TXGBE_PTYPE_PKT_MAC), + TXGBE_PTYPE_L2_MAC = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_MAC), + TXGBE_PTYPE_L2_TS = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_TS), + TXGBE_PTYPE_L2_FIP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_FIP), + TXGBE_PTYPE_L2_LLDP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_LLDP), + TXGBE_PTYPE_L2_CNM = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_CNM), + TXGBE_PTYPE_L2_EAPOL = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_EAPOL), + TXGBE_PTYPE_L2_ARP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_ARP), + + TXGBE_PTYPE_L2_IPV4_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV4 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IPDATA), + TXGBE_PTYPE_L2_IPV4_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV4_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV4_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_SCTP), + TXGBE_PTYPE_L2_IPV6_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV6 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IPDATA), + TXGBE_PTYPE_L2_IPV6_UDP = 
(TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV6_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV6_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_SCTP), + + TXGBE_PTYPE_L2_FCOE = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCOE), + TXGBE_PTYPE_L2_FCOE_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCOTHER), + TXGBE_PTYPE_L2_FCOE_VFT = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT), + TXGBE_PTYPE_L2_FCOE_VFT_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_VFT_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_VFT_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCOTHER), + + TXGBE_PTYPE_L2_TUN4_MAC = (TXGBE_PTYPE_TUN_IPV4 | TXGBE_PTYPE_PKT_IGM), + TXGBE_PTYPE_L2_TUN6_MAC = (TXGBE_PTYPE_TUN_IPV6 | TXGBE_PTYPE_PKT_IGM), +}; + +/** + * Packet Type decoding + **/ +/* txgbe_dec_ptype.mac: outer mac */ +enum txgbe_dec_ptype_mac { + TXGBE_DEC_PTYPE_MAC_IP = 0, + TXGBE_DEC_PTYPE_MAC_L2 = 2, + TXGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* txgbe_dec_ptype.[e]ip: outer&encaped ip */ +#define TXGBE_DEC_PTYPE_IP_FRAG (0x4) +enum txgbe_dec_ptype_ip { + TXGBE_DEC_PTYPE_IP_NONE = 0, + TXGBE_DEC_PTYPE_IP_IPV4 = 1, + TXGBE_DEC_PTYPE_IP_IPV6 = 2, + TXGBE_DEC_PTYPE_IP_FGV4 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4), + TXGBE_DEC_PTYPE_IP_FGV6 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6), +}; + +/* txgbe_dec_ptype.etype: encaped type */ +enum txgbe_dec_ptype_etype { + TXGBE_DEC_PTYPE_ETYPE_NONE = 0, + TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + TXGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + TXGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* txgbe_dec_ptype.proto: payload proto */ +enum txgbe_dec_ptype_prot { + TXGBE_DEC_PTYPE_PROT_NONE = 0, + TXGBE_DEC_PTYPE_PROT_UDP = 1, + TXGBE_DEC_PTYPE_PROT_TCP = 2, + TXGBE_DEC_PTYPE_PROT_SCTP = 3, + TXGBE_DEC_PTYPE_PROT_ICMP = 4, + TXGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* txgbe_dec_ptype.layer: payload layer */ +enum txgbe_dec_ptype_layer { + TXGBE_DEC_PTYPE_LAYER_NONE = 0, + TXGBE_DEC_PTYPE_LAYER_PAY2 = 1, + TXGBE_DEC_PTYPE_LAYER_PAY3 = 2, + TXGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct txgbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; + +struct txgbe_dec_ptype txgbe_rx_decode_ptype(const union txgbe_rx_desc *rx_desc); +struct txgbe_dec_ptype txgbe_tx_encode_ptype(const struct txgbe_tx_buffer *first); +#endif diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.c b/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.c new file mode 100644 index 0000000000000000000000000000000000000000..3b0610542b58d133acb37fb50a95f22ba9d9d2f7 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., 
Ltd. */ +#include "txgbe_vf.h" +#include "txgbe_mbx.h" + +s32 txgbe_start_hw_vf(struct txgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +s32 txgbe_get_mac_addr_vf(struct txgbe_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < 6; i++) + mac_addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +s32 txgbe_init_hw_vf(struct txgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/* txgbe_virt_clr_reg - Set register to default (power on) state. + * @hw: pointer to hardware structure + */ +static void txgbe_virt_clr_reg(struct txgbe_hw *hw) +{ + int i; + u32 vfsrrctl; + + /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ + vfsrrctl = TXGBE_VXRXDCTL_HDRSZ(txgbe_hdr_sz(TXGBE_RX_HDR_SIZE)); + vfsrrctl |= TXGBE_VXRXDCTL_BUFSZ(txgbe_buf_sz(TXGBE_RX_BUF_SIZE)); + + for (i = 0; i < 7; i++) { + wr32m(hw, TXGBE_VXRXDCTL(i), + (TXGBE_VXRXDCTL_HDRSZ(~0) | TXGBE_VXRXDCTL_BUFSZ(~0)), + vfsrrctl); + } + + txgbe_flush(hw); +} + +s32 txgbe_reset_hw_vf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = TXGBE_VF_INIT_TIMEOUT; + s32 err; + u32 msgbuf[TXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 i; + + hw->mac.ops.stop_adapter(hw); + + /* reset the api version */ + hw->api_version = txgbe_mbox_api_10; + + txgbevf_dbg(hw, "Issuing a function reset to MAC\n"); + + /* backup msix vectors */ + for (i = 0; i < 16; i++) + hw->b4_buf[i] = txgbe_rd32(hw->b4_addr, i * 4); + + wr32m(hw, TXGBE_VXCTRL, TXGBE_VXCTRL_RST, TXGBE_VXCTRL_RST); + txgbe_flush(hw); + + msleep(50); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + udelay(5); + } + + /* restore msix vectors */ + for (i = 0; i < 16; i++) + txgbe_wr32(hw->b4_addr, i * 4, hw->b4_buf[i]); + + if (!timeout) + return TXGBE_ERR_RESET_FAILED; + + /* Reset VF registers to initial values */ + txgbe_virt_clr_reg(hw); + + /* mailbox timeout can now become active */ + mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = TXGBE_VF_RESET; + err = mbx->ops.write_posted(hw, msgbuf, 1, 0); + if (err) + return err; + + usleep_range(10000, 20000); + + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + err = mbx->ops.read_posted(hw, msgbuf, + TXGBE_VF_PERMADDR_MSG_LEN, 0); + if (err) + return err; + + if (msgbuf[0] != (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_NACK)) + return TXGBE_ERR_INVALID_MAC_ADDR; + + if (msgbuf[0] == (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + + hw->mac.mc_filter_type = msgbuf[TXGBE_VF_MC_TYPE_WORD]; + + return 0; +} + +s32 txgbe_stop_adapter_vf(struct txgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Clear interrupt mask to stop from interrupts being generated */ + wr32(hw, TXGBE_VXIMS, TXGBE_VF_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, TXGBE_VXICR, ~0); + + /* Disable the transmit unit. Each queue must be disabled. 
*/ + for (i = 0; i < hw->mac.max_tx_queues; i++) + wr32(hw, TXGBE_VXTXDCTL(i), TXGBE_VXTXDCTL_FLUSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = rd32(hw, TXGBE_VXRXDCTL(i)); + reg_val &= ~TXGBE_VXRXDCTL_ENABLE; + wr32(hw, TXGBE_VXRXDCTL(i), reg_val); + } + /* Clear packet split and pool config */ + wr32(hw, TXGBE_VXMRQC, 0); + + /* flush all queues disables */ + txgbe_flush(hw); + usleep_range(10000, 20000); + + return 0; +} + +s32 txgbe_get_fw_version(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = TXGBE_VF_GET_FW_VERSION; + msgbuf[1] = 0x0; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + if (err || (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK)) { + err = TXGBE_ERR_MBX; + } else { + snprintf(txgbe_firmware_version, TXGBE_FW_VER_SIZE, "0x%08x", msgbuf[1]); + err = 0; + } + + return err; +} + +s32 txgbe_check_mac_link_vf(struct txgbe_hw *hw, txgbe_link_speed *speed, + bool *link_up, bool __always_unused autoneg_wait_to_complete) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + struct txgbe_mac_info *mac = &hw->mac; + s32 err = 0; + u32 links_reg; + u32 in_msg = 0; + u8 i = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = rd32(hw, TXGBE_VXSTATUS); + if (!(links_reg & TXGBE_VXSTATUS_UP)) + goto out; + + /* for SFP+ modules and DA cables, it can take up to 500usecs + * before the link status is correct + */ + if (!po32m(hw, TXGBE_VXSTATUS, TXGBE_VXSTATUS_UP, 0, 100, 5)) + goto out; + + for (i = 0; i < 100; i++) { + udelay(5); + links_reg = rd32(hw, TXGBE_VXSTATUS); + + if (!(links_reg & TXGBE_VXSTATUS_UP)) + goto out; + } + + switch (TXGBE_VXSTATUS_SPEED(links_reg)) { + case TXGBE_VXSTATUS_SPEED_10G: + *speed = TXGBE_LINK_SPEED_10GB_FULL; + break; + case TXGBE_VXSTATUS_SPEED_1G: + *speed = TXGBE_LINK_SPEED_1GB_FULL; + break; + case TXGBE_VXSTATUS_SPEED_100M: + *speed = TXGBE_LINK_SPEED_100_FULL; + break; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & TXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & TXGBE_VT_MSGTYPE_NACK) + err = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + err = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; +out: + *link_up = !mac->get_link_status; + return err; +} + +s32 txgbe_set_rar_vf(struct txgbe_hw *hw, u32 __always_unused index, u8 *addr, + u32 __always_unused vmdq, u32 __always_unused enable_addr) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 err; + + memset(msgbuf, 0, 12); + msgbuf[0] = TXGBE_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + err = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!err) + err = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" 
*/ + if (!err && + (msgbuf[0] == (TXGBE_VF_SET_MAC_ADDR | TXGBE_VT_MSGTYPE_NACK))) { + txgbe_get_mac_addr_vf(hw, hw->mac.addr); + return TXGBE_ERR_MBX; + } + + return err; +} + +s32 txgbe_set_uc_addr_vf(struct txgbe_hw *hw, u32 index, u8 *addr) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 err; + + memset(msgbuf, 0, sizeof(msgbuf)); + /* If index is one then this is the start of a new list and needs + * indication to the PF so it can do it's own list management. + * If it is zero then that tells the PF to just clear all of + * this VF's macvlans and there is no new list. + */ + msgbuf[0] |= index << TXGBE_VT_MSGINFO_SHIFT; + msgbuf[0] |= TXGBE_VF_SET_MACVLAN; + if (addr) + memcpy(msg_addr, addr, 6); + err = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!err) + err = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS; + + if (!err) + if (msgbuf[0] == (TXGBE_VF_SET_MACVLAN | TXGBE_VT_MSGTYPE_NACK)) + err = TXGBE_ERR_OUT_OF_MEM; + + return err; +} + +static s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + txgbevf_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + + return vector; +} + +s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, txgbe_mc_addr_itr next, + bool __always_unused clear) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[TXGBE_VXMAILBOX_SIZE]; + u16 *vector_list = (u16 *)&msgbuf[1]; + u32 vector; + u32 cnt, i; + u32 vmdq; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. + */ + + txgbevf_dbg(hw, "MC Addr Count = %d\n", mc_addr_count); + + cnt = (mc_addr_count > 30) ? 
30 : mc_addr_count; + msgbuf[0] = TXGBE_VF_SET_MULTICAST; + msgbuf[0] |= cnt << TXGBE_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + vector = txgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); + txgbevf_dbg(hw, "Hash value = 0x%03X\n", vector); + vector_list[i] = (u16)vector; + } + + return mbx->ops.write_posted(hw, msgbuf, TXGBE_VXMAILBOX_SIZE, 0); +} + +s32 txgbe_update_xcast_mode(struct txgbe_hw *hw, int xcast_mode) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case txgbe_mbox_api_12: + if (xcast_mode > TXGBE_XCAST_MODE_ALLMULTI) + return TXGBE_ERR_FEATURE_NOT_SUPPORTED; + case txgbe_mbox_api_13: + //case txgbe_mbox_api_15: + break; + default: + return TXGBE_ERR_FEATURE_NOT_SUPPORTED; + } + + msgbuf[0] = TXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (TXGBE_VF_UPDATE_XCAST_MODE | TXGBE_VT_MSGTYPE_NACK)) + return TXGBE_ERR_FEATURE_NOT_SUPPORTED; + return 0; +} + +s32 txgbe_get_link_state_vf(struct txgbe_hw *hw, bool *link_state) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + s32 ret_val; + + msgbuf[0] = TXGBE_VF_GET_LINK_STATE; + msgbuf[1] = 0x0; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + if (err || (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK)) { + ret_val = TXGBE_ERR_MBX; + } else { + ret_val = 0; + *link_state = msgbuf[1]; + } + + return ret_val; +} + +s32 txgbe_set_vfta_vf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool __always_unused vlan_on, bool __always_unused vlvf_bypass) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = TXGBE_VF_SET_VLAN; + msgbuf[1] = vlan; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + msgbuf[0] |= vlan_on << TXGBE_VT_MSGINFO_SHIFT; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (!err) + err = mbx->ops.read_posted(hw, msgbuf, 1, 0); + + if (!err && (msgbuf[0] & TXGBE_VT_MSGTYPE_ACK)) + return 0; + + return err | (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK); +} + +/** + * txgbe_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + **/ +int txgbe_negotiate_api_version(struct txgbe_hw *hw, int api) +{ + int err; + u32 msg[3]; + + /* Negotiate the mailbox API version */ + msg[0] = TXGBE_VF_API_NEGOTIATE; + msg[1] = api; + msg[2] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 3, 0); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 3, 0); + + if (!err) { + msg[0] &= ~TXGBE_VT_MSGTYPE_CTS; + + /* Store value and return 0 on success */ + if (msg[0] == (TXGBE_VF_API_NEGOTIATE | TXGBE_VT_MSGTYPE_ACK)) { + hw->api_version = api; + return 0; + } + + err = TXGBE_ERR_INVALID_ARGUMENT; + } + return err; +} + +static s32 txgbe_write_msg_read_ack(struct txgbe_hw *hw, u32 *msg, + u32 *retmsg, u16 size) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + s32 retval = mbx->ops.write_posted(hw, msg, size, 0); + + if (retval) + return retval; + + return mbx->ops.read_posted(hw, retmsg, size, 0); +} + +/** + * txgbe_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +s32 txgbe_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size) +{ + u32 
msgbuf[2]; + s32 retval; + + msgbuf[0] = TXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + + retval = txgbe_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (retval) + return retval; + if ((msgbuf[0] & TXGBE_VF_SET_LPE) && + (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK)) + return TXGBE_ERR_MBX; + + return 0; +} + +int txgbe_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc) +{ + int err; + u32 msg[5]; + + /* do nothing if API doesn't support txgbe_get_queues */ + switch (hw->api_version) { + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + break; + default: + return 0; + } + + /* Fetch queue configuration from the PF */ + msg[0] = TXGBE_VF_GET_QUEUES; + msg[1] = 0; + msg[2] = 0; + msg[3] = 0; + msg[4] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 5, 0); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 5, 0); + if (!err) { + msg[0] &= ~TXGBE_VT_MSGTYPE_CTS; + + /* if we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such + */ + if (msg[0] != (TXGBE_VF_GET_QUEUES | TXGBE_VT_MSGTYPE_ACK)) + return TXGBE_ERR_MBX; + + /* record and validate values from message */ + hw->mac.max_tx_queues = msg[TXGBE_VF_TX_QUEUES]; + if (hw->mac.max_tx_queues == 0 || + hw->mac.max_tx_queues > TXGBE_VF_MAX_TX_QUEUES) + hw->mac.max_tx_queues = TXGBE_VF_MAX_TX_QUEUES; + + hw->mac.max_rx_queues = msg[TXGBE_VF_RX_QUEUES]; + if (hw->mac.max_rx_queues == 0 || + hw->mac.max_rx_queues > TXGBE_VF_MAX_RX_QUEUES) + hw->mac.max_rx_queues = TXGBE_VF_MAX_RX_QUEUES; + + *num_tcs = msg[TXGBE_VF_TRANS_VLAN]; + /* in case of unknown state assume we cannot tag frames */ + if (*num_tcs > hw->mac.max_rx_queues) + *num_tcs = 1; + + *default_tc = msg[TXGBE_VF_DEF_QUEUE]; + /* default to queue 0 on out-of-bounds queue number */ + if (*default_tc >= hw->mac.max_tx_queues) + *default_tc = 0; + } + + return err; +} + +void txgbe_init_ops_vf(struct txgbe_hw *hw) +{ + /* MAC */ + hw->mac.ops.init_hw = txgbe_init_hw_vf; + hw->mac.ops.reset_hw = txgbe_reset_hw_vf; + hw->mac.ops.start_hw = txgbe_start_hw_vf; + /* Cannot clear stats on VF */ + hw->mac.ops.get_mac_addr = txgbe_get_mac_addr_vf; + hw->mac.ops.get_fw_version = txgbe_get_fw_version; + hw->mac.ops.stop_adapter = txgbe_stop_adapter_vf; + + /* Link */ + hw->mac.ops.check_link = txgbe_check_mac_link_vf; + + /* RAR, Multicast, VLAN */ + hw->mac.ops.set_rar = txgbe_set_rar_vf; + hw->mac.ops.set_uc_addr = txgbe_set_uc_addr_vf; + hw->mac.ops.update_mc_addr_list = txgbe_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = txgbe_update_xcast_mode; + hw->mac.ops.get_link_state = txgbe_get_link_state_vf; + hw->mac.ops.set_vfta = txgbe_set_vfta_vf; + + hw->mac.max_tx_queues = 1; + hw->mac.max_rx_queues = 1; + + hw->mbx.ops.init_params = txgbevf_init_mbx_params_vf; +} diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.h b/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.h new file mode 100644 index 0000000000000000000000000000000000000000..52d758aee4cbed182481314af5f7c730777b0ed8 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbe_vf.h @@ -0,0 +1,1209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef __TXGBE_VF_H__ +#define __TXGBE_VF_H__ + +#include +#include +#include + +#ifndef PCI_VENDOR_ID_WANGXUN +#define PCI_VENDOR_ID_WANGXUN 0x8088 +#endif + +#define TXGBE_DEV_ID_SP1000_VF 0x1000 +#define TXGBE_DEV_ID_WX1820_VF 0x2000 + +#define TXGBE_VF_MAX_TX_QUEUES 4 +#define TXGBE_VF_MAX_RX_QUEUES 4 +#define TXGBE_MAX_RSS_QUEUES 4 +#define TXGBE_RX_BUFFER_WRITE 16 + +#define MAX_RX_QUEUES (TXGBE_VF_MAX_RX_QUEUES) +#define MAX_TX_QUEUES (TXGBE_VF_MAX_TX_QUEUES) + +#define TXGBE_VFRSSRK_REGS 10 /* 10 registers for RSS key */ + +#define TXGBE_DEFAULT_TXD 128 +#define TXGBE_DEFAULT_RXD 128 +#define TXGBE_MAX_TXD 4096 +#define TXGBE_MIN_TXD 64 +#define TXGBE_MAX_RXD 4096 +#define TXGBE_MIN_RXD 64 + +#define TXGBE_MAX_TXD_PWR 14 +#define TXGBE_MAX_DATA_PER_TXD BIT(TXGBE_MAX_TXD_PWR) + +/* Number of Transmit and Receive Descriptors(*1024) */ +#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), TXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 + +#define TXGBE_VF_INIT_TIMEOUT 200 +#define TXGBE_VF_PERMADDR_MSG_LEN 4 +#define TXGBE_VF_IRQ_CLEAR_MASK 7 + +#define TXGBE_FW_VER_SIZE 32 + +/** + * VF Registers + * r=ring index [0,7], i=local index, + * g=value for register, f=value for field + **/ +#define TXGBE_VXRXMEMWRAP 0x00000 /* i=[0,7] */ +#define TXGBE_VXRXMEMWRAP_WRAP(g, i) ((0x7 << 4 * (i) & (g)) >> 4 * (i)) +#define TXGBE_VXRXMEMWRAP_EMPTY(g, i) ((0x8 << 4 * (i) & (g)) >> 4 * (i)) +#define TXGBE_VXSTATUS 0x00004 +#define TXGBE_VXSTATUS_UP BIT(0) +#define TXGBE_VXSTATUS_SPEED(g) ((0x7 & (g)) >> 1) +#define TXGBE_VXSTATUS_SPEED_10G (0x1) +#define TXGBE_VXSTATUS_SPEED_1G (0x2) +#define TXGBE_VXSTATUS_SPEED_100M (0x4) +#define TXGBE_VXSTATUS_BUSY BIT(4) +#define TXGBE_VXSTATUS_LANID BIT(8) +#define TXGBE_VXCTRL 0x00008 +#define TXGBE_VXCTRL_RST BIT(0) +#define TXGBE_VXMRQC 0x00078 +#define TXGBE_VXMRQC_RSV BIT(0) +#define TXGBE_VXMRQC_PSR(f) ((0x1F & (f)) << 1) +#define TXGBE_VXMRQC_PSR_L4HDR BIT(0) +#define TXGBE_VXMRQC_PSR_L3HDR BIT(1) +#define TXGBE_VXMRQC_PSR_L2HDR BIT(2) +#define TXGBE_VXMRQC_PSR_TUNHDR BIT(3) +#define TXGBE_VXMRQC_PSR_TUNMAC BIT(4) +#define TXGBE_VXMRQC_RSS(f) ((0xFFFF & (f)) << 16) +#define TXGBE_VXMRQC_RSS_ALG(f) ((0xFF) & (f)) +#define TXGBE_VXMRQC_RSS_ALG_IPV4_TCP BIT(0) +#define TXGBE_VXMRQC_RSS_ALG_IPV4 BIT(1) +#define TXGBE_VXMRQC_RSS_ALG_IPV6 BIT(4) +#define TXGBE_VXMRQC_RSS_ALG_IPV6_TCP BIT(5) +#define TXGBE_VXMRQC_RSS_ALG_IPV4_UDP BIT(6) +#define TXGBE_VXMRQC_RSS_ALG_IPV6_UDP BIT(7) +#define TXGBE_VXMRQC_RSS_EN ((0x1) << 8) +#define TXGBE_VXMRQC_RSS_HASH(f) ((0x7 & (f)) << 13) +#define TXGBE_VXRSSRK(i) (0x00080 + ((i) * 4)) /* i=[0,9] */ +#define TXGBE_VXRETA(i) (0x000C0 + ((i) * 4)) /* i=[0,15] */ +#define TXGBE_VXICR 0x00100 +#define TXGBE_VXIC_MBOX ((0x1) << 0) +#define TXGBE_VXIC_DONE1 ((0x1) << 1) +#define TXGBE_VXIC_DONE2 ((0x1) << 2) +#define TXGBE_VXICS 0x00104 +#define TXGBE_VXIMS 0x00108 +#define TXGBE_VXIMC 0x0010C +#define TXGBE_VXLLI 0x00118 +#define TXGBE_VXITR(i) (0x00200 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VXITR_INTERVAL(f) ((0x1FF & (f)) << 3) +#define TXGBE_VXITR_LLI ((0x1) << 15) +#define TXGBE_VXITR_LLI_CREDIT(f) ((0x1F & (f)) << 16) +#define TXGBE_VXITR_CNT(f) ((0x7F & (f)) << 21) +#define TXGBE_VXITR_CNT_WDIS ((0x1) << 31) +#define TXGBE_VXIVAR(i) (0x00240 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_VXIVAR_ALLOC(i, f) ((0x1 & (f)) << 8 * (i)) 
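+/* Each IVAR entry packs one interrupt cause per byte: the low bits carry
+ * the MSI-X vector allocation and bit 7 is the VALID flag defined below.
+ */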
+#define TXGBE_VXIVAR_VALID(i, f) ((0x80 & (f)) << 8 * (i)) +#define TXGBE_VXIVAR_MISC 0x00260 +#define TXGBE_VXIVAR_MISC_ALLOC(f) ((0x3 & (f))) +#define TXGBE_VXIVAR_MISC_VALID ((0x80)) + +#define TXGBE_VXITR(i) (0x00200 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VXITR_INTERVAL(f) ((0x1FF & (f)) << 3) +#define TXGBE_VXITR_LLI ((0x1) << 15) +#define TXGBE_VXITR_LLI_CREDIT(f) ((0x1F & (f)) << 16) +#define TXGBE_VXITR_CNT(f) ((0x7F & (f)) << 21) +#define TXGBE_VXITR_CNT_WDIS ((0x1) << 31) +#define TXGBE_VXIVAR(i) (0x00240 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_VXIVAR_ALLOC(i, f) ((0x1 & (f)) << 8 * (i)) +#define TXGBE_VXIVAR_VALID(i, f) ((0x80 & (f)) << 8 * (i)) +#define TXGBE_VXIVAR_MISC 0x00260 +#define TXGBE_VXIVAR_MISC_ALLOC(f) ((0x3 & (f))) +#define TXGBE_VXIVAR_MISC_VALID ((0x80)) +#define NON_Q_VECTORS (1) +#define MAX_Q_VECTORS (5) +#define MIN_MSIX_COUNT (1 + NON_Q_VECTORS) + +/*** @txgbe_rx_desc.rd.lower.pkt_addr ***/ +#define TXGBE_RXD_PKTADDR(v) cpu_to_le64((v)) + +/*** @txgbe_rx_desc.rd.lower.hdr_addr ***/ +#define TXGBE_RXD_HDRADDR(v) cpu_to_le64((v)) + +#define TXGBE_RX_DESC(R, i) \ + (&(((union txgbe_rx_desc *)((R)->desc))[i])) +#define TXGBE_TX_DESC(R, i) \ + (&(((struct txgbe_tx_desc *)((R)->desc))[i])) +#define TXGBE_TX_CTXTDESC(R, i) \ + (&(((struct txgbe_adv_tx_context_desc *)((R)->desc))[i])) + +/*** @txgbe_rx_desc.wb.lower.lo_dword ***/ +/* RSS Hash results */ +#define TXGBE_RXD_RSSTYPE(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data)) & 0xF) +#define TXGBE_RSSTYPE_NONE (0) +#define TXGBE_RSSTYPE_IPV4_TCP (1) +#define TXGBE_RSSTYPE_IPV4 (2) +#define TXGBE_RSSTYPE_IPV6_TCP (3) +#define TXGBE_RSSTYPE_IPV4_SCTP (4) +#define TXGBE_RSSTYPE_IPV6 (5) +#define TXGBE_RSSTYPE_IPV6_SCTP (6) +#define TXGBE_RSSTYPE_IPV4_UDP (7) +#define TXGBE_RSSTYPE_IPV6_UDP (8) +#define TXGBE_RSSTYPE_FDIR (15) +#define TXGBE_RXD_SECTYPE(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data) >> 4) & 0x3) +#define TXGBE_SECTYPE_NONE (0) +#define TXGBE_SECTYPE_LINKSEC (1) +#define TXGBE_SECTYPE_IPSECESP (2) +#define TXGBE_SECTYPE_IPSECAH (3) +#define TXGBE_RXD_TPID_SEL(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data) >> 6) & 0x7) +#define TXGBE_RXD_PKTTYPE(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define TXGBE_RXD_RSCCNT(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data) >> 17) & 0xF) +#define TXGBE_RXD_HDRLEN(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.lo_dword.data) >> 21) & 0x3FF) +#define TXGBE_RXD_SPH ((0x1) << 31) + +/*** @txgbe_rx_desc.wb.lower.hi_dword ***/ +/** bit 0-31, as rss hash when **/ +#define TXGBE_RXD_RSS_HASH(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.hi_dword.data))) + +/** bit 0-31, as ip csum when **/ +#define TXGBE_RXD_IPCSUM(rxd) \ + ((le16_to_cpu((rxd)->wb.lower.hi_dword.ip_csum.ipid))) +#define TXGBE_RXD_IPCSUM_CSUM(rxd) \ + ((le16_to_cpu((rxd)->wb.lower.hi_dword.ip_csum.csum))) + +/** bit 0-31, as fdir id when **/ +#define TXGBE_RXD_FDIR_ID(rxd) \ + ((le32_to_cpu((rxd)->wb.lower.hi_dword.data))) + +/*** @txgbe_rx_desc.wb.upper.status ***/ +#define TXGBE_RXD_STATUS(rxd) \ + (le32_to_cpu((rxd)->wb.upper.status)) /* All Status */ +/** bit 0-1 **/ +#define TXGBE_RXD_STAT_DD ((0x1) << 0) /* Descriptor Done */ +#define TXGBE_RXD_STAT_EOP ((0x1) << 1) /* End of Packet */ +/** bit 2-31, when EOP=1 **/ +#define TXGBE_RXD_NEXTP_RESV(v) ((0x3 & (v)) << 2) +#define TXGBE_RXD_NEXTP(v) ((0xFFFF & (v)) << 4) /* Next Descriptor Index */ +/** bit 2-31, when EOP=0 **/ +#define TXGBE_RXD_STAT_CLASS(v) ((0x7 & (v)) << 2) /* Packet Class */ +#define 
TXGBE_PKT_CLASS(r) (((r) >> 2) & 0x7) +#define TXGBE_PKT_CLASS_TC_RSS (0) /* RSS Hash */ +#define TXGBE_PKT_CLASS_FLM (1) /* FDir Match */ +#define TXGBE_PKT_CLASS_SYN (2) /* TCP Sync */ +#define TXGBE_PKT_CLASS_5TUPLE (3) /* 5 Tuple */ +#define TXGBE_PKT_CLASS_L2ETYPE (4) /* L2 Ethertype */ +#define TXGBE_RXD_STAT_VP ((0x1) << 5) /* IEEE VLAN Packet */ +#define TXGBE_RXD_STAT_UDPCS ((0x1) << 6) /* UDP xsum calculated */ +#define TXGBE_RXD_STAT_TPCS ((0x1) << 7) /* L4 xsum calculated */ +#define TXGBE_RXD_STAT_IPCS ((0x1) << 8) /* IP xsum calculated */ +#define TXGBE_RXD_STAT_PIF ((0x1) << 9) /* Non-unicast address */ +#define TXGBE_RXD_STAT_EIPCS ((0x1) << 10) /* Encap IP xsum calculated */ +#define TXGBE_RXD_STAT_VEXT ((0x1) << 11) /* Multi-VLAN */ +#define TXGBE_RXD_STAT_IPV6EX ((0x1) << 12) /* IPv6 with option header */ +#define TXGBE_RXD_STAT_LLINT ((0x1) << 13) /* Pkt caused Low Latency Interrupt */ +#define TXGBE_RXD_STAT_TS ((0x1) << 14) /* IEEE1588 Time Stamp */ +#define TXGBE_RXD_STAT_SECP ((0x1) << 15) /* Security Processing */ +#define TXGBE_RXD_STAT_LB ((0x1) << 16) /* Loopback Status */ +/* bit 17-30, when PKTTYPE=IP */ +#define TXGBE_RXD_STAT_BMC ((0x1) << 17) /* PKTTYPE=IP, BMC status */ +#define TXGBE_RXD_ERR_FDIRERR(v) ((0x7 & (v)) << 20) /* FDIRERR */ +#define TXGBE_RXD_ERR_FDIR_LEN ((0x1) << 20) /* FDIR Length error */ +#define TXGBE_RXD_ERR_FDIR_DROP ((0x1) << 21) /* FDIR Drop error */ +#define TXGBE_RXD_ERR_FDIR_COLL ((0x1) << 22) /* FDIR Collision error */ +#define TXGBE_RXD_ERR_HBO ((0x1) << 23) /*Header Buffer Overflow */ +#define TXGBE_RXD_ERR_EIPERR ((0x1) << 26) /* Encap IP header error */ +#define TXGBE_RXD_ERR_SECERR(v) ((0x3 & (v)) << 27) +#define TXGBE_IP_SECERR_0 (0) +#define TXGBE_IP_SECERR_1 (1) +#define TXGBE_IP_SECERR_2 (2) +#define TXGBE_IP_SECERR_3 (3) +#define TXGBE_RXD_ERR_RXE ((0x1) << 29) /* Any MAC Error */ +#define TXGBE_RXD_ERR_TPE ((0x1) << 30) /* TCP/UDP Checksum Error */ +#define TXGBE_RXD_ERR_IPE ((0x1) << 31) /* IP Checksum Error */ +/* bit 17-30, when PKTTYPE=FCOE */ +#define TXGBE_RXD_STAT_FCOEFS ((0x1) << 17) /* PKTTYPE=FCOE, FCoE EOF/SOF Stat */ +#define TXGBE_RXD_STAT_FCSTAT(v) ((0x3 & (v)) << 18) /* FCoE Pkt Stat */ +#define TXGBE_FCOE_FCSTAT(r) (((r) >> 18) & 0x7) +#define TXGBE_FCOE_FCSTAT_NOMTCH (0) /* No Ctxt Match */ +#define TXGBE_FCOE_FCSTAT_NODDP (1) /* Ctxt w/o DDP */ +#define TXGBE_FCOE_FCSTAT_FCPRSP (2) /* Recv. 
FCP_RSP */ +#define TXGBE_FCOE_FCSTAT_DDP (3) /* Ctxt w/ DDP */ +#define TXGBE_RXD_ERR_FCERR(v) ((0x7 & (v)) << 20) /* FCERR */ +#define TXGBE_FCOE_FCERR_0 (0) +#define TXGBE_FCOE_FCERR_1 (1) +#define TXGBE_FCOE_FCERR_2 (2) +#define TXGBE_FCOE_FCERR_3 (3) +#define TXGBE_FCOE_FCERR_4 (4) +#define TXGBE_FCOE_FCERR_5 (5) +#define TXGBE_FCOE_FCERR_6 (6) +#define TXGBE_FCOE_FCERR_7 (7) + +#define TXGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +//#define TXGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define TXGBE_TXD_IFCS 0x02000000U /* Insert FCS */ + +#define TXGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +//#define TXGBE_TXD_CC 0x00000080U /* Check Context */ +//#define TXGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define TXGBE_TXD_IIPCS 0x00000400U +//#define TXGBE_TXD_EIPCS 0x00000800U +#define TXGBE_TXD_L4CS 0x00000200U +#define TXGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define TXGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define TXGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define TXGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define TXGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define TXGBE_TXD_ENC_SHIFT 15 + +#define TXGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define TXGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define TXGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define TXGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define TXGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define TXGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define TXGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define TXGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define TXGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define TXGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define TXGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define TXGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ + +#define TXGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define TXGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define TXGBE_TXD_TUNNEL_UDP (0x0ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) +#define TXGBE_TXD_TUNNEL_GRE (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) + +/*** @txgbe_tx_ctxt_desc.rd.type_tucmd_mlhl ***/ +#define TXGBE_TXD_IPSEC_ESPLEN(v) (((v) & 0x1FF)) /* IPSec ESP length */ +#define TXGBE_TXD_SNAP ((0x1) << 10) /* SNAP indication */ +#define TXGBE_TXD_TPID_SEL(v) (((v) & 0x7) << 10) /* VLAN TPID index */ +#define TXGBE_TXD_IPSEC_TYPE(v) (((v) & 0x1) << 14) /* IPSec Type */ +#define TXGBE_IPSEC_TYPE_AH (0) +#define TXGBE_IPSEC_TYPE_ESP (1) +#define TXGBE_TXD_IPSEC_ESPENC(v) (((v) & 0x1) << 15) /* ESP encrypt */ +#define TXGBE_TXD_DTYP_CTXT ((0x1) << 20) /* CTXT/DATA descriptor */ +#define TXGBE_TXD_PKTTYPE(v) (((v) & 0xFF) << 24) /* packet type */ +/*** @txgbe_tx_ctxt_desc.rd.mss_l4len_idx ***/ +#define TXGBE_TXD_CTX_DD ((0x1)) /* Descriptor Done */ +#define TXGBE_TXD_TPLEN(v) (((v) & 0xFF) << 8) /* transport header length */ +#define TXGBE_TXD_MSS(v) (((v) & 0xFFFF) << 16) /* transport maximum segment size */ +/*** @txgbe_rx_desc.wb.upper.length ***/ +#define TXGBE_RXD_LENGTH(rxd) \ + ((le16_to_cpu((rxd)->wb.upper.length))) + +/*** @txgbe_rx_desc.wb.upper.vlan ***/ +#define TXGBE_RXD_VLAN(rxd) \ + ((le16_to_cpu((rxd)->wb.upper.vlan))) + +/* Receive Path */ +#define TXGBE_VXRDBAL(r) (0x01000 + (0x40 * (r))) +#define TXGBE_VXRDBAH(r) (0x01004 + (0x40 * (r))) +#define TXGBE_VXRDT(r) (0x01008 + (0x40 * (r))) +#define TXGBE_VXRDH(r) (0x0100C + (0x40 * (r))) 
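+/* Receive queue registers: each queue r owns a 0x40-byte window holding the
+ * descriptor base (VXRDBAL/VXRDBAH), tail (VXRDT) and head (VXRDH) above,
+ * plus the ring control register (VXRXDCTL) and its fields below.
+ */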
+#define TXGBE_VXRXDCTL(r) (0x01010 + (0x40 * (r))) +#define TXGBE_VXRXDCTL_ENABLE ((0x1) << 0) +#define TXGBE_VXRXDCTL_BUFSZ(f) ((0xF & (f)) << 8) +#define TXGBE_VXRXDCTL_BUFLEN(f) ((0x3F & (f)) << 1) +#define TXGBE_VXRXDCTL_HDRSZ(f) ((0xF & (f)) << 12) +#define TXGBE_VXRXDCTL_WTHRESH(f) ((0x7 & (f)) << 16) +#define TXGBE_VXRXDCTL_ETAG ((0x1) << 22) +#define TXGBE_VXRXDCTL_RSCMAX(f) ((0x3 & (f)) << 23) +#define TXGBE_RSCMAX_1 (0) +#define TXGBE_RSCMAX_4 (1) +#define TXGBE_RSCMAX_8 (2) +#define TXGBE_RSCMAX_16 (3) +#define TXGBE_VXRXDCTL_STALL ((0x1) << 25) +#define TXGBE_VXRXDCTL_SPLIT ((0x1) << 26) +#define TXGBE_VXRXDCTL_RSCMODE ((0x1) << 27) +#define TXGBE_VXRXDCTL_CNTAG ((0x1) << 28) +#define TXGBE_VXRXDCTL_RSCEN ((0x1) << 29) +#define TXGBE_VXRXDCTL_DROP ((0x1) << 30) +#define TXGBE_VXRXDCTL_VLAN ((0x1) << 31) + +/* Transmit Path */ +#define TXGBE_VXTDBAL(r) (0x03000 + (0x40 * (r))) +#define TXGBE_VXTDBAH(r) (0x03004 + (0x40 * (r))) +#define TXGBE_VXTDT(r) (0x03008 + (0x40 * (r))) +#define TXGBE_VXTDH(r) (0x0300C + (0x40 * (r))) +#define TXGBE_VXTXDCTL(r) (0x03010 + (0x40 * (r))) +#define TXGBE_VXTXDCTL_ENABLE ((0x1) << 0) +#define TXGBE_VXTXDCTL_BUFLEN(f) ((0x3F & (f)) << 1) +#define TXGBE_VXTXDCTL_PTHRESH(f) ((0xF & (f)) << 8) +#define TXGBE_VXTXDCTL_WTHRESH(f) ((0x7F & (f)) << 16) +#define TXGBE_VXTXDCTL_FLUSH ((0x1) << 26) + +/* board specific private data structure */ +#define TXGBE_F_CAP_RX_CSUM BIT(0) +#define TXGBE_F_CAP_LRO BIT(1) +#define TXGBE_F_REQ_RESET BIT(2) +#define TXGBE_F_REQ_QUEUE_RESET BIT(3) +#define TXGBE_F_ENA_RSS_IPV4UDP BIT(4) +#define TXGBE_F_ENA_RSS_IPV6UDP BIT(5) + +#define TXGBE_VXGPRC(r) (0x01014 + (0x40 * (r))) +#define TXGBE_VXGORC_LSB(r) (0x01018 + (0x40 * (r))) +#define TXGBE_VXGORC_MSB(r) (0x0101C + (0x40 * (r))) +#define TXGBE_VXMPRC(r) (0x01020 + (0x40 * (r))) +#define TXGBE_VXGPTC(r) (0x03014 + (0x40 * (r))) +#define TXGBE_VXGOTC_LSB(r) (0x03018 + (0x40 * (r))) +#define TXGBE_VXGOTC_MSB(r) (0x0301C + (0x40 * (r))) + +#define TXGBE_VXSTATUS 0x00004 +#define TXGBE_VXSTATUS_UP BIT(0) +#define TXGBE_VXSTATUS_SPEED(g) ((0x7 & (g)) >> 1) +#define TXGBE_VXSTATUS_SPEED_10G (0x1) +#define TXGBE_VXSTATUS_SPEED_1G (0x2) +#define TXGBE_VXSTATUS_SPEED_100M (0x4) + +#define TXGBE_VXMAILBOX 0x00600 +#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD |\ + TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK) +#define TXGBE_VXMAILBOX_SIZE (16 - 1) + +#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */ + +#define TXGBE_LINK_SPEED_100_FULL 0x0008 +#define TXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define TXGBE_LINK_SPEED_10GB_FULL 0x0080 + +__maybe_unused static int txgbe_conf_size(int v, int mwidth, int uwidth) +{ + int _v = v; + + return (_v) == 2 << (mwidth) ? 
0 : (_v) >> (uwidth); +} + +#define txgbe_buf_len(v) txgbe_conf_size(v, 13, 7) +#define txgbe_hdr_sz(v) txgbe_conf_size(v, 10, 6) +#define txgbe_buf_sz(v) txgbe_conf_size(v, 14, 10) +#define txgbe_pkt_thresh(v) txgbe_conf_size(v, 4, 0) + +/* Supported Rx Buffer Sizes */ +#define TXGBE_RXBUFFER_256 (256) /* Used for packet split */ +#define TXGBE_RXBUFFER_2048 (2048) +#define TXGBE_RXBUFFER_3072 (3072) +#define TXGBE_RX_HDR_SIZE TXGBE_RXBUFFER_256 +#define TXGBE_RX_BUF_SIZE TXGBE_RXBUFFER_2048 + +#define TXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define TXGBE_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(TXGBE_RXBUFFER_2048) - TXGBE_SKB_PAD) +#else +#define TXGBE_MAX_FRAME_BUILD_SKB TXGBE_RXBUFFER_2048 +#endif + +#define TXGBE_100K_ITR (0x005) +#define TXGBE_20K_ITR (0x019) +#define TXGBE_12K_ITR (0x02A) + +/*#define TXGBE_VFRETA_SIZE 64 64 entries */ +#define TXGBE_VFRETA_SIZE 128 /* 128 entries */ + +#define TXGBE_RSS_HASH_KEY_SIZE 40 +#define TXGBE_VFRSSRK_REGS 10 /* 10 registers for RSS key */ + +enum txgbe_xcast_modes { + TXGBE_XCAST_MODE_NONE = 0, + TXGBE_XCAST_MODE_MULTI, + TXGBE_XCAST_MODE_ALLMULTI, + TXGBE_XCAST_MODE_PROMISC, +}; + +/* Error Codes: + * (-256, 256): reserved for non-txgbe defined error code + */ +#define TXGBE_ERR_BASE (0x100) +enum txgbe_error { + TXGBE_ERR_NULL = TXGBE_ERR_BASE, /* errline=__LINE__+errno-256 */ + TXGBE_ERR_NOSUPP, + TXGBE_ERR_EEPROM, + TXGBE_ERR_EEPROM_CHECKSUM, + TXGBE_ERR_PHY, + TXGBE_ERR_CONFIG, + TXGBE_ERR_PARAM, + TXGBE_ERR_MAC_TYPE, + TXGBE_ERR_UNKNOWN_PHY, + TXGBE_ERR_LINK_SETUP, + TXGBE_ERR_ADAPTER_STOPPED, + TXGBE_ERR_INVALID_MAC_ADDR, + TXGBE_ERR_DEVICE_NOT_SUPPORTED, + TXGBE_ERR_MASTER_REQUESTS_PENDING, + TXGBE_ERR_INVALID_LINK_SETTINGS, + TXGBE_ERR_AUTONEG_NOT_COMPLETE, + TXGBE_ERR_RESET_FAILED, + TXGBE_ERR_SWFW_SYNC, + TXGBE_ERR_PHY_ADDR_INVALID, + TXGBE_ERR_I2C, + TXGBE_ERR_SFP_NOT_SUPPORTED, + TXGBE_ERR_SFP_NOT_PRESENT, + TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT, + TXGBE_ERR_NO_SAN_ADDR_PTR, + TXGBE_ERR_FDIR_REINIT_FAILED, + TXGBE_ERR_EEPROM_VERSION, + TXGBE_ERR_NO_SPACE, + TXGBE_ERR_OVERTEMP, + TXGBE_ERR_UNDERTEMP, + TXGBE_ERR_FC_NOT_NEGOTIATED, + TXGBE_ERR_FC_NOT_SUPPORTED, + TXGBE_ERR_SFP_SETUP_NOT_COMPLETE, + TXGBE_ERR_PBA_SECTION, + TXGBE_ERR_INVALID_ARGUMENT, + TXGBE_ERR_HOST_INTERFACE_COMMAND, + TXGBE_ERR_OUT_OF_MEM, + TXGBE_ERR_FEATURE_NOT_SUPPORTED, + TXGBE_ERR_EEPROM_PROTECTED_REGION, + TXGBE_ERR_FDIR_CMD_INCOMPLETE, + TXGBE_ERR_FLASH_LOADING_FAILED, + TXGBE_ERR_XPCS_POWER_UP_FAILED, + TXGBE_ERR_FW_RESP_INVALID, + TXGBE_ERR_PHY_INIT_NOT_DONE, + TXGBE_ERR_TOKEN_RETRY, + TXGBE_ERR_REG_TMOUT, + TXGBE_ERR_REG_ACCESS, + TXGBE_ERR_MBX, +}; + +#define TXGBE_ERR_NOSUPP (-TXGBE_ERR_NOSUPP) +#define TXGBE_ERR_EEPROM (-TXGBE_ERR_EEPROM) +#define TXGBE_ERR_EEPROM_CHECKSUM (-TXGBE_ERR_EEPROM_CHECKSUM) +#define TXGBE_ERR_PHY (-TXGBE_ERR_PHY) +#define TXGBE_ERR_CONFIG (-TXGBE_ERR_CONFIG) +#define TXGBE_ERR_PARAM (-TXGBE_ERR_PARAM) +#define TXGBE_ERR_MAC_TYPE (-TXGBE_ERR_MAC_TYPE) +#define TXGBE_ERR_UNKNOWN_PHY (-TXGBE_ERR_UNKNOWN_PHY) +#define TXGBE_ERR_LINK_SETUP (-TXGBE_ERR_LINK_SETUP) +#define TXGBE_ERR_ADAPTER_STOPPED (-TXGBE_ERR_ADAPTER_STOPPED) +#define TXGBE_ERR_INVALID_MAC_ADDR (-TXGBE_ERR_INVALID_MAC_ADDR) +#define TXGBE_ERR_DEVICE_NOT_SUPPORTED (-TXGBE_ERR_DEVICE_NOT_SUPPORTED) +#define TXGBE_ERR_MASTER_REQUESTS_PENDING (-TXGBE_ERR_MASTER_REQUESTS_PENDING) +#define TXGBE_ERR_INVALID_LINK_SETTINGS (-TXGBE_ERR_INVALID_LINK_SETTINGS) +#define TXGBE_ERR_AUTONEG_NOT_COMPLETE (-TXGBE_ERR_AUTONEG_NOT_COMPLETE) +#define 
TXGBE_ERR_RESET_FAILED (-TXGBE_ERR_RESET_FAILED) +#define TXGBE_ERR_SWFW_SYNC (-TXGBE_ERR_SWFW_SYNC) +#define TXGBE_ERR_PHY_ADDR_INVALID (-TXGBE_ERR_PHY_ADDR_INVALID) +#define TXGBE_ERR_I2C (-TXGBE_ERR_I2C) +#define TXGBE_ERR_SFP_NOT_SUPPORTED (-TXGBE_ERR_SFP_NOT_SUPPORTED) +#define TXGBE_ERR_SFP_NOT_PRESENT (-TXGBE_ERR_SFP_NOT_PRESENT) +#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT (-TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT) +#define TXGBE_ERR_NO_SAN_ADDR_PTR (-TXGBE_ERR_NO_SAN_ADDR_PTR) +#define TXGBE_ERR_FDIR_REINIT_FAILED (-TXGBE_ERR_FDIR_REINIT_FAILED) +#define TXGBE_ERR_EEPROM_VERSION (-TXGBE_ERR_EEPROM_VERSION) +#define TXGBE_ERR_NO_SPACE (-TXGBE_ERR_NO_SPACE) +#define TXGBE_ERR_OVERTEMP (-TXGBE_ERR_OVERTEMP) +#define TXGBE_ERR_UNDERTEMP (-TXGBE_ERR_UNDERTEMP) +#define TXGBE_ERR_FC_NOT_NEGOTIATED (-TXGBE_ERR_FC_NOT_NEGOTIATED) +#define TXGBE_ERR_FC_NOT_SUPPORTED (-TXGBE_ERR_FC_NOT_SUPPORTED) +#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE (-TXGBE_ERR_SFP_SETUP_NOT_COMPLETE) +#define TXGBE_ERR_PBA_SECTION (-TXGBE_ERR_PBA_SECTION) +#define TXGBE_ERR_INVALID_ARGUMENT (-TXGBE_ERR_INVALID_ARGUMENT) +#define TXGBE_ERR_HOST_INTERFACE_COMMAND (-TXGBE_ERR_HOST_INTERFACE_COMMAND) +#define TXGBE_ERR_OUT_OF_MEM (-TXGBE_ERR_OUT_OF_MEM) +#define TXGBE_ERR_FEATURE_NOT_SUPPORTED (-TXGBE_ERR_FEATURE_NOT_SUPPORTED) +#define TXGBE_ERR_EEPROM_PROTECTED_REGION (-TXGBE_ERR_EEPROM_PROTECTED_REGION) +#define TXGBE_ERR_FDIR_CMD_INCOMPLETE (-TXGBE_ERR_FDIR_CMD_INCOMPLETE) +#define TXGBE_ERR_FLASH_LOADING_FAILED (-TXGBE_ERR_FLASH_LOADING_FAILED) +#define TXGBE_ERR_XPCS_POWER_UP_FAILED (-TXGBE_ERR_XPCS_POWER_UP_FAILED) +#define TXGBE_ERR_FW_RESP_INVALID (-TXGBE_ERR_FW_RESP_INVALID) +#define TXGBE_ERR_PHY_INIT_NOT_DONE (-TXGBE_ERR_PHY_INIT_NOT_DONE) +#define TXGBE_ERR_TOKEN_RETRY (-TXGBE_ERR_TOKEN_RETRY) +#define TXGBE_ERR_REG_TMOUT (-TXGBE_ERR_REG_TMOUT) +#define TXGBE_ERR_REG_ACCESS (-TXGBE_ERR_REG_ACCESS) +#define TXGBE_ERR_MBX (-TXGBE_ERR_MBX) + +extern char txgbe_firmware_version[]; +extern char txgbevf_driver_name[]; +extern const char txgbevf_driver_version[]; + +typedef u32 txgbe_link_speed; + +struct txgbe_hw; + +struct txgbe_q_vector; + +typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq); + +enum txbgevf_state_t { + __TXGBE_TESTING, + __TXGBE_RESETTING, + __TXGBE_DOWN, + __TXGBE_DISABLED, + __TXGBE_REMOVING, + __TXGBE_SERVICE_SCHED, + __TXGBE_SERVICE_INITED, + __TXGBE_RESET_REQUESTED, + __TXGBE_QUEUE_RESET_REQUESTED, +}; + +enum txgbe_mac_type { + txgbe_mac_unknown = 0, + txgbe_mac_sp, + txgbe_mac_sp_vf, + txgbe_num_macs +}; + +struct txgbe_info { + enum txgbe_mac_type mac; + unsigned int flags; +}; + +enum txgbe_boards { + board_sp_vf, +}; + +enum txgbe_ring_state_t { + __TXGBE_RX_3K_BUFFER, + __TXGBE_RX_BUILD_SKB_ENABLED, + __TXGBE_TX_DETECT_HANG, + __TXGBE_HANG_CHECK_ARMED, + __TXGBE_RX_CSUM_UDP_ZERO_ERR, + __TXGBE_TX_XDP_RING, + __TXGBE_TX_XDP_RING_PRIMED, +}; + +enum txgbe_tx_flags { + /* cmd_type flags */ + TXGBE_TX_FLAGS_VLAN = 0x01, + TXGBE_TX_FLAGS_TSO = 0x02, + TXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + TXGBE_TX_FLAGS_CC = 0x08, + TXGBE_TX_FLAGS_IPV4 = 0x10, + TXGBE_TX_FLAGS_CSUM = 0x20, + TXGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + TXGBE_TX_FLAGS_LINKSEC = 0x200, + TXGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + TXGBE_TX_FLAGS_FCOE = 0x80, +}; + +#define TXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 +#define TXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define TXGBE_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) 
? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +struct txgbe_mac_operations { + s32 (*init_hw)(struct txgbe_hw *hw); + s32 (*reset_hw)(struct txgbe_hw *hw); + s32 (*start_hw)(struct txgbe_hw *hw); + s32 (*get_mac_addr)(struct txgbe_hw *hw, u8 *mac_addr); + s32 (*get_fw_version)(struct txgbe_hw *hw); + + /* Link */ + s32 (*check_link)(struct txgbe_hw *hw, + txgbe_link_speed *speed, bool *link_up, bool autoneg_wait_to_complete); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); + s32 (*set_uc_addr)(struct txgbe_hw *hw, u32 index, u8 *addr); + + s32 (*update_mc_addr_list)(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, txgbe_mc_addr_itr next, + bool clear); + s32 (*update_xcast_mode)(struct txgbe_hw *hw, int xcast_mode); + s32 (*get_link_state)(struct txgbe_hw *hw, bool *link_state); + s32 (*set_vfta)(struct txgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass); + s32 (*stop_adapter)(struct txgbe_hw *hw); +}; + +struct txgbe_mac_info { + struct txgbe_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum txgbe_mac_type type; + + s32 mc_filter_type; + + bool get_link_status; + u32 max_tx_queues; + u32 max_rx_queues; + u32 max_msix_vectors; +}; + +struct txgbe_mbx_operations { + void (*init_params)(struct txgbe_hw *hw); + s32 (*read)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*check_for_msg)(struct txgbe_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct txgbe_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct txgbe_hw *hw, u16 mbx_id); +}; + +struct txgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct txgbe_mbx_info { + struct txgbe_mbx_operations ops; + struct txgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; /* buffered r2c bits */ + u16 size; +}; + +struct txgbe_hw { + void *back; + u16 *msg_enable; + struct pci_dev *pdev; + + u8 __iomem *hw_addr; + u8 __iomem *b4_addr; + + struct txgbe_mac_info mac; + struct txgbe_mbx_info mbx; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; + bool adapter_stopped; + + int api_version; + + u32 b4_buf[16]; +}; + +struct txgbe_sw_stats { + u64 tx_busy; + u64 tx_restart_queue; + u64 tx_timeout_count; + u64 rx_csum_bad; + u64 rx_no_dma_resources; + u64 rx_alloc_page_failed; + u64 rx_alloc_buff_failed; +}; + +struct txgbe_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; +}; + +struct txgbe_ring_stats { + u64 packets; + u64 bytes; +}; + +struct txgbe_tx_queue_stats { + u64 tx_restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct txgbe_rx_queue_stats { + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; + u64 csum_err; +}; + +/* Context descriptors */ +struct txgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +struct txgbe_tx_desc { + __le64 pkt_addr; + __le32 cmd_type_len; + __le32 status; +}; + +/*** @txgbe_tx_desc.cmd_type_len ***/ +#define TXGBE_TXD_DTALEN(v) (((v) & 0xFFFF)) /* data buffer length */ +#define TXGBE_TXD_TSTAMP (0x1 << 19) /* IEEE1588 time stamp */ +#define TXGBE_TXD_EOP (0x1 << 
24) /* End of Packet */ +#define TXGBE_TXD_FCS (0x1 << 25) /* Insert FCS */ +#define TXGBE_TXD_LINKSEC (0x1 << 26) /* Insert LinkSec */ +#define TXGBE_TXD_RS (0x1 << 27) /* Report Status */ +#define TXGBE_TXD_ECU (0x1 << 28) /* forward to ECU */ +#define TXGBE_TXD_CNTAG (0x1 << 29) /* insert CN tag */ +#define TXGBE_TXD_VLE (0x1 << 30) /* insert VLAN tag */ +#define TXGBE_TXD_TSE (0x1 << 31) /* enable transmit segmentation */ + +/*** @txgbe_tx_desc.status ***/ +#define TXGBE_TXD_STAT_DD TXGBE_TXD_CTX_DD /* Descriptor Done */ +#define TXGBE_TXD_BAK_DESC ((0x1) << 4) /* use backup descriptor */ +#define TXGBE_TXD_CC ((0x1) << 7) /* check context */ +#define TXGBE_TXD_IPSEC ((0x1) << 8) /* request IPSec offload */ +#define TXGBE_TXD_TPCS ((0x1) << 9) /* insert TCP/UDP checksum */ +#define TXGBE_TXD_IPCS ((0x1) << 10) /* insert IP checksum */ +#define TXGBE_TXD_EIPCS ((0x1) << 11) /* insert outer IP checksum */ +#define TXGBE_TXD_MNGFLT ((0x1) << 12) /* enable management filter */ +#define TXGBE_TXD_PAYLEN(v) (((v) & 0x7FFFF) << 13) /* payload length */ +struct txgbe_tx_buffer { + struct txgbe_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ + void *data; + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct txgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma_addr; + dma_addr_t page_dma; + struct page *page; + u32 page_offset; + u16 pagecnt_bias; +}; + +struct txgbe_ring; +struct txgbe_ring_container { + struct txgbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u8 count; /* total number of rings in vector */ + u16 itr; /* current ITR setting for ring */ +}; + +struct txgbe_ring { + struct txgbe_ring *next; + struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device for DMA mapping */ + void *desc; /* descriptor ring memory */ + union { + struct txgbe_tx_buffer *tx_buffer_info; + struct txgbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma_addr; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 que_idx; /* software netdev-relative queue offset */ + u8 reg_idx; /* hardware global-absolute ring offset */ + struct sk_buff *skb; + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct txgbe_ring_stats stats; + struct u64_stats_sync syncp; + + union { + struct txgbe_tx_queue_stats tx_stats; + struct txgbe_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +struct txgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u8 __iomem *io_addr; + u8 __iomem *b4_addr; + struct net_device *netdev; + struct pci_dev *pdev; + struct txgbe_hw hw; + unsigned long state; + u32 *rss_key; + u8 rss_indir_tbl[128]; + u32 flags; + bool link_state; +#define TXGBE_FLAG_RX_CSUM_ENABLED BIT(1) +#define TXGBE_FLAGS_LEGACY_RX BIT(2) +#define TXGBE_FLAG_RSS_FIELD_IPV4_UDP BIT(4) +#define TXGBE_FLAG_RSS_FIELD_IPV6_UDP BIT(5) + + /* statistic states */ + struct rtnl_link_stats64 net_stats; + struct txgbe_sw_stats sw_stats; + struct txgbe_hw_stats stats, last_stats, base_stats, reset_stats; + struct txgbe_hw_stats reg_stats[MAX_TX_QUEUES], last_reg_stats[MAX_TX_QUEUES]; + + /* interrupt vector accounting */ + struct txgbe_q_vector *q_vector[MAX_Q_VECTORS]; + int num_q_vectors; + struct msix_entry *msix_entries; + + /* Rings, Tx first since it is accessed in hotpath */ + struct txgbe_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ + struct txgbe_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */ + +#define DEFAULT_DEBUG_LEVEL (0x7) + u16 msg_enable; + + u32 flagsd; /* flags define: CAP */ + u16 bd_number; + + /* mailbox spin lock */ + spinlock_t mbx_lock; + + /* pf statstic spin lock */ + spinlock_t pf_count_lock; + + u32 link_speed; + bool link_up; + + /* Tx hotpath */ + u16 tx_ring_count; + u16 num_tx_queues; + u16 tx_itr_setting; + + /* Rx hotpath */ + u16 rx_ring_count; + u16 num_rx_queues; + u16 rx_itr_setting; + + unsigned long last_reset; + + u32 eims_enable_mask; + u32 eims_other; + + struct timer_list service_timer; + struct work_struct service_task; +}; + +struct txgbe_q_vector { + struct txgbe_adapter *adapter; + u16 v_idx; + u16 itr; + struct napi_struct napi; + struct txgbe_ring_container rx; + struct txgbe_ring_container tx; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + + /* for dynamic allocation of rings associated with this q_vector */ + struct txgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +union txgbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } rd; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 data; /* RSS Hash */ + struct { + __le16 ipid; /* IP id */ + __le16 csum; /* Packet Checksum */ + } ip_csum; + } hi_dword; + } lower; + struct { + __le32 status; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(__TXGBE_RX_3K_BUFFER, &(ring)->state) +#define set_ring_uses_large_buffer(ring) \ + set_bit(__TXGBE_RX_3K_BUFFER, &(ring)->state) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(__TXGBE_RX_3K_BUFFER, &(ring)->state) + +#define ring_uses_build_skb(ring) \ + 
test_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define set_ring_build_skb_enabled(ring) \ + set_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#define check_for_tx_hang(ring) \ + test_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) + +static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring)) + +__maybe_unused static struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = + container_of(hw, struct txgbe_adapter, hw); + return adapter->netdev; +} + +static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return TXGBE_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return TXGBE_MAX_FRAME_BUILD_SKB; +#endif + return TXGBE_RXBUFFER_2048; +} + +#define txgbevf_dbg(hw, fmt, arg...) \ + netdev_dbg(txgbe_hw_to_netdev(hw), fmt, ##arg) + +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) 
\ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +/* iterator for handling rings in ring container */ +#define txgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +/* read register */ +#define TXGBE_DEAD_READ_RETRIES 10 +#define TXGBE_DEAD_READ_REG 0xdeadbeefU +#define TXGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL +#define TXGBE_FAILED_READ_REG 0xffffffffU +#define TXGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +#define TXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +static inline bool TXGBE_REMOVED(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline u32 +txgbe_rd32(u8 __iomem *base, u32 reg) +{ + return readl(base + reg); +} + +static inline u32 +rd32(struct txgbe_hw *hw, u32 reg) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base, reg); + + return val; +} + +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) + +static inline u32 +rd32m(struct txgbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = TXGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = txgbe_rd32(base, reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +txgbe_wr32(u8 __iomem *base, u32 reg, u32 val) +{ + writel(val, base + reg); +} + +static inline void +wr32(struct txgbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + txgbe_wr32(base, reg, val); +} + +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = txgbe_rd32(base, reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + txgbe_wr32(base, reg, val); +} + +/* poll register */ +#define TXGBE_MDIO_TIMEOUT 1000 +#define TXGBE_I2C_TIMEOUT 1000 +#define TXGBE_SPI_TIMEOUT 1000 +static inline s32 +po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field, u16 time, u16 loop) +{ + bool msec = false; + + if (time / loop > 1000 * MAX_UDELAY_MS) { + msec = true; + time /= 1000; + } + + do { + u32 val = rd32(hw, reg); + + if (val == TXGBE_FAILED_READ_REG) + return TXGBE_ERR_REG_ACCESS; + + if (val != TXGBE_DEAD_READ_REG && + (val & mask) == (field & mask)) + break; + else if (--loop == 0) + break; + + if (msec) + mdelay(time); + else + udelay(time); + } while (true); + + return (loop > 0 ? 0 : -TXGBE_ERR_REG_TMOUT); +} + +static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->que_idx); +} + +static inline u16 txgbe_desc_unused(struct txgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +static inline unsigned int txgbe_rx_offset(struct txgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
TXGBE_SKB_PAD : 0; +} + +#define txgbe_flush(a) rd32(a, TXGBE_VXSTATUS) + +int txgbevf_open(struct net_device *netdev); +int txgbevf_close(struct net_device *netdev); +int txgbe_negotiate_api_version(struct txgbe_hw *hw, int api); +void txgbe_init_ops_vf(struct txgbe_hw *hw); +s32 txgbe_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size); +int txgbe_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc); +void txgbevf_set_rx_mode(struct net_device *netdev); +void txgbe_init_last_counter_stats(struct txgbe_adapter *adapter); +int txgbevf_poll(struct napi_struct *napi, int budget); +void txgbevf_free_rx_resources(struct txgbe_ring *rx_ring); +int txgbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); +void txgbevf_service_event_schedule(struct txgbe_adapter *adapter); +void txgbevf_write_eitr(struct txgbe_q_vector *q_vector); +void txgbevf_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring); +void txgbevf_alloc_rx_buffers(struct txgbe_ring *rx_ring, + u16 cleaned_count); +void txgbevf_set_ethtool_ops(struct net_device *netdev); +void txgbevf_reinit_locked(struct txgbe_adapter *adapter); +int txgbevf_setup_tx_resources(struct txgbe_ring *tx_ring); +void txgbevf_free_tx_resources(struct txgbe_ring *tx_ring); +int txgbevf_setup_rx_resources(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring); +void txgbevf_down(struct txgbe_adapter *adapter); +void txgbe_free_irq(struct txgbe_adapter *adapter); +void txgbe_configure(struct txgbe_adapter *adapter); +int txgbe_request_irq(struct txgbe_adapter *adapter); +void txgbe_up_complete(struct txgbe_adapter *adapter); +void txgbevf_reset(struct txgbe_adapter *adapter); +void txgbevf_update_stats(struct txgbe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c new file mode 100644 index 0000000000000000000000000000000000000000..a217ab2387c4e85756d5c6749ce5d1a11478c345 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c @@ -0,0 +1,4509 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe_vf.h" +#include "txgbe_mbx.h" +#include "txgbe_txrx.h" + +char txgbevf_driver_name[] = "txgbevf"; + +static const char txgbe_driver_string[] = + "WangXun(R) 10GbE PCI Express Virtual Function Linux Network Driver"; + +#define DRV_VERSION __stringify(1.3.1-k) +const char txgbevf_driver_version[32] = DRV_VERSION; + +char txgbe_firmware_version[TXGBE_FW_VER_SIZE] = "N/A"; + +/* txgbe_pci_tbl - PCI Device ID Table */ +static struct pci_device_id txgbe_pci_tbl[] = { + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000_VF), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820_VF), 0}, + { .device = 0 } /* required last entry */ +}; + +static struct txgbe_info txgbe_sp_vf_info = { + .mac = txgbe_mac_sp_vf, + .flags = 0, +}; + +static const struct txgbe_info *txgbe_info_tbl[] = { + [board_sp_vf] = &txgbe_sp_vf_info, +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +static inline int txgbevf_init_rss_key(struct txgbe_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(40, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + netdev_rss_key_fill(rss_key, 40); + adapter->rss_key = rss_key; + } + return 0; +} + +void txgbe_negotiate_api(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int api[] = { + txgbe_mbox_api_13, + txgbe_mbox_api_12, + txgbe_mbox_api_11, + txgbe_mbox_api_10, + txgbe_mbox_api_unknown}; + int err = 0, idx = 0; + + spin_lock_bh(&adapter->mbx_lock); + + while (api[idx] != txgbe_mbox_api_unknown) { + err = txgbe_negotiate_api_version(hw, api[idx]); + if (!err) + break; + idx++; + } + + spin_unlock_bh(&adapter->mbx_lock); +} + +void txgbevf_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + s32 err; + + if (unlikely(!hw->hw_addr)) + return; + + err = hw->mac.ops.reset_hw(hw); + if (!err) + err = hw->mac.ops.init_hw(hw); + + if (err) + e_err(probe, "reset function\n"); + else + txgbe_negotiate_api(adapter); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->last_reset = jiffies; +} + +/** + * txgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct txgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + struct txgbe_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = TXGBE_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the 
eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * txgbevf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void txgbevf_free_tx_resources(struct txgbe_ring *tx_ring) +{ + txgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma_addr); + + tx_ring->desc = NULL; +} + +/** + * txgbevf_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int txgbevf_setup_tx_resources(struct txgbe_ring *tx_ring) +{ + int size; + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + u64_stats_init(&tx_ring->syncp); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct txgbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, + &tx_ring->dma_addr, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
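+ * (The error path below already handles the common case: when allocation
+ * fails for Tx queue i, rings 0..i-1 are rewound with
+ * txgbevf_free_tx_resources().)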
+ * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i; + int err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = txgbevf_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + e_err(drv, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbevf_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * txgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void txgbe_free_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + txgbevf_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * txgbevf_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int txgbevf_setup_rx_resources(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + int size; + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union txgbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, + &rx_ring->dma_addr, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = txgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); + if (!err) + continue; + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbevf_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * txgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring pages */ + while (i != rx_ring->next_to_alloc) { + struct txgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma_addr, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + rx_buffer->dma_addr, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * txgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +void txgbe_clean_all_rx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * txgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +void txgbe_clean_all_tx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * txgbevf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void txgbevf_free_rx_resources(struct txgbe_ring *rx_ring) +{ + txgbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma_addr); + + rx_ring->desc = NULL; +} + +/** + * txgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + txgbevf_free_rx_resources(adapter->rx_ring[i]); +} + +int txgbe_configure_dcb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + unsigned int num_rx_queues = adapter->num_rx_queues; + unsigned int num_tx_queues = adapter->num_tx_queues; + int err; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = txgbe_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return err; + + if (num_tcs > 1) { + /* we need only one Tx queue */ + num_tx_queues = 1; + + /* update default Tx ring register index */ + adapter->tx_ring[0]->reg_idx = def_q; + + /* we need as many queues as traffic classes */ + num_rx_queues = num_tcs; + } + + /* if we have a bad config abort request queue reset */ + if (adapter->num_rx_queues != num_rx_queues || + adapter->num_tx_queues != num_tx_queues) { + /* force mailbox timeout to prevent further messages */ + hw->mbx.timeout = 0; + + /* wait for watchdog to come around and bail us out */ + adapter->flagsd |= TXGBE_F_REQ_QUEUE_RESET; + } + + return 0; +} + +void txgbe_restore_vlan(struct txgbe_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + txgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +/** + * txgbevf_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
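+ *
+ * The sequence below is: flush/disable the queue, program the descriptor
+ * ring base address, reset the head/tail registers (and the software
+ * ntu/ntc to match), then set ENABLE and poll VXTXDCTL until the
+ * hardware reports the queue as enabled.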
+ **/ +void txgbevf_configure_tx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma_addr; + u32 txdctl = 0; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + wr32(hw, TXGBE_VXTXDCTL(reg_idx), TXGBE_VXTXDCTL_FLUSH); + txgbe_flush(hw); + + wr32(hw, TXGBE_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_VXTDBAH(reg_idx), tdba >> 32); + + /* enable relaxed ordering */ + pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL, + 0, PCI_EXP_DEVCTL_RELAX_EN); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_VXTDH(reg_idx), 0); + wr32(hw, TXGBE_VXTDT(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_VXTDT(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= TXGBE_VXTXDCTL_BUFLEN(txgbe_buf_len(ring->count)); + txdctl |= TXGBE_VXTXDCTL_ENABLE; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct txgbe_tx_buffer) * ring->count); + + clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state); + clear_bit(__TXGBE_TX_XDP_RING_PRIMED, &ring->state); + + wr32(hw, TXGBE_VXTXDCTL(reg_idx), txdctl); + /* poll to verify queue is enabled */ + if (po32m(hw, TXGBE_VXTXDCTL(reg_idx), + TXGBE_VXTXDCTL_ENABLE, TXGBE_VXTXDCTL_ENABLE, 1000, 10)) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * txgbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +void txgbe_configure_tx(struct txgbe_adapter *adapter) +{ + u32 i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + txgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + struct net_device *netdev = adapter->netdev; + unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + set_ring_build_skb_enabled(rx_ring); + + if (PAGE_SIZE < 8192) { + if (max_frame <= TXGBE_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); + } +} + +void txgbe_setup_psrtype(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* PSRTYPE must be initialized */ + u32 psrtype = TXGBE_VXMRQC_PSR_L2HDR | + TXGBE_VXMRQC_PSR_L3HDR | + TXGBE_VXMRQC_PSR_L4HDR | + TXGBE_VXMRQC_PSR_TUNHDR | + TXGBE_VXMRQC_PSR_TUNMAC; + + if (adapter->num_rx_queues > 1) + psrtype |= BIT(14); + + wr32m(hw, TXGBE_VXMRQC, TXGBE_VXMRQC_PSR(~0), TXGBE_VXMRQC_PSR(psrtype)); +} + +void txgbe_setup_vfmrqc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vfmrqc = 0, vfreta = 0; + u16 rss_i = adapter->num_rx_queues; + u8 i, j; + + /* Fill out hash function seeds */ + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + for (i = 0; i < TXGBE_VFRSSRK_REGS; i++) + wr32(hw, TXGBE_VXRSSRK(i), adapter->rss_key[i]); + + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + + vfreta |= j << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_VXRETA(i >> 2), vfreta); + vfreta = 0; + } + } + + /* Perform hash on these packet types */ + vfmrqc |= TXGBE_VXMRQC_RSS_ALG_IPV4 | + TXGBE_VXMRQC_RSS_ALG_IPV4_TCP | + 
TXGBE_VXMRQC_RSS_ALG_IPV6 | + TXGBE_VXMRQC_RSS_ALG_IPV6_TCP; + + vfmrqc |= TXGBE_VXMRQC_RSS_EN; + + if (adapter->num_rx_queues > 3) + vfmrqc |= TXGBE_VXMRQC_RSS_HASH(2); + else if (adapter->num_rx_queues > 1) + vfmrqc |= TXGBE_VXMRQC_RSS_HASH(1); + + wr32m(hw, TXGBE_VXMRQC, TXGBE_VXMRQC_RSS(~0), TXGBE_VXMRQC_RSS(vfmrqc)); +} + +void txgbe_configure_srrctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring, int index) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 srrctl; + + srrctl = rd32m(hw, TXGBE_VXRXDCTL(index), + ~(TXGBE_VXRXDCTL_HDRSZ(~0) | TXGBE_VXRXDCTL_BUFSZ(~0))); + srrctl |= TXGBE_VXRXDCTL_DROP; + srrctl |= TXGBE_VXRXDCTL_HDRSZ(txgbe_hdr_sz(TXGBE_RX_HDR_SIZE)); + if (ring_uses_large_buffer(ring)) + srrctl |= TXGBE_VXRXDCTL_BUFSZ(txgbe_buf_sz(3072)); + else + srrctl |= TXGBE_VXRXDCTL_BUFSZ(txgbe_buf_sz(TXGBE_RX_BUF_SIZE)); + + wr32(hw, TXGBE_VXRXDCTL(index), srrctl); +} + +void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + if (po32m(hw, TXGBE_VXRXDCTL(reg_idx), + TXGBE_VXRXDCTL_ENABLE, TXGBE_VXRXDCTL_ENABLE, 1000, 10)) + e_err(probe, + "RXDCTL.ENABLE queue %d not set while polling\n", + reg_idx); +} + +void txgbevf_configure_rx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + union txgbe_rx_desc *rx_desc; + u64 rdba = ring->dma_addr; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, TXGBE_VXRXDCTL(reg_idx)); + txgbevf_disable_rx_queue(adapter, ring); + + wr32(hw, TXGBE_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_VXRDBAH(reg_idx), rdba >> 32); + + /* enable relaxed ordering */ + pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL, + 0, PCI_EXP_DEVCTL_RELAX_EN); + + /* reset head and tail pointers */ + wr32(hw, TXGBE_VXRDH(reg_idx), 0); + wr32(hw, TXGBE_VXRDT(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_VXRDT(reg_idx); + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct txgbe_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = TXGBE_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + txgbe_configure_srrctl(adapter, ring, reg_idx); + + /* allow any size packet since we can handle overflow */ + rxdctl &= ~TXGBE_VXRXDCTL_BUFLEN(~0); + + rxdctl |= TXGBE_VXRXDCTL_BUFLEN(txgbe_buf_len(ring->count)); + rxdctl |= TXGBE_VXRXDCTL_ENABLE | TXGBE_VXRXDCTL_VLAN; + + /* enable RSC */ + rxdctl &= ~TXGBE_VXRXDCTL_RSCMAX(~0); + rxdctl |= TXGBE_VXRXDCTL_RSCMAX(TXGBE_RSCMAX_1); + rxdctl |= TXGBE_VXRXDCTL_RSCEN; + + wr32(hw, TXGBE_VXRXDCTL(reg_idx), rxdctl); + + txgbe_rx_desc_queue_enable(adapter, ring); + txgbevf_alloc_rx_buffers(ring, txgbe_desc_unused(ring)); +} + +/** + * txgbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
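+ *
+ * This also programs the packet-split and RSS configuration (PSRTYPE and
+ * VFMRQC) and asks the PF, over the mailbox, to accept the current MTU
+ * before the per-ring setup is done.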
+ **/ +void txgbe_configure_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i, ret; + + txgbe_setup_psrtype(adapter); + txgbe_setup_vfmrqc(adapter); + + spin_lock_bh(&adapter->mbx_lock); + ret = txgbe_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) + e_info(drv, "Failed to set MTU at %d\n", netdev->mtu); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *rx_ring = adapter->rx_ring[i]; + + txgbe_set_rx_buffer_len(adapter, rx_ring); + txgbevf_configure_rx_ring(adapter, rx_ring); + } +} + +void txgbe_configure(struct txgbe_adapter *adapter) +{ + txgbe_configure_dcb(adapter); + txgbevf_set_rx_mode(adapter->netdev); + txgbe_restore_vlan(adapter); + txgbe_configure_tx(adapter); + txgbe_configure_rx(adapter); +} + +/** + * txgbe_msix_rings - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +irqreturn_t txgbe_msix_rings(int __always_unused irq, void *data) +{ + struct txgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) +{ + struct txgbe_adapter *adapter = data; + struct txgbe_hw *hw = &adapter->hw; + + hw->mac.get_link_status = 1; + + txgbevf_service_event_schedule(adapter); + + wr32(hw, TXGBE_VXIMC, adapter->eims_other); + + return IRQ_HANDLED; +} + +/** + * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * txgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. 
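+ *
+ * Queue vectors are named "<netdev>-TxRx-N", "<netdev>-rx-N" or
+ * "<netdev>-tx-N" according to the rings they serve; the last MSI-X
+ * entry is reserved for the mailbox/link ("other") interrupt.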
+ **/ +int txgbe_request_msix_irqs(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &txgbe_msix_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(drv, "request_irq failed for MSIX interrupt Error: %d\n", err); + goto free_queue_irqs; + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + &txgbe_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(drv, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * txgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +int txgbe_request_irq(struct txgbe_adapter *adapter) +{ + int err; + + err = txgbe_request_msix_irqs(adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +void txgbe_free_irq(struct txgbe_adapter *adapter) +{ + int vector; + + if (!adapter->msix_entries) + return; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +void txgbe_napi_enable_all(struct txgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_enable(&adapter->q_vector[q_idx]->napi); +} + +void txgbe_napi_disable_all(struct txgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_disable(&adapter->q_vector[q_idx]->napi); +} + +/** + * txgbevf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +inline void txgbevf_irq_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_VXIMC, adapter->eims_enable_mask); +} + +void txgbe_save_reset_stats(struct txgbe_adapter *adapter) +{ + /* Only save pre-reset stats if there are some */ + if (adapter->stats.gprc || adapter->stats.gptc) { + adapter->reset_stats.gprc = adapter->stats.gprc - + adapter->base_stats.gprc; + adapter->reset_stats.gptc = adapter->stats.gptc - + adapter->base_stats.gptc; + adapter->reset_stats.gorc = adapter->stats.gorc - + adapter->base_stats.gorc; + adapter->reset_stats.gotc = 
adapter->stats.gotc - + adapter->base_stats.gotc; + adapter->reset_stats.mprc = adapter->stats.mprc - + adapter->base_stats.mprc; + } +} + +/** + * txgbe_set_ivar - set IVAR registers - maps interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct txgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= TXGBE_VXIVAR_MISC_VALID; + ivar = rd32(hw, TXGBE_VXIVAR_MISC); + ivar &= ~0xFF; + ivar |= msix_vector; + wr32(hw, TXGBE_VXIVAR_MISC, ivar); + } else { + /* tx or rx causes */ + msix_vector |= TXGBE_VXIVAR_MISC_VALID; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, TXGBE_VXIVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, TXGBE_VXIVAR(queue >> 1), ivar); + } +} + +/** + * txgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * txgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +void txgbe_configure_msix(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int v_idx; + + adapter->eims_enable_mask = 0; + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct txgbe_ring *ring; + + q_vector = adapter->q_vector[v_idx]; + + txgbe_for_each_ring(ring, q_vector->rx) + txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + txgbe_for_each_ring(ring, q_vector->tx) + txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + if (q_vector->tx.ring && !q_vector->rx.ring) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = TXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = TXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= BIT(v_idx); + + txgbevf_write_eitr(q_vector); + } + + txgbe_set_ivar(adapter, -1, 1, v_idx); + + /* setup eims_other and add value to global eims_enable_mask */ + adapter->eims_other = BIT(v_idx); + adapter->eims_enable_mask |= adapter->eims_other; +} + +void txgbe_up_complete(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + bool state; + + txgbe_configure_msix(adapter); + spin_lock_bh(&adapter->mbx_lock); + + if (is_valid_ether_addr(hw->mac.addr)) + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 0); + else + hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0, 0); + + spin_unlock_bh(&adapter->mbx_lock); + + state = adapter->link_state; + spin_lock_bh(&adapter->mbx_lock); + hw->mac.ops.get_link_state(hw, &adapter->link_state); + spin_unlock_bh(&adapter->mbx_lock); + if (state && state != adapter->link_state) + e_info(drv, "VF is administratively disabled\n"); + + /* memory barrier */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_DOWN, &adapter->state); + txgbe_napi_enable_all(adapter); + + /* clear any pending interrupts, may auto mask */ + wr32(hw, TXGBE_VXICR, ~0); + txgbevf_irq_enable(adapter); + + msleep(1000); + /* enable transmits */ + netif_tx_start_all_queues(netdev); + 
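+ /* Re-baseline statistics: keep any pre-reset deltas, then snapshot
+  * the hardware counters so later updates are computed relative to
+  * this point.
+  */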
txgbe_save_reset_stats(adapter); + txgbe_init_last_counter_stats(adapter); + + hw->mac.get_link_status = 1; + mod_timer(&adapter->service_timer, jiffies); +} + +int txgbevf_open(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int err; + + /* A previous failure to open the device because of a lack of + * available MSIX vector resources may have reset the number + * of q vectors variable to zero. The only way to recover + * is to unload/reload the driver and hope that the system has + * been able to recover some MSIX vector resources. + */ + if (!adapter->num_q_vectors) + return -ENOMEM; + + if (hw->adapter_stopped) { + txgbevf_reset(adapter); + /* if adapter is still stopped then PF isn't up and + * the vf can't start. + */ + if (hw->adapter_stopped) { + err = TXGBE_ERR_MBX; + e_err(drv, "Unable to start - perhaps PF isn't up yet\n"); + goto err_setup_reset; + } + } + + /* disallow open during test */ + if (test_bit(__TXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = txgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = txgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + txgbe_configure(adapter); + err = txgbe_request_irq(adapter); + if (err) + goto err_req_irq; + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + txgbe_up_complete(adapter); + + return 0; + +err_set_queues: + txgbe_free_irq(adapter); +err_req_irq: + txgbe_free_all_rx_resources(adapter); +err_setup_rx: + txgbe_free_all_tx_resources(adapter); +err_setup_tx: + txgbevf_reset(adapter); +err_setup_reset: + + return err; +} + +void txgbevf_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + rxdctl = rd32(hw, TXGBE_VXRXDCTL(reg_idx)); + rxdctl &= ~TXGBE_VXRXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32(hw, TXGBE_VXRXDCTL(reg_idx), rxdctl); + + /* the hardware may take up to 100us to really disable the rx queue */ + if (po32m(hw, TXGBE_VXRXDCTL(reg_idx), + TXGBE_VXRXDCTL_ENABLE, 0, 10, 10)) + e_info(probe, "RXDCTL.ENABLE queue %d not cleared while polling\n", reg_idx); +} + +/** + * txgbevf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +inline void txgbevf_irq_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int vector; + + wr32(hw, TXGBE_VXIMS, ~0); + + txgbe_flush(hw); + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); +} + +void txgbevf_down(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + int i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__TXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + txgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 
20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + txgbevf_irq_disable(adapter); + + txgbe_napi_disable_all(adapter); + + del_timer_sync(&adapter->service_timer); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + + wr32(hw, TXGBE_VXTXDCTL(reg_idx), + TXGBE_VXTXDCTL_FLUSH); + } + + if (!pci_channel_offline(adapter->pdev)) + txgbevf_reset(adapter); + + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); +} + +int txgbevf_close(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbevf_down(adapter); + txgbe_free_irq(adapter); + + txgbe_free_all_tx_resources(adapter); + txgbe_free_all_rx_resources(adapter); + + return 0; +} + +/** + * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) + * @adapter: board private structure to initialize + **/ +static int txgbe_sw_init(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err = 0; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + txgbe_init_ops_vf(hw); + hw->mbx.ops.init_params(hw); + err = txgbevf_init_rss_key(adapter); + if (err) + return err; + + /* assume legacy case in which PF would only give VF 2 queues */ + hw->mac.max_tx_queues = 4; + hw->mac.max_rx_queues = 4; + + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + spin_lock_init(&adapter->pf_count_lock); + + /*make sure PF is up*/ + if (adapter->bd_number == 0) + msleep(1500); + + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state. 
Is the PF interface up?\n"); + } else { + err = hw->mac.ops.init_hw(hw); + if (err) { + dev_err(&pdev->dev, + "init_shared_code failed: %d\n", err); + return err; + } + txgbe_negotiate_api(adapter); + err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + if (err) + dev_info(&pdev->dev, "Error reading MAC address\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(&pdev->dev, "Assigning random MAC address\n"); + eth_hw_addr_random(netdev); + ether_addr_copy(hw->mac.addr, netdev->dev_addr); + ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); + } + + /* Enable dynamic interrupt throttling rates */ + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; + + /* set default ring sizes */ + adapter->tx_ring_count = TXGBE_DEFAULT_TXD; + adapter->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* enable rx csum by default */ + adapter->flagsd |= TXGBE_F_CAP_RX_CSUM; + + adapter->link_state = true; + + set_bit(__TXGBE_DOWN, &adapter->state); + + return 0; +} + +void txgbe_init_last_counter_stats(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i = 0; + + adapter->last_stats.gprc = 0; + adapter->last_stats.gorc = 0; + adapter->last_stats.gptc = 0; + adapter->last_stats.gotc = 0; + adapter->last_stats.mprc = 0; + + for (i = 0; i < MAX_RX_QUEUES; i++) { + adapter->last_reg_stats[i].gprc = rd32(hw, TXGBE_VXGPRC(i)); + adapter->last_stats.gprc += adapter->last_reg_stats[i].gprc; + + adapter->last_reg_stats[i].gorc = rd32(hw, TXGBE_VXGORC_LSB(i)); + adapter->last_reg_stats[i].gorc = adapter->last_reg_stats[i].gorc | + ((u64)(rd32(hw, TXGBE_VXGORC_MSB(i))) << 32); + adapter->last_stats.gorc += adapter->last_reg_stats[i].gorc; + + adapter->last_reg_stats[i].gptc = rd32(hw, TXGBE_VXGPTC(i)); + adapter->last_stats.gptc += adapter->last_reg_stats[i].gptc; + + adapter->last_reg_stats[i].gotc = rd32(hw, TXGBE_VXGOTC_LSB(i)); + adapter->last_reg_stats[i].gotc = adapter->last_reg_stats[i].gotc | + ((u64)(rd32(hw, TXGBE_VXGOTC_MSB(i))) << 32); + adapter->last_stats.gotc += adapter->last_reg_stats[i].gotc; + + adapter->last_reg_stats[i].mprc = rd32(hw, TXGBE_VXMPRC(i)); + adapter->last_stats.mprc += adapter->last_reg_stats[i].mprc; + + adapter->reg_stats[i].gprc = 0; + adapter->reg_stats[i].gorc = 0; + adapter->reg_stats[i].gptc = 0; + adapter->reg_stats[i].gotc = 0; + adapter->reg_stats[i].mprc = 0; + } + + adapter->base_stats.gprc = adapter->last_stats.gprc; + adapter->base_stats.gorc = adapter->last_stats.gorc; + adapter->base_stats.gptc = adapter->last_stats.gptc; + adapter->base_stats.gotc = adapter->last_stats.gotc; + adapter->base_stats.mprc = adapter->last_stats.mprc; +} + +void txgbe_set_num_queues(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + int err; + u16 rss; + u16 queue; + + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = txgbe_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return; + + /* we need as many queues as traffic classes */ + if (num_tcs > 1) { + adapter->num_rx_queues = num_tcs; + } else { + rss = min_t(u16, num_online_cpus(), TXGBE_MAX_RSS_QUEUES); + queue = min_t(u16, hw->mac.max_rx_queues, hw->mac.max_tx_queues); + 
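+ /* Illustrative example (TXGBE_MAX_RSS_QUEUES is defined outside this
+  * hunk): with 8 online CPUs, max_rx_queues/max_tx_queues of 4 and
+  * TXGBE_MAX_RSS_QUEUES of at least 8, the final value below is
+  * rss = min(4, 8) = 4 queue pairs.
+  */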
rss = min_t(u16, queue, rss); + + switch (hw->api_version) { + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + adapter->num_rx_queues = rss; + adapter->num_tx_queues = rss; + default: + break; + } + } +} + +int txgbe_acquire_msix_vectors(struct txgbe_adapter *adapter, + int vectors) +{ + int vector_threshold; + + vector_threshold = MIN_MSIX_COUNT; + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { + e_err(drv, "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return vectors; + } + + adapter->num_q_vectors = min_t(u16, vectors - NON_Q_VECTORS, 2); + + return 0; +} + +/** + * txgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + */ +int txgbevf_set_interrupt_capability(struct txgbe_adapter *adapter) +{ + int vector, v_budget; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * The default is to use pairs of vectors. + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + /* A failure in MSI-X entry allocation isn't fatal, but the VF driver + * does not support any other modes, so we will simply fail here. Note + * that we clean up the msix_entries pointer else-where. + */ + return txgbe_acquire_msix_vectors(adapter, v_budget); +} + +void txgbe_add_ring(struct txgbe_ring *ring, + struct txgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * txgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_idx: index of vector in adapter struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
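+ *
+ * The q_vector and its rings come from a single allocation (the trailing
+ * ring[] array), so the rings are freed together with the q_vector.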
+ **/ +int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, int v_idx, + int txr_count, int txr_idx, + int xdp_count, int xdp_idx, + int rxr_count, int rxr_idx) +{ + struct txgbe_q_vector *q_vector; + int reg_idx = txr_idx + xdp_idx; + struct txgbe_ring *ring; + int ring_count, size; + + ring_count = txr_count + xdp_count + rxr_count; + size = sizeof(*q_vector) + (sizeof(*ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, txgbevf_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->que_idx = txr_idx; + ring->reg_idx = reg_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx++; + reg_idx++; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + txgbe_add_ring(ring, &q_vector->rx); + + /* errata: UDP frames with a 0 checksum + * can be marked as checksum errors. + */ + if (adapter->hw.mac.type == txgbe_mac_sp_vf) + set_bit(__TXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->que_idx = rxr_idx; + ring->reg_idx = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx++; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * txgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->que_idx] = NULL; + + txgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->que_idx] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* txgbe_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * txgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
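+ *
+ * Rings are spread evenly over the vectors: when there are at least as
+ * many vectors as rings, Rx rings get dedicated vectors first; otherwise
+ * each vector takes DIV_ROUND_UP(remaining, vectors_left) Tx and Rx
+ * rings. For example, 4 Tx and 4 Rx rings on 2 vectors gives every
+ * vector 2 Tx plus 2 Rx rings.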
+ **/ +int txgbe_alloc_q_vectors(struct txgbe_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int xdp_remaining = 0; + int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { + for (; rxr_remaining; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); + + err = txgbe_alloc_q_vector(adapter, v_idx, + 0, 0, 0, 0, rqpv, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + } + } + + for (; q_vectors; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors); + + err = txgbe_alloc_q_vector(adapter, v_idx, + tqpv, txr_idx, + xqpv, xdp_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + txr_remaining -= tqpv; + txr_idx += tqpv; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * txgbevf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +void txgbevf_reset_interrupt_capability(struct txgbe_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * txgbevf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int txgbevf_init_interrupt_scheme(struct txgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + /* Number of supported queues */ + txgbe_set_num_queues(adapter); + + err = txgbevf_set_interrupt_capability(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = txgbe_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + dev_info(&pdev->dev, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__TXGBE_DOWN, &adapter->state); + + return 0; +err_alloc_q_vectors: + txgbevf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +static void txgbe_set_features(struct txgbe_adapter *adapter, u8 fea_flags) +{ + struct net_device *netdev = adapter->netdev; + netdev_features_t hw_features; + + netdev->features = NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->gso_partial_features = NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPXIP4 | + + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->features |= netdev->gso_partial_features | + NETIF_F_GSO_PARTIAL; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; + + hw_features = netdev->hw_features; + hw_features |= netdev->features; + netdev->hw_features = hw_features; + + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; +} + +void txgbe_update_itr(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (12000 ints/s) + */ + timepassed_us = q_vector->itr >> 2; + bytes_perint = bytes / timepassed_us; + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > 20) + itr_setting = bulk_latency; + else if (bytes_perint <= 10) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= 20) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * txgbevf_write_eitr - write VTEITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + */ +void txgbevf_write_eitr(struct txgbe_q_vector *q_vector) +{ + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = TXGBE_VXITR_INTERVAL(q_vector->itr); + + /* set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= TXGBE_VXITR_CNT_WDIS; + + wr32(hw, TXGBE_VXITR(v_idx), itr_reg); +} + +void txgbe_set_itr(struct txgbe_q_vector *q_vector) +{ + u32 new_itr = q_vector->itr; + u8 current_itr; + + txgbe_update_itr(q_vector, &q_vector->tx); + txgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = TXGBE_100K_ITR; + break; + case low_latency: + new_itr = TXGBE_20K_ITR; + break; + case bulk_latency: + new_itr = TXGBE_12K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + 
/* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + txgbevf_write_eitr(q_vector); + } +} + +bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(txgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, txgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->dma_addr = dma; + bi->page = page; + bi->page_offset = txgbe_rx_offset(rx_ring); + bi->pagecnt_bias = 1; + rx_ring->rx_stats.alloc_rx_page++; + + return true; +} + +/** + * txgbevf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace + **/ +void txgbevf_alloc_rx_buffers(struct txgbe_ring *rx_ring, + u16 cleaned_count) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + unsigned int i = rx_ring->next_to_use; + + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!txgbe_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma_addr, + bi->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info. + */ + rx_desc->rd.pkt_addr = TXGBE_RXD_PKTADDR(bi->dma_addr + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->rd.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * txgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. 
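+ * (With PAGE_SIZE < 8192 the page offset is XOR-flipped between the two
+ * halves of the page so the other half can be posted for the next
+ * descriptor; on larger pages the offset simply advances by truesize.)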
+ * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static void txgbe_add_rx_frag(struct txgbe_ring __always_unused *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(TXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *txgbe_build_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; + +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points to xdp->data, otherwise, we likely + * have a consumer accessing first few bytes of meta data, + * and then actual data. + */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + + if (metasize) + skb_metadata_set(skb, metasize); + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static struct sk_buff *txgbe_construct_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union txgbe_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, TXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > TXGBE_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, xdp->data, TXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), xdp->data, + ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - + page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static inline bool txgbe_page_is_reserved(struct page 
*page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool txgbe_can_reuse_rx_page(struct txgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(txgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; + +#else +#define TXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - TXGBE_RXBUFFER_2048) + + if (rx_buffer->page_offset > TXGBE_LAST_OFFSET) + return false; + +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + if (likely(!pagecnt_bias)) { + page_ref_inc(page); + rx_buffer->pagecnt_bias = 1; + } + + return true; +} + +/** + * txgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *old_buff) +{ + struct txgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma_addr = old_buff->dma_addr; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void txgbe_put_rx_buffer(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (txgbe_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + txgbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (IS_ERR(skb)) + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma_addr, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static struct txgbe_rx_buffer *txgbe_get_rx_buffer(struct txgbe_ring *rx_ring, + const unsigned int size) +{ + struct txgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma_addr, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +/** + * txgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +bool txgbe_is_non_eop(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); + + if (likely(TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_EOP)) + return false; + + return true; +} + +/** + * txgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + /* verify that the packet does not have any known errors */ + if (unlikely(TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_ERR_RXE)) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +#define TXGBE_RSS_L4_TYPES_MASK \ + ((1ul << TXGBE_RSSTYPE_IPV4_TCP) | \ + (1ul << TXGBE_RSSTYPE_IPV4_UDP) | \ + (1ul << TXGBE_RSSTYPE_IPV6_TCP) | \ + (1ul << TXGBE_RSSTYPE_IPV6_UDP)) + +inline void txgbe_rx_hash(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = TXGBE_RXD_RSSTYPE(rx_desc); + + if (!rss_type) + return; + + skb_set_hash(skb, TXGBE_RXD_RSS_HASH(rx_desc), + (TXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +inline void txgbe_rx_checksum(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct txgbe_dec_ptype dptype; + + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + dptype = txgbe_rx_decode_ptype(rx_desc); + if (!dptype.known) + return; + + if (dptype.etype) + skb->encapsulation = 1; + + /* if IP and error */ + if ((TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_IPCS && + TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_ERR_IPE) || + (TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_EIPCS && + TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_ERR_EIPERR)) { + ring->rx_stats.csum_err++; + return; + } + + if (!(TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_TPCS)) + return; + + if (TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_ERR_TPE) { + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +void txgbe_process_skb_fields(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + txgbe_rx_hash(rx_ring, rx_desc, skb); + txgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_VP) { + u16 vid = TXGBE_RXD_VLAN(rx_desc); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void txgbe_rx_skb(struct txgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; + struct xdp_buff xdp; + + xdp.data = NULL; + xdp.data_end = NULL; + + while (likely(total_rx_packets < budget)) { + struct txgbe_rx_buffer *rx_buffer; + union txgbe_rx_desc *rx_desc; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + txgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + if (!(TXGBE_RXD_STATUS(rx_desc) & TXGBE_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + /* retrieve a buffer from the ring */ + rx_buffer = txgbe_get_rx_buffer(rx_ring, size); + + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + + xdp.data_meta = xdp.data; + + xdp.data_hard_start = xdp.data - + txgbe_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + } + + if (skb) { + txgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = txgbe_build_skb(rx_ring, rx_buffer, + &xdp, rx_desc); + } else { + skb = txgbe_construct_skb(rx_ring, rx_buffer, + &xdp, rx_desc); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + txgbe_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (txgbe_is_non_eop(rx_ring, rx_desc)) 
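+ /* non-EOP: the frame spans more than one Rx buffer, so keep
+ * rx_ring->skb and let the next loop iteration append the
+ * following buffer as a page frag via txgbe_add_rx_frag()
+ */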
+ continue; + + /* verify the packet layout is correct */ + if (txgbe_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* Workarouid hardware that can't do proper VEPA multicast + * source pruning. + */ + if ((skb->pkt_type == PACKET_BROADCAST || + skb->pkt_type == PACKET_MULTICAST) && + ether_addr_equal(rx_ring->netdev->dev_addr, + eth_hdr(skb)->h_source)) { + dev_kfree_skb_irq(skb); + continue; + } + + /* populate checksum, VLAN, and protocol */ + txgbe_process_skb_fields(rx_ring, rx_desc, skb); + + txgbe_rx_skb(q_vector, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_rx_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + q_vector->netpoll_rx = false; + + return total_rx_packets; +} + +inline void txgbe_irq_enable_queues(struct txgbe_adapter *adapter, + u32 qmask) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_VXIMC, qmask); +} + +inline int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad) +{ + int err; + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data + skb->len, 0x1, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); + if (unlikely(err)) + goto free_skb; + } + + /* FIXME: The use of this function with non-linear skb's really needs + * to be audited. + */ + err = skb_linearize(skb); + if (unlikely(err)) + goto free_skb; + + memset(skb->data + skb->len, 0x1, pad); + return 0; + +free_skb: + kfree_skb(skb); + return err; +} + +static int __txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->que_idx); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(txgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->que_idx); + ++tx_ring->tx_stats.tx_restart_queue; + + return 0; +} + +static inline int txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, int size) +{ + if (likely(txgbe_desc_unused(tx_ring) >= size)) + return 0; + return __txgbe_maybe_stop_tx(tx_ring, size); +} + +void txgbevf_tx_ctxtdesc(struct txgbe_ring *tx_ring, + u32 vlan_macip_lens, + u32 fcoe_sof_eof, + u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct txgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = TXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= TXGBE_TXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +int txgbe_tso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, u8 *hdr_len, struct txgbe_dec_ptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; + int err = 0; + u8 tun_prot = 0; + bool enc = skb->encapsulation; + struct ipv6hdr *ipv6h; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_IPV4 | + TXGBE_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_CC; + } + + /* compute header lengths */ + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << TXGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + mss_l4len_idx |= (1u << 0x4); + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + if (enc) { + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else { + vlan_macip_lens = skb_network_header_len(skb) >> 1; + } + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + + txgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +void txgbe_tx_csum(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, struct txgbe_dec_ptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & TXGBE_TX_FLAGS_VLAN) && + !(first->tx_flags & TXGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + 
skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + TXGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= TXGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= TXGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + + if (skb->vlan_proto == htons(ETH_P_8021AD)) + type_tucmd |= TXGBE_SET_FLAG(first->tx_flags, + TXGBE_TX_FLAGS_VLAN, + 0x1 << TXGBE_TXD_TAG_TPID_SEL_SHIFT); + + txgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +__le32 txgbe_tx_olinfo_status(struct txgbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + __le32 status = TXGBE_TXD_PAYLEN(paylen); + + /* enable L4 checksum for TSO and TX checksum offload */ + if (tx_flags & TXGBE_TX_FLAGS_CSUM) + status |= TXGBE_TXD_TPCS; + + /* enable IPv4 checksum for TSO */ + if (tx_flags & TXGBE_TX_FLAGS_IPV4) + status |= TXGBE_TXD_IPCS; + + /* enable IPv4 checksum for TSO */ + if (tx_flags & TXGBE_TX_FLAGS_OUTER_IPV4) + status |= TXGBE_TXD_EIPCS; + + /* use index 1 context for TSO/FSO/FCOE */ + if (tx_flags & TXGBE_TX_FLAGS_TSO) + status |= TXGBE_TXD_BAK_DESC; + + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + if (tx_flags & TXGBE_TX_FLAGS_CC) + status |= TXGBE_TXD_CC; + + return status; +} + +__le32 txgbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + __le32 cmd_type = cpu_to_le32(TXGBE_TXD_FCS); + + /* set HW vlan bit if vlan is present */ + if (tx_flags & TXGBE_TX_FLAGS_VLAN) + cmd_type |= cpu_to_le32(TXGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + if (tx_flags & TXGBE_TX_FLAGS_TSO) + cmd_type |= cpu_to_le32(TXGBE_TXD_TSE); + + return cmd_type; +} + +void txgbevf_unmap_and_free_tx_resource(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + 
dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +void txgbe_tx_map(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + const u8 hdr_len) +{ + dma_addr_t dma_addr; + struct sk_buff *skb = first->skb; + struct txgbe_tx_buffer *tx_buffer; + struct txgbe_tx_desc *tx_desc; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + unsigned int paylen = skb->len - hdr_len; + u32 tx_flags = first->tx_flags; + __le32 cmd_type, status; + u16 i = tx_ring->next_to_use; + + tx_desc = TXGBE_TX_DESC(tx_ring, i); + + status = txgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); + cmd_type = txgbe_tx_cmd_type(tx_flags); + + dma_addr = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma_addr)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma_addr); + + tx_desc->pkt_addr = cpu_to_le64(dma_addr); + tx_desc->status = status; + for (;;) { + if (size == 0) + goto next_frag; + + while (unlikely(size > TXGBE_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_len = + cmd_type | cpu_to_le32(TXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + + dma_addr += TXGBE_MAX_DATA_PER_TXD; + size -= TXGBE_MAX_DATA_PER_TXD; + + tx_desc->pkt_addr = cpu_to_le64(dma_addr); + tx_desc->status = status; + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_len = cmd_type | cpu_to_le32(size); + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } +next_frag: + size = skb_frag_size(frag); + data_len -= size; + + dma_addr = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma_addr)) + goto dma_error; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma_addr); + + tx_desc->pkt_addr = cpu_to_le64(dma_addr); + tx_desc->status = status; + + frag++; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* write last descriptor with RS and EOP bits */ + cmd_type |= cpu_to_le32(size) | cpu_to_le32(TXGBE_TXD_EOP | TXGBE_TXD_RS); + tx_desc->cmd_type_len = cmd_type; + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
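+ * It pairs with the smp_rmb() in txgbe_clean_tx_irq(), which reads
+ * next_to_watch before checking the descriptor DD bit.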
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + txgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + txgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static int txgbe_xmit_frame_ring(struct sk_buff *skb, + struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *first; + struct txgbe_dec_ptype dptype; + int tso; + u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + unsigned short f; + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + + /* work around hw errata 3 */ + u16 _llclen, *llclen; + + llclen = skb_header_pointer(skb, ETH_HLEN - 2, sizeof(u16), &_llclen); + if (*llclen == 0x3 || *llclen == 0x4 || *llclen == 0x5) { + if (txgbe_skb_pad_nonzero(skb, ETH_ZLEN - skb->len)) + return -ENOMEM; + __skb_put(skb, ETH_ZLEN - skb->len); + } + /* if this is an LLDP ether frame then drop it - VFs do not + * forward LLDP frames. + */ + if (ntohs(skb->protocol) == ETH_P_LLDP) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + /* need: 1 descriptor per page * PAGE_SIZE/TXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/TXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (txgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << TXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= TXGBE_TX_FLAGS_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q) || + protocol == htons(ETH_P_8021AD)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << TXGBE_TX_FLAGS_VLAN_SHIFT; + } + + if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + /* encode packet type */ + dptype = txgbe_tx_encode_ptype(first); + + tso = txgbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + txgbe_tx_csum(tx_ring, first, dptype); + + txgbe_tx_map(tx_ring, first, hdr_len); + + netif_trans_update(tx_ring->netdev); + + txgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + 
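+/**
+ * txgbe_xmit_frame - transmit entry point called by the stack
+ * @skb: socket buffer to transmit
+ * @netdev: network interface device structure
+ *
+ * Drops zero-length skbs, pads short frames to the 17-byte minimum
+ * required by the olinfo paylen field, then selects a Tx ring from
+ * skb->queue_mapping and hands the skb to txgbe_xmit_frame_ring().
+ *
+ * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is full.
+ **/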
+static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *tx_ring; + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + return txgbe_xmit_frame_ring(skb, tx_ring); +} + +u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, + u32 *vmdq) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + *vmdq = 0; + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else { + *mc_addr_ptr = NULL; + } + + return addr; +} + +int txgbevf_write_uc_addr_list(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int count = 0; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); + usleep_range(200, 300); + } + } else { + /* If the list is empty then send message to PF driver to + * clear all macvlans on this VF. + */ + hw->mac.ops.set_uc_addr(hw, 0, NULL); + } + + return count; +} + +/** + * txgbevf_set_rx_mode - Multicast and unicast set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the multicast address + * list, unicast address list or the network interface flags are updated. + * This routine is responsible for configuring the hardware for proper + * multicast mode and configuring requested unicast filters. + **/ +void txgbevf_set_rx_mode(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + unsigned int flags = netdev->flags; + int xcast_mode; + u8 *addr_list = NULL; + int addr_count = 0; + + xcast_mode = (flags & IFF_ALLMULTI) ? TXGBE_XCAST_MODE_ALLMULTI : + (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 
+ TXGBE_XCAST_MODE_MULTI : TXGBE_XCAST_MODE_NONE; + /* request the most inclusive mode we need */ + if (flags & IFF_PROMISC) + xcast_mode = TXGBE_XCAST_MODE_PROMISC; + else if (flags & IFF_ALLMULTI) + xcast_mode = TXGBE_XCAST_MODE_ALLMULTI; + else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = TXGBE_XCAST_MODE_MULTI; + else + xcast_mode = TXGBE_XCAST_MODE_NONE; + + /* reprogram multicast list */ + addr_count = netdev_mc_count(netdev); + if (addr_count) { + struct netdev_hw_addr *ha; + + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + } + + spin_lock_bh(&adapter->mbx_lock); + + hw->mac.ops.update_xcast_mode(hw, xcast_mode); + + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, + txgbe_addr_list_itr, false); + + txgbevf_write_uc_addr_list(netdev); + + spin_unlock_bh(&adapter->mbx_lock); +} + +void txgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + u64 bytes, packets; + const struct txgbe_ring *ring; + int i; + struct rtnl_link_stats64 *net_stats = &adapter->net_stats; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + unsigned int start; + + ring = adapter->rx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_bytes += bytes; + stats->rx_packets += packets; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + unsigned int start; + + ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + } + rcu_read_unlock(); + + /* following stats updated by txgbe_watchdog_subtask() */ + stats->multicast = net_stats->multicast; + stats->tx_errors = net_stats->tx_errors; + stats->tx_dropped = net_stats->tx_dropped; + stats->rx_errors = net_stats->rx_errors; + stats->rx_dropped = net_stats->rx_dropped; + stats->rx_crc_errors = net_stats->rx_crc_errors; + stats->rx_length_errors = net_stats->rx_length_errors; +} + +/** + * txgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +int txgbe_set_mac(struct net_device *netdev, void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr((u8 *)addr->sa_data)) + return -EADDRNOTAVAIL; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mac.ops.set_rar(hw, 0, (u8 *)addr->sa_data, 0, 1); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return -EPERM; + + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.perm_addr, addr->sa_data, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + return 0; +} + +void txgbevf_reinit_locked(struct txgbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /*txgbevf_down + free_irq*/ + txgbevf_down(adapter); + txgbe_free_irq(adapter); + + /*txgbe_up + request_irq*/ + txgbe_configure(adapter); + txgbe_request_irq(adapter); + txgbe_up_complete(adapter); + + clear_bit(__TXGBE_RESETTING, &adapter->state); +} + 
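+/* Example (illustrative only): for new_mtu = 9000, txgbe_change_mtu()
+ * below reports max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) =
+ * 9018 bytes to the PF via txgbe_rlpml_set_vf() before updating
+ * netdev->mtu and, if the interface is running, restarting it through
+ * txgbevf_reinit_locked().
+ */
+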
+/** + * txgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +int txgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int ret; + + if (new_mtu < 68 || new_mtu > 9414) + return -EINVAL; + + /* notify the PF of our intent to use this size of frame */ + ret = txgbe_rlpml_set_vf(hw, max_frame); + if (ret) + return -EINVAL; + + e_info(probe, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + + /* set new MTU */ + netdev->mtu = new_mtu; + if (netif_running(netdev)) + txgbevf_reinit_locked(adapter); + + return 0; +} + +void txgbe_tx_timeout_reset(struct txgbe_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + adapter->flagsd |= TXGBE_F_REQ_RESET; + txgbevf_service_event_schedule(adapter); + } +} + +/** + * txgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void txgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_tx_timeout_reset(adapter); +} + +int txgbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int err; + + spin_lock_bh(&adapter->mbx_lock); + + /* add VID to filter table */ + err = hw->mac.ops.set_vfta(hw, vid, 0, true, false); + + spin_unlock_bh(&adapter->mbx_lock); + + if (!err) { + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); + } else { + e_err(drv, "VF set VLAN failed\n"); + return -EIO; + } + + return err; +} + +int txgbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + + /* remove VID from filter table */ + hw->mac.ops.set_vfta(hw, vid, 0, false, false); + + spin_unlock_bh(&adapter->mbx_lock); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +inline u32 txgbe_get_tx_pending(struct txgbe_ring *ring) +{ + struct txgbe_adapter *adapter = netdev_priv(ring->netdev); + struct txgbe_hw *hw = &adapter->hw; + + u32 head = rd32(hw, TXGBE_VXTDH(ring->reg_idx)); + u32 tail = rd32(hw, TXGBE_VXTDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) +{ + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = txgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
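+ *
+ * In short: if no completion has been seen since the last check
+ * (tx_done == tx_done_old) while descriptors are still pending in
+ * hardware (tx_pending != 0), the ARMED bit is set; a second
+ * consecutive hit reports the queue as hung.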
+ */ + if (tx_done_old == tx_done && tx_pending) + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__TXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + /* reset the countdown */ + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + + return false; +} + +/** + * txgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: board private structure + * @tx_ring: tx ring to clean + **/ +bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring, int napi_budget) +{ + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_tx_buffer *tx_buffer; + struct txgbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = tx_ring->count / 2; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = TXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct txgbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->status & TXGBE_TXD_STAT_DD)) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + //tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) { + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_tx_desc *eop_desc; + + eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; + + pr_err("Detected Tx Unit Hang%s\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " next_to_watch <%p>\n" + " eop_desc.status <%x>\n" + " time_stamp <%lx>\n" + " 
jiffies <%lx>\n", + " ", + tx_ring->que_idx, + rd32(hw, TXGBE_VXTDH(tx_ring->reg_idx)), + rd32(hw, TXGBE_VXTDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + eop_desc, (eop_desc ? eop_desc->status : 0), + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, + tx_ring->que_idx); + + /* schedule immediate reset if we believe we hung */ + txgbe_tx_timeout_reset(adapter); + + return true; + } + + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (txgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->que_idx) && + !test_bit(__TXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->que_idx); + ++tx_ring->tx_stats.tx_restart_queue; + } + } + + return !!budget; +} + +/** + * txgbevf_poll - NAPI polling calback + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one or more rings associated with a + * q_vector. + **/ +int txgbevf_poll(struct napi_struct *napi, int budget) +{ + struct txgbe_q_vector *q_vector = + container_of(napi, struct txgbe_q_vector, napi); + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_ring *ring; + int per_ring_budget, work_done = 0; + bool clean_complete = true; + + txgbe_for_each_ring(ring, q_vector->tx) { + if (!txgbe_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + txgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = txgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + work_done += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting == 1) + txgbe_set_itr(q_vector); + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + if (q_vector->v_idx > 2) + txgbe_irq_enable_queues(adapter, 1 << (q_vector->v_idx >> 2)); + else + txgbe_irq_enable_queues(adapter, 1 << q_vector->v_idx); + } + + return 0; +} + +void txgbevf_service_event_schedule(struct txgbe_adapter *adapter) +{ + if (!test_bit(__TXGBE_DOWN, &adapter->state) && + !test_bit(__TXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__TXGBE_SERVICE_SCHED, &adapter->state)) + schedule_work(&adapter->service_task); +} + +static void txgbe_service_event_complete(struct txgbe_adapter *adapter) +{ + WARN_ON(!test_bit(__TXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure + * state is correct before next watchdog + */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); +} + +/** + * txgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. 
In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +void txgbe_free_q_vectors(struct txgbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); +} + +/** + * txgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void txgbevf_clear_interrupt_scheme(struct txgbe_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + txgbe_free_q_vectors(adapter); + txgbevf_reset_interrupt_capability(adapter); +} + +static void txgbe_queue_reset_subtask(struct txgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + + if (!(adapter->flagsd & TXGBE_F_REQ_QUEUE_RESET)) + return; + + adapter->flagsd &= ~TXGBE_F_REQ_QUEUE_RESET; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + rtnl_lock(); + + /* disable running interface */ + if (netif_running(dev)) + txgbevf_close(dev); + + /* free and reallocate queues */ + txgbevf_clear_interrupt_scheme(adapter); + txgbevf_init_interrupt_scheme(adapter); + + /* reenable running interface */ + if (netif_running(dev)) + txgbevf_open(dev); + + rtnl_unlock(); +} + +static void txgbe_reset_subtask(struct txgbe_adapter *adapter) +{ + if (!(adapter->flagsd & TXGBE_F_REQ_RESET)) + return; + + adapter->flagsd &= ~TXGBE_F_REQ_RESET; + + /* If we're already down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + adapter->sw_stats.tx_timeout_count++; + + txgbevf_reinit_locked(adapter); +} + +/** + * txgbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + s32 err; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + spin_unlock_bh(&adapter->mbx_lock); + + /* if check for link returns error we will need to reset */ + if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { + adapter->flagsd |= TXGBE_F_REQ_RESET; + link_up = false; + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * txgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", + (adapter->link_speed == TXGBE_LINK_SPEED_10GB_FULL) ? + "10 Gbps" : + (adapter->link_speed == TXGBE_LINK_SPEED_1GB_FULL) ? + "1 Gbps" : + (adapter->link_speed == TXGBE_LINK_SPEED_100_FULL) ? 
+ "100 Mbps" : + "unknown speed"); + + netif_carrier_on(netdev); +} + +/** + * txgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + + netif_carrier_off(netdev); +} + +void txgbe_update32(struct txgbe_hw *hw, u32 reg, + u64 *last, u64 *counter) +{ + u64 curr = rd32(hw, reg); + + if (curr < *last) + *counter += 0x100000000LL; + *last = curr; + *counter &= 0xFFFFFFFF00000000LL; + *counter |= curr; +} + +void txgbe_update36(struct txgbe_hw *hw, u32 loreg, u32 hireg, + u64 *last, u64 *counter) +{ + u64 lo32 = rd32(hw, loreg); /* snapshot */ + u64 hi32 = rd32(hw, hireg); + u64 curr = hi32 << 32 | lo32; + + if (curr < *last) + *counter += 0x1000000000LL; + *last = curr; + *counter &= 0xFFFFFFF000000000LL; + *counter |= curr; +} + +/** + * txgbevf_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void txgbevf_update_stats(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring *ring; + struct rtnl_link_stats64 *net_stats = &adapter->net_stats; + u64 tx_restart_queue = 0, tx_busy = 0; + u64 rx_csum_bad = 0; + u32 page_failed = 0, buff_failed = 0; + u32 i; + + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + rx_csum_bad += ring->rx_stats.csum_err; + page_failed += ring->rx_stats.alloc_rx_page_failed; + buff_failed += ring->rx_stats.alloc_rx_buff_failed; + } + adapter->sw_stats.rx_csum_bad = rx_csum_bad; + adapter->sw_stats.rx_alloc_page_failed = page_failed; + adapter->sw_stats.rx_alloc_buff_failed = buff_failed; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + tx_restart_queue += ring->tx_stats.tx_restart_queue; + tx_busy += ring->tx_stats.tx_busy; + } + adapter->sw_stats.tx_restart_queue = tx_restart_queue; + adapter->sw_stats.tx_busy = tx_busy; + + /* update hardware counters */ + spin_lock(&adapter->pf_count_lock); + adapter->last_stats.gprc = adapter->reset_stats.gprc; + adapter->stats.gprc = adapter->reset_stats.gprc; + + adapter->last_stats.gptc = adapter->reset_stats.gptc; + adapter->stats.gptc = adapter->reset_stats.gptc; + + adapter->last_stats.gorc = adapter->reset_stats.gorc; + adapter->stats.gorc = adapter->reset_stats.gorc; + + adapter->last_stats.gotc = adapter->reset_stats.gotc; + adapter->stats.gotc = adapter->reset_stats.gotc; + + adapter->last_stats.mprc = adapter->reset_stats.mprc; + adapter->stats.mprc = adapter->reset_stats.mprc; + + for (i = 0; i < MAX_TX_QUEUES; i++) { + adapter->last_stats.gprc += adapter->last_reg_stats[i].gprc; + txgbe_update32(hw, TXGBE_VXGPRC(i), &adapter->last_reg_stats[i].gprc, + &adapter->reg_stats[i].gprc); + adapter->stats.gprc += adapter->reg_stats[i].gprc; + + adapter->last_stats.gptc += adapter->last_reg_stats[i].gptc; + txgbe_update32(hw, TXGBE_VXGPTC(i), &adapter->last_reg_stats[i].gptc, + &adapter->reg_stats[i].gptc); + adapter->stats.gptc += adapter->reg_stats[i].gptc; + + adapter->last_stats.gorc += adapter->last_reg_stats[i].gorc; + txgbe_update36(hw, TXGBE_VXGORC_LSB(i), TXGBE_VXGORC_MSB(i), + 
&adapter->last_reg_stats[i].gorc, &adapter->reg_stats[i].gorc); + adapter->stats.gorc += adapter->reg_stats[i].gorc; + + adapter->last_stats.gotc += adapter->last_reg_stats[i].gotc; + txgbe_update36(hw, TXGBE_VXGOTC_LSB(i), TXGBE_VXGOTC_MSB(i), + &adapter->last_reg_stats[i].gotc, &adapter->reg_stats[i].gotc); + adapter->stats.gotc += adapter->reg_stats[i].gotc; + + adapter->last_stats.mprc += adapter->last_reg_stats[i].mprc; + txgbe_update32(hw, TXGBE_VXMPRC(i), &adapter->last_reg_stats[i].mprc, + &adapter->reg_stats[i].mprc); + adapter->stats.mprc += adapter->reg_stats[i].mprc; + } + spin_unlock(&adapter->pf_count_lock); + + /* update global counters */ + net_stats->multicast = adapter->stats.mprc - adapter->base_stats.mprc; + net_stats->rx_crc_errors = rx_csum_bad; +} + +static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) +{ + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + txgbe_watchdog_update_link(adapter); + if (adapter->link_up && adapter->link_state) + txgbe_watchdog_link_is_up(adapter); + else + txgbe_watchdog_link_is_down(adapter); + + txgbevf_update_stats(adapter); +} + +/* txgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +void txgbe_service_task(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + service_task); + struct txgbe_hw *hw = &adapter->hw; + + if (unlikely(!hw->hw_addr)) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + txgbevf_down(adapter); + rtnl_unlock(); + } + txgbe_service_event_complete(adapter); + return; + } + + txgbe_queue_reset_subtask(adapter); + txgbe_reset_subtask(adapter); + txgbe_watchdog_subtask(adapter); + txgbe_check_hang_subtask(adapter); + + txgbe_service_event_complete(adapter); +} + +#define TXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +txgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) + vlan_num++; + + if (vlan_depth) + vlan_depth -= VLAN_HLEN; + else + vlan_depth = ETH_HLEN; + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + 
TXGBE_MAX_TUNNEL_HDR_LEN)) {
+			return features & ~NETIF_F_CSUM_MASK;
+		}
+	}
+	return features;
+}
+
+static const struct net_device_ops txgbe_netdev_ops = {
+	.ndo_open = txgbevf_open,
+	.ndo_stop = txgbevf_close,
+	.ndo_start_xmit = txgbe_xmit_frame,
+	.ndo_set_rx_mode = txgbevf_set_rx_mode,
+	.ndo_get_stats64 = txgbe_get_stats64,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = txgbe_set_mac,
+	.ndo_change_mtu = txgbe_change_mtu,
+	.ndo_tx_timeout = txgbe_tx_timeout,
+	.ndo_vlan_rx_add_vid = txgbe_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = txgbe_vlan_rx_kill_vid,
+	.ndo_features_check = txgbe_features_check,
+};
+
+void txgbevf_assign_netdev_ops(struct net_device *dev)
+{
+	dev->netdev_ops = &txgbe_netdev_ops;
+	txgbevf_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * txgbe_service_timer - Timer callback
+ * @t: pointer to the timer_list embedded in the adapter structure
+ **/
+void txgbe_service_timer(struct timer_list *t)
+{
+	struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+
+	/* Reset the timer */
+	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
+
+	txgbevf_service_event_schedule(adapter);
+}
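Stepping back to the statistics helpers earlier in this file: txgbe_update32() extends a 32-bit hardware counter that silently wraps into a monotonically increasing 64-bit software value, and txgbe_update36() applies the same idea to the split 36-bit byte counters. The user-space sketch below is not driver code; it only illustrates the wrap-handling technique. read_hw_reg(), fake_values and main() are illustrative stand-ins for rd32() and real hardware.

/* Standalone sketch: extending a wrapping 32-bit hardware counter into a
 * monotonic 64-bit software counter, mirroring txgbe_update32().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_hw_reg(void);	/* stand-in for rd32() */

static void update32(uint64_t *last, uint64_t *counter)
{
	uint64_t curr = read_hw_reg();

	/* A smaller reading than last time means the 32-bit register
	 * wrapped, so carry one full 2^32 period into the upper half.
	 */
	if (curr < *last)
		*counter += 0x100000000ULL;
	*last = curr;

	/* Keep the accumulated upper 32 bits, replace the lower 32 bits. */
	*counter &= 0xFFFFFFFF00000000ULL;
	*counter |= curr;
}

/* Fake register that wraps once, to show the carry being applied. */
static uint32_t fake_values[] = { 0xFFFFFFF0, 0x00000010 };
static unsigned int fake_idx;

static uint32_t read_hw_reg(void)
{
	return fake_values[fake_idx++ % 2];
}

int main(void)
{
	uint64_t last = 0, counter = 0;

	update32(&last, &counter);	/* counter = 0x00000000FFFFFFF0 */
	update32(&last, &counter);	/* counter = 0x0000000100000010 */
	printf("counter = 0x%016llx\n", (unsigned long long)counter);
	return 0;
}

The same reasoning gives the 0x1000000000 carry and the 0xFFFFFFF000000000 mask in txgbe_update36(), where only the low 36 bits live in hardware.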
/**
+ * txgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * txgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct txgbe_adapter *adapter = NULL;
+	struct net_device *netdev;
+	int err;
+	struct txgbe_hw *hw = NULL;
+	const struct txgbe_info *ei = txgbe_info_tbl[ent->driver_data];
+	static int cards_found;
+	unsigned int min_mtu, max_mtu;
+	u8 fea_flags = 0;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	err = pci_request_selected_regions(pdev,
+					   pci_select_bars(pdev, IORESOURCE_MEM),
+					   txgbevf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_disable_dev;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev,
+			"No usable DMA configuration, aborting\n");
+		goto err_pci_release_regions;
+	}
+	fea_flags |= 0x1;
+
+	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+					 sizeof(struct txgbe_adapter),
+					 TXGBE_VF_MAX_TX_QUEUES,
+					 TXGBE_VF_MAX_RX_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_pci_release_regions;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+
+	hw->back = adapter;
+	hw->msg_enable = &adapter->msg_enable;
+	hw->pdev = adapter->pdev;
+	hw->mac.type = ei->mac;
+	adapter->flagsd = ei->flags;
+	adapter->msg_enable = DEFAULT_DEBUG_LEVEL;
+
+	pci_save_state(pdev);
+
+	adapter->io_addr = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, 0),
+					pci_resource_len(pdev, 0));
+	if (!adapter->io_addr) {
+		err = -EIO;
+		goto err_pci_release_regions;
+	}
+
+	adapter->b4_addr = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, 4),
+					pci_resource_len(pdev, 4));
+	if (!adapter->b4_addr) {
+		err = -EIO;
+		goto err_pci_release_regions;
+	}
+	hw->hw_addr = adapter->io_addr;
+	hw->b4_addr = adapter->b4_addr;
+
+	txgbevf_assign_netdev_ops(netdev);
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+	err = txgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* netdev offload capabilities */
+	txgbe_set_features(adapter, fea_flags);
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_info(&pdev->dev, "txgbe: invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+
+	min_mtu = ETH_MIN_MTU;
+	switch (adapter->hw.api_version) {
+	case txgbe_mbox_api_11:
+	case txgbe_mbox_api_12:
+	case txgbe_mbox_api_13:
+		max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE -
+			  (ETH_HLEN + ETH_FCS_LEN);
+		break;
+	default:
+		if (adapter->hw.mac.type != txgbe_mac_sp_vf)
+			max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE -
+				  (ETH_HLEN + ETH_FCS_LEN);
+		else
+			max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
+		break;
+	}
+
+	netdev->min_mtu = min_mtu;
+	netdev->max_mtu = max_mtu;
+
+	timer_setup(&adapter->service_timer, txgbe_service_timer, 0);
+
+	if (unlikely(!hw->hw_addr)) {
+		err = -EIO;
+		goto err_sw_init;
+	}
+	INIT_WORK(&adapter->service_task, txgbe_service_task);
+	set_bit(__TXGBE_SERVICE_INITED, &adapter->state);
+	clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state);
+
+	err = txgbevf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	hw->mac.ops.get_fw_version(hw);
+
+	strcpy(netdev->name, "eth%d");
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	pci_set_drvdata(pdev, netdev);
+	netif_carrier_off(netdev);
+
+	netif_tx_stop_all_queues(netdev);
+	txgbe_init_last_counter_stats(adapter);
+	if (netdev->features & NETIF_F_GRO)
+		e_info(probe, "GRO is enabled\n");
+
+	e_info(probe, "%s\n", txgbe_driver_string);
+	cards_found++;
+
+	return 0;
+
+err_register:
+	txgbevf_clear_interrupt_scheme(adapter);
+err_sw_init:
+	txgbevf_reset_interrupt_capability(adapter);
+err_pci_release_regions:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+	if (!adapter)
+		pci_disable_device(pdev);
+	return err;
+}
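A note on the unwind order in txgbe_probe() above: the device-managed allocations (devm_alloc_etherdev_mqs(), devm_ioremap()) are released automatically when probe fails or the device unbinds, so only the PCI regions and the device enable need explicit goto labels, unwound in reverse order of acquisition. The skeleton below is a stripped-down illustration of that pattern only; my_probe() and the "my_driver" name are hypothetical and not part of this patch.

/* Hypothetical, minimal probe skeleton showing the unwind ordering:
 * devm-managed mappings need no explicit release, while the PCI regions
 * and the device enable are unwound in reverse order of acquisition.
 */
#include <linux/pci.h>
#include <linux/io.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   "my_driver");
	if (err)
		goto err_disable_dev;

	/* Released automatically by devres on failure or unbind */
	regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!regs) {
		err = -EIO;
		goto err_release_regions;
	}

	pci_set_master(pdev);
	return 0;

err_release_regions:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_disable_dev:
	pci_disable_device(pdev);
	return err;
}

Because err_disable_dev is the last label, a failure before the regions are requested skips their release and only disables the device, matching the structure of the labels in txgbe_probe().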
+
+/**
+ * txgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbe_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct txgbe_adapter *adapter;
+	bool disable_dev;
+
+	if (!netdev)
+		return;
+	pdev->dev_flags = pdev->dev_flags & ~PCI_DEV_FLAGS_ASSIGNED;
+	adapter = netdev_priv(netdev);
+
+	set_bit(__TXGBE_REMOVING, &adapter->state);
+	cancel_work_sync(&adapter->service_task);
+
+	if (netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(netdev);
+
+	txgbevf_clear_interrupt_scheme(adapter);
+
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+	e_info(probe, "Remove complete\n");
+
+	disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	if (disable_dev)
+		pci_disable_device(pdev);
+}
+
+int txgbe_suspend(struct pci_dev *pdev, pm_message_t __maybe_unused state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		txgbevf_down(adapter);
+		txgbe_free_irq(adapter);
+		txgbe_free_all_tx_resources(adapter);
+		txgbe_free_all_rx_resources(adapter);
+		rtnl_unlock();
+	}
+
+	txgbevf_clear_interrupt_scheme(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state))
+		pci_disable_device(pdev);
+
+	return 0;
+}
+
+int txgbe_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct txgbe_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+
+	adapter->hw.hw_addr = adapter->io_addr;
+
+	/* protect context */
+	smp_mb__before_atomic();
+	clear_bit(__TXGBE_DISABLED, &adapter->state);
+	pci_set_master(pdev);
+
+	txgbevf_reset(adapter);
+
+	rtnl_lock();
+	err = txgbevf_init_interrupt_scheme(adapter);
+	if (!err && netif_running(netdev))
+		err = txgbevf_open(netdev);
+	rtnl_unlock();
+	if (err)
+		return err;
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+static struct pci_driver txgbe_driver = {
+	.name = txgbevf_driver_name,
+	.id_table = txgbe_pci_tbl,
+	.probe = txgbe_probe,
+	.remove = txgbe_remove,
+	.suspend = txgbe_suspend,
+	.resume = txgbe_resume,
+};
+
+/**
+ * txgbe_init_module - Driver Registration Routine
+ *
+ * txgbe_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init txgbe_init_module(void)
+{
+	return pci_register_driver(&txgbe_driver);
+}
+
+module_init(txgbe_init_module);
+
+/**
+ * txgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * txgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit txgbe_exit_module(void)
+{
+	pci_unregister_driver(&txgbe_driver);
+}
+
+module_exit(txgbe_exit_module);
+
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, ");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
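The pci_driver above wires power management through the legacy .suspend/.resume callbacks. On kernels that provide dev_pm_ops-based PCI power management, the same logic could instead be hooked up as in the sketch below. This is an alternative, not what the patch does; txgbevf_dev_suspend(), txgbevf_dev_resume() and txgbevf_pm_ops are hypothetical names, and the sketch assumes the existing txgbe_suspend()/txgbe_resume() prototypes are visible in this file.

/* Hypothetical dev_pm_ops wiring, reusing the suspend/resume bodies above. */
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused txgbevf_dev_suspend(struct device *dev)
{
	return txgbe_suspend(to_pci_dev(dev), PMSG_SUSPEND);
}

static int __maybe_unused txgbevf_dev_resume(struct device *dev)
{
	return txgbe_resume(to_pci_dev(dev));
}

static SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, txgbevf_dev_suspend,
			 txgbevf_dev_resume);

/* The registration would then drop .suspend/.resume: */
static struct pci_driver txgbevf_driver_pm_sketch = {
	.name = "txgbevf",	/* illustrative; the driver uses txgbevf_driver_name */
	/* .id_table, .probe, .remove unchanged from txgbe_driver above */
	.driver = {
		.pm = &txgbevf_pm_ops,
	},
};

With dev_pm_ops the PCI core typically handles saving and restoring PCI state itself, which would eventually let the wrappers drop the explicit pci_save_state()/pci_restore_state() calls; the legacy hooks used in this patch keep that responsibility in the driver.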