From 99c9e4a4659fb6eef856bf57681549e21a4dd83a Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Tue, 18 Jul 2023 01:46:31 -0400 Subject: [PATCH 01/10] anolis: net: txgbe: fix open/close notify sleep too long ANBZ: #5502 fix open/close notify interface sleep too long and read wrong buffer Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 9 +++++---- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index f8c08e9b803e..ae68645e8c45 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -5369,12 +5369,12 @@ s32 txgbe_close_notify(struct txgbe_hw *hw) status = txgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), - TXGBE_HI_COMMAND_TIMEOUT, false); + TXGBE_HI_COMMAND_NOTIFY_TIMEOUT, false); if (status) return status; if (txgbe_check_mng_access(hw)) { - tmp = (u32)rd32(hw, TXGBE_MNG_SW_SM); + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) status = 0; else @@ -5404,16 +5404,17 @@ s32 txgbe_open_notify(struct txgbe_hw *hw) status = txgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), - TXGBE_HI_COMMAND_TIMEOUT, false); + TXGBE_HI_COMMAND_NOTIFY_TIMEOUT, false); if (status) return status; if (txgbe_check_mng_access(hw)) { - tmp = (u32)rd32(hw, TXGBE_MNG_SW_SM); + tmp = (u32)rd32a(hw, TXGBE_MNG_MBOX, 1); if (tmp == TXGBE_CHECKSUM_CAP_ST_PASS) status = 0; else status = TXGBE_ERR_EEPROM_CHECKSUM; + } else { status = TXGBE_ERR_MNG_ACCESS_FAILED; return status; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index df278db96ca9..26197c356edc 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -2027,6 +2027,7 @@ union txgbe_atr_hash_dword { #define 
TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ #define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ #define TXGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */ +#define TXGBE_HI_COMMAND_NOTIFY_TIMEOUT 500 /* Process HI command limit */ #define TXGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ #define TXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ #define TXGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ -- Gitee From 4800f2896b52f335de1b46c2b23fc91c79c92c63 Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Tue, 18 Jul 2023 01:50:39 -0400 Subject: [PATCH 02/10] anolis: net: txgbe: add sriov support ANBZ: #5502 add sriov support for pcie pass-through Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/Makefile | 3 +- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 195 +- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 366 +++- .../net/ethernet/wangxun/txgbe/txgbe_lib.c | 162 +- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 537 ++++-- .../net/ethernet/wangxun/txgbe/txgbe_mbx.c | 676 +++++++ .../net/ethernet/wangxun/txgbe/txgbe_mbx.h | 130 ++ .../net/ethernet/wangxun/txgbe/txgbe_sriov.c | 1669 +++++++++++++++++ .../net/ethernet/wangxun/txgbe/txgbe_sriov.h | 47 + .../net/ethernet/wangxun/txgbe/txgbe_type.h | 47 +- 10 files changed, 3587 insertions(+), 245 deletions(-) create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c create mode 100644 drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index dd84037ff169..83838054c762 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_TXGBE) += txgbe.o txgbe-objs := txgbe_main.o 
txgbe_ethtool.o \ txgbe_hw.o txgbe_phy.o \ - txgbe_lib.o txgbe_ptp.o + txgbe_lib.o txgbe_ptp.o \ + txgbe_mbx.o txgbe_sriov.o txgbe-$(CONFIG_DEBUG_FS) += txgbe_debugfs.o txgbe-${CONFIG_SYSFS} += txgbe_sysfs.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index dbfde49e0b80..6fb0af56b92e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -24,6 +24,7 @@ /* Ether Types */ #define TXGBE_ETH_P_CNM 0x22E7 +#define TXGBE_ETH_P_LLDP 0x88CC /* TX/RX descriptor defines */ #define TXGBE_DEFAULT_TXD 512 @@ -225,13 +226,22 @@ struct txgbe_ring { enum txgbe_ring_f_enum { RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ RING_F_RSS, RING_F_FDIR, +#if IS_ENABLED(CONFIG_FCOE) + RING_F_FCOE, +#endif /* CONFIG_FCOE */ RING_F_ARRAY_SIZE /* must be last in enum set */ }; #define TXGBE_MAX_RSS_INDICES 63 #define TXGBE_MAX_FDIR_INDICES 63 +#define TXGBE_MAX_VMDQ_INDICES 64 + +#define TXGBE_VMDQ_8Q_MASK 0x78 +#define TXGBE_VMDQ_4Q_MASK 0x7C +#define TXGBE_VMDQ_2Q_MASK 0x7E #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) @@ -351,6 +361,7 @@ static inline u16 txgbe_desc_unused(struct txgbe_ring *ring) (&(((struct txgbe_tx_context_desc *)((R)->desc))[i])) #define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define TXGBE_FCOE_JUMBO_FRAME_SIZE 3072 #define TCP_TIMER_VECTOR 0 #define OTHER_VECTOR 1 @@ -381,32 +392,78 @@ struct txgbe_mac_addr { /** * txgbe_adapter.flag **/ -#define TXGBE_FLAG_NEED_LINK_UPDATE BIT(0) -#define TXGBE_FLAG_NEED_LINK_CONFIG BIT(1) -#define TXGBE_FLAG_MSI_ENABLED BIT(2) +#define TXGBE_FLAG_MSI_CAPABLE BIT(0) +#define TXGBE_FLAG_MSI_ENABLED BIT(1) +#define TXGBE_FLAG_MSIX_CAPABLE BIT(2) #define TXGBE_FLAG_MSIX_ENABLED BIT(3) -#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(4) -#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE BIT(5) -#define TXGBE_FLAG_FDIR_HASH_CAPABLE 
BIT(6) -#define TXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(7) -#define TXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(8) -#define TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(9) +#ifndef TXGBE_NO_LLI +#define TXGBE_FLAG_LLI_PUSH BIT(4) +#endif + +#define TXGBE_FLAG_TPH_ENABLED BIT(6) +#define TXGBE_FLAG_TPH_CAPABLE BIT(7) +#define TXGBE_FLAG_TPH_ENABLED_DATA BIT(8) + +#define TXGBE_FLAG_MQ_CAPABLE BIT(9) +#define TXGBE_FLAG_DCB_ENABLED BIT(10) +#define TXGBE_FLAG_VMDQ_ENABLED BIT(11) +#define TXGBE_FLAG_FAN_FAIL_CAPABLE BIT(12) +#define TXGBE_FLAG_NEED_LINK_UPDATE BIT(13) +#define TXGBE_FLAG_NEED_LINK_CONFIG BIT(14) +#define TXGBE_FLAG_FDIR_HASH_CAPABLE BIT(15) +#define TXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(16) + +#define TXGBE_FLAG_SRIOV_CAPABLE BIT(19) +#define TXGBE_FLAG_SRIOV_ENABLED BIT(20) +#define TXGBE_FLAG_SRIOV_REPLICATION_ENABLE BIT(21) +#define TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE BIT(22) +#define TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE BIT(23) +#define TXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(24) +#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(25) +#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE BIT(26) +#define TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(27) +#define TXGBE_FLAG_NEED_ETH_PHY_RESET BIT(28) +#define TXGBE_FLAG_RX_HS_ENABLED BIT(30) +#define TXGBE_FLAG_LINKSEC_ENABLED BIT(31) +#define TXGBE_FLAG_IPSEC_ENABLED BIT(5) /** * txgbe_adapter.flag2 **/ -#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED BIT(0) -#define TXGBE_FLAG2_SFP_NEEDS_RESET BIT(1) -#define TXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(2) -#define TXGBE_FLAG2_PF_RESET_REQUESTED BIT(3) -#define TXGBE_FLAG2_RESET_INTR_RECEIVED BIT(4) -#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED BIT(5) -#define TXGBE_FLAG2_RSC_CAPABLE BIT(6) -#define TXGBE_FLAG2_RSC_ENABLED BIT(7) -#define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) -#define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) -#define TXGBE_FLAG2_RSS_ENABLED BIT(10) -#define TXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(11) +#define TXGBE_FLAG2_RSC_CAPABLE BIT(0) +#define TXGBE_FLAG2_RSC_ENABLED BIT(1) +#define 
TXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(3) +#define TXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(4) +#define TXGBE_FLAG2_SEARCH_FOR_SFP BIT(5) +#define TXGBE_FLAG2_SFP_NEEDS_RESET BIT(6) +#define TXGBE_FLAG2_PF_RESET_REQUESTED BIT(7) +#define TXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(8) +#define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(9) +#define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(10) +#define TXGBE_FLAG2_RSS_ENABLED BIT(12) +#define TXGBE_FLAG2_PTP_PPS_ENABLED BIT(11) +#define TXGBE_FLAG2_EEE_CAPABLE BIT(14) +#define TXGBE_FLAG2_EEE_ENABLED BIT(15) +#define TXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(16) +#define TXGBE_FLAG2_DEV_RESET_REQUESTED BIT(18) +#define TXGBE_FLAG2_RESET_INTR_RECEIVED BIT(19) +#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED BIT(20) +#define TXGBE_FLAG2_CLOUD_SWITCH_ENABLED BIT(21) +#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED BIT(22) +#define KR BIT(23) +#define TXGBE_FLAG2_KR_TRAINING BIT(24) +#define TXGBE_FLAG2_KR_AUTO BIT(25) +#define TXGBE_FLAG2_LINK_DOWN BIT(26) +#define TXGBE_FLAG2_KR_PRO_DOWN BIT(27) +#define TXGBE_FLAG2_KR_PRO_REINIT BIT(28) +#define TXGBE_FLAG2_ECC_ERR_RESET BIT(29) +#define TXGBE_FLAG2_PCIE_NEED_RECOVER BIT(31) + +/* preset defaults */ +#define TXGBE_FLAGS_SP_INIT (TXGBE_FLAG_MSI_CAPABLE \ + | TXGBE_FLAG_MSIX_CAPABLE \ + | TXGBE_FLAG_MQ_CAPABLE \ + | TXGBE_FLAG_SRIOV_CAPABLE) #define TXGBE_SET_FLAG(_input, _flag, _result) \ ((_flag <= _result) ? \ @@ -421,6 +478,45 @@ enum txgbe_isb_idx { TXGBE_ISB_MAX }; +#define TXGBE_MAX_VF_MC_ENTRIES 30 +#define TXGBE_MAX_VF_FUNCTIONS 64 + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 __iomem *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + int link_enable; + int link_state; + +#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN + bool rss_query_enabled; +#endif + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + /* board specific private data structure */ struct txgbe_adapter { u8 __iomem *io_addr; @@ -448,6 +544,8 @@ struct txgbe_adapter { int num_rx_queues; u16 rx_itr_setting; u16 rx_work_limit; + int num_rx_pools; /* does not include pools assigned to VFs */ + int num_rx_queues_per_pool; /* TX */ struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -547,8 +645,28 @@ struct txgbe_adapter { dma_addr_t isb_dma; u32 *isb_mem; u32 isb_tag[TXGBE_ISB_MAX]; + + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + u8 vf_mode; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + u8 default_up; }; +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter, enum txgbe_isb_idx idx) { @@ -687,6 +805,15 @@ void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter); void txgbe_store_reta(struct txgbe_adapter *adapter); +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf); +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf); + +#ifdef CONFIG_PCI_IOV +void txgbe_sriov_reinit(struct txgbe_adapter *adapter); +#endif + +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter); /** * interrupt masking operations. 
each bit in PX_ICn correspond to a interrupt. * disable a interrupt by writing to PX_IMS with the corresponding bit=1 @@ -762,6 +889,11 @@ __maybe_unused static struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *h return (struct txgbe_msg *)&adapter->msg_enable; } +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + #define txgbe_dbg(hw, fmt, arg...) \ netdev_dbg(txgbe_hw_to_netdev(hw), fmt, ##arg) @@ -806,4 +938,25 @@ enum { #define ERROR_REPORT2 ERROR_REPORT #define ERROR_REPORT3 ERROR_REPORT +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ngbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) 
\ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + #endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index ae68645e8c45..0f068b5e8bf9 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -986,21 +986,23 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; + int i; /* Validate the water mark configuration */ if (!hw->fc.pause_time) { ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } - /* Low water mark of zero causes XOFF floods */ - if ((hw->fc.current_mode & txgbe_fc_tx_pause) && - hw->fc.high_water) { - if (!hw->fc.low_water || - hw->fc.low_water >= hw->fc.high_water) { - txgbe_dbg(hw, "Invalid water mark configuration\n"); - ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + txgbe_dbg(hw, "Invalid water mark configuration\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } } } @@ -1064,29 +1066,32 @@ s32 txgbe_fc_enable(struct txgbe_hw *hw) wr32(hw, TXGBE_RDB_RFCC, fccfg_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. */ - if ((hw->fc.current_mode & txgbe_fc_tx_pause) && - hw->fc.high_water) { - fcrtl = (hw->fc.low_water << 10) | - TXGBE_RDB_RFCL_XONE; - wr32(hw, TXGBE_RDB_RFCL(0), fcrtl); - fcrth = (hw->fc.high_water << 10) | - TXGBE_RDB_RFCH_XOFFE; - } else { - wr32(hw, TXGBE_RDB_RFCL(0), 0); - /* In order to prevent Tx hangs when the internal Tx - * switch is enabled we must set the high water mark - * to the Rx packet buffer size - 24KB. This allows - * the Tx switch to function even under heavy Rx - * workloads. 
- */ - fcrth = rd32(hw, TXGBE_RDB_PB_SZ(0)) - 24576; - } + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | + TXGBE_RDB_RFCL_XONE; + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | + TXGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, TXGBE_RDB_RFCL(i), 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(hw, TXGBE_RDB_PB_SZ(i)) - 24576; + } - wr32(hw, TXGBE_RDB_RFCH(0), fcrth); + wr32(hw, TXGBE_RDB_RFCH(i), fcrth); + } /* Configure pause time */ reg = hw->fc.pause_time * 0x00010001; - wr32(hw, TXGBE_RDB_RFCV(0), reg); + for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + wr32(hw, TXGBE_RDB_RFCV(i), reg); /* Configure flow control refresh threshold value */ wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2); @@ -1267,6 +1272,9 @@ s32 txgbe_disable_pcie_master(struct txgbe_hw *hw) struct txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw); s32 status = 0; u32 i; + unsigned int num_vfs = adapter->num_vfs; + u16 dev_ctl; + u32 vf_bme_clear = 0; /* Always set this bit to ensure any future transactions are blocked */ pci_clear_master(adapter->pdev); @@ -1276,6 +1284,18 @@ s32 txgbe_disable_pcie_master(struct txgbe_hw *hw) TXGBE_REMOVED(hw->hw_addr)) goto out; + /* BME disable handshake will not be finished if any VF BME is 0 */ + for (i = 0; i < num_vfs; i++) { + struct pci_dev *vfdev = adapter->vfinfo[i].vfdev; + + if (!vfdev) + continue; + pci_read_config_word(vfdev, 0x4, &dev_ctl); + if ((dev_ctl & 0x4) == 0) { + vf_bme_clear = 1; + break; + } + } /* Poll for master request bit to clear */ for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { usec_delay(100); @@ -1283,9 +1303,11 @@ s32 
txgbe_disable_pcie_master(struct txgbe_hw *hw) goto out; } - ERROR_REPORT1(hw, TXGBE_ERROR_POLLING, - "PCIe transaction pending bit did not clear.\n"); - status = TXGBE_ERR_MASTER_REQUESTS_PENDING; + if (!vf_bme_clear) { + ERROR_REPORT1(hw, TXGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = TXGBE_ERR_MASTER_REQUESTS_PENDING; + } out: return status; } @@ -1554,6 +1576,161 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw) return 0; } +/** + * txgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(hw, TXGBE_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* If regindex is less than TXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. 
+ */ + if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) { + regindex = first_empty_slot; + } else { + ERROR_REPORT1(hw, TXGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = TXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * txgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, TXGBE_CFG_PORT_CTL); + if (vt & TXGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits; + + vlvf_index = txgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* If there are 
still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, TXGBE_PSR_VLAN_SWC, + (TXGBE_PSR_VLAN_SWC_VIEN | vlan)); + if (!vlan_on && !vfta_changed) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. + */ + *vfta_changed = false; + } + } else { + wr32(hw, TXGBE_PSR_VLAN_SWC, 0); + } + } + + return 0; +} + /** * txgbe_set_vfta - Set VLAN filter table * @hw: pointer to hardware structure @@ -1571,14 +1748,15 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, u32 vfta; u32 targetbit; bool vfta_changed = false; + s32 ret_val = 0; if (vlan > 4095) return TXGBE_ERR_PARAM; /* The VFTA is a bitstring made up of 128 32-bit registers * that enable the particular VLAN id, much like the MTA: - * bits[11-5]: which register - * bits[4-0]: which bit in the register + * bits[11-5]: which register + * bits[4-0]: which bit in the register */ regindex = (vlan >> 5) & 0x7F; bitindex = vlan & 0x1F; @@ -1597,8 +1775,17 @@ s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, } } + /* Part 2 + * Call txgbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = txgbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + if (vfta_changed) wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ hw->mac.vft_shadow[regindex] = vfta; return 0; @@ -1630,6 +1817,62 
@@ s32 txgbe_clear_vfta(struct txgbe_hw *hw) return 0; } +/** + * txgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_VLAN_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_VLAN_AS_H, pfvfspoof); + } +} + +/** + * txgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_ETYPE_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_ETYPE_AS_H, pfvfspoof); + } +} + /** * Get alternative WWNN/WWPN prefix from the EEPROM * @hw: pointer to hardware structure @@ -2925,6 +3168,9 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.set_vfta = txgbe_set_vfta; mac->ops.clear_vfta = txgbe_clear_vfta; mac->ops.init_uta_tables = txgbe_init_uta_tables; + mac->ops.set_vlan_anti_spoofing = txgbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + txgbe_set_ethertype_anti_spoofing; /* Flow Control */ 
mac->ops.fc_enable = txgbe_fc_enable; @@ -2960,6 +3206,8 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.init_thermal_sensor_thresh = txgbe_init_thermal_sensor_thresh; + hw->mbx.ops.init_params = txgbe_init_mbx_params_pf; + return 0; } @@ -4114,7 +4362,7 @@ int txgbe_reset_misc(struct txgbe_hw *hw) wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); wr32m(hw, TXGBE_MIS_RST_ST, - TXGBE_MIS_RST_ST_RST_INIT, 0x1E00); + TXGBE_MIS_RST_ST_RST_INIT, 0xA00); /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ wr32(hw, TXGBE_PSR_MNG_FLEX_SEL, 0); @@ -4456,15 +4704,20 @@ static void txgbe_fdir_enable(struct txgbe_hw *hw, u32 fdirctrl) s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl) { u32 flex = 0; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + int i = VMDQ_P(0) / 4; + int j = VMDQ_P(0) % 4; - flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(0), - ~(TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | + flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), + ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | TXGBE_RDB_FDIR_FLEX_CFG_MSK | - TXGBE_RDB_FDIR_FLEX_CFG_OFST)); + TXGBE_RDB_FDIR_FLEX_CFG_OFST) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j))); flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | - 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT); - wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j); + wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex); /* Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words @@ -4478,11 +4731,6 @@ s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl) /* write hashes and fdirctrl register, poll for completion */ txgbe_fdir_enable(hw, fdirctrl); - if (hw->revision_id == TXGBE_SP_MPW) { - /* errata 1: disable RSC of drop ring 0 */ - wr32m(hw, TXGBE_PX_RR_CFG(0), - TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC); - } return 0; } @@ -4499,12 +4747,12 @@ s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, struct 
txgbe_adapter *adapter = container_of(hw, struct txgbe_adapter, hw); /* Continue setup of fdirctrl register bits: - * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor - * Initialize the drop queue - * Move the flexible bytes to use the ethertype - shift 6 words - * Set the maximum length per hash bucket to 0xA filters - * Send interrupt when 64 (0x4 * 16) filters are left + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | (TXGBE_RDB_FDIR_DROP_QUEUE << @@ -4782,6 +5030,9 @@ s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw, u32 fdirm = 0; u32 fdirtcpm; u32 flex = 0; + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + int i = VMDQ_P(0) / 4; + int j = VMDQ_P(0) % 4; /* Program the relevant mask registers. If src/dst_port or src/dst_addr * are zero, then assume a full mask for that field. 
Also assume that @@ -4825,24 +5076,27 @@ s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw, /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ wr32(hw, TXGBE_RDB_FDIR_OTHER_MSK, fdirm); - flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(0), - ~(TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | + flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), + ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | TXGBE_RDB_FDIR_FLEX_CFG_MSK | - TXGBE_RDB_FDIR_FLEX_CFG_OFST)); + TXGBE_RDB_FDIR_FLEX_CFG_OFST) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j))); flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | - 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT); + 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j); switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: - /* Mask Flex Bytes */ - flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK; + flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j); + fallthrough; case 0xFFFF: break; default: txgbe_dbg(hw, "Error on flexible byte mask\n"); return TXGBE_ERR_CONFIG; } - wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex); /* store the TCP/UDP port masks, bit reversed from port layout */ fdirtcpm = txgbe_get_fdirtcpm(input_mask); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c index 07ac1bb1de03..2ab7547a5172 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c @@ -2,7 +2,67 @@ /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ #include "txgbe.h" +#include "txgbe_sriov.h" +/** + * txgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. 
+ * + **/ +static bool txgbe_cache_ring_vmdq(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + return true; +} + +/** + * txgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. 
+ * + **/ +static bool txgbe_cache_ring_rss(struct txgbe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} /** * txgbe_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize @@ -16,13 +76,10 @@ **/ static void txgbe_cache_ring_register(struct txgbe_adapter *adapter) { - u16 i; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; + if (txgbe_cache_ring_vmdq(adapter)) + return; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; + txgbe_cache_ring_rss(adapter); } #define TXGBE_RSS_64Q_MASK 0x3F @@ -32,6 +89,59 @@ static void txgbe_cache_ring_register(struct txgbe_adapter *adapter) #define TXGBE_RSS_2Q_MASK 0x1 #define TXGBE_RSS_DISABLED_MASK 0x0 +static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = TXGBE_RSS_DISABLED_MASK; + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, TXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool + */ + if (vmdq_i > 32 || rss_i < 4 || adapter->vf_mode == 63) { + vmdq_m = TXGBE_VMDQ_2Q_MASK; + rss_m = TXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = TXGBE_VMDQ_4Q_MASK; + rss_m = TXGBE_RSS_4Q_MASK; + rss_i = 4; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* 
save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i;/* maybe the same as num_rx_queues_per_pool */ + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + return true; +} + /** * txgbe_set_rss_queues: Allocate queues for RSS * @adapter: board private structure to initialize * @@ -84,7 +194,13 @@ static void txgbe_set_num_queues(struct txgbe_adapter *adapter) /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; + adapter->queues_per_pool = 1;/* maybe the same as num_rx_queues_per_pool */ + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + + if (txgbe_set_vmdq_queues(adapter)) + return; txgbe_set_rss_queues(adapter); } @@ -196,8 +312,10 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, struct txgbe_ring *ring; int node = -1; int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + int ring_count, size; - u16 rss_i = 0; + u16 __maybe_unused rss_i = 0; /* note this will allocate space for the ring structure as well!
*/ ring_count = txr_count + rxr_count; @@ -205,11 +323,14 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, (sizeof(struct txgbe_ring) * ring_count); /* customize cpu for Flow Director mapping */ - rss_i = adapter->ring_feature[RING_F_RSS].indices; - if (rss_i > 1 && adapter->atr_sample_rate) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); + if (tcs <= 1 && !(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } } } @@ -272,7 +393,11 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; - ring->queue_index = txr_idx; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -298,7 +423,11 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; - ring->queue_index = rxr_idx; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; @@ -446,6 +575,11 @@ void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter) if (!txgbe_acquire_msix_vectors(adapter)) return; +#ifdef CONFIG_PCI_IOV + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + txgbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ /* Disable RSS */ dev_warn(&adapter->pdev->dev, "Disabling RSS support\n"); adapter->ring_feature[RING_F_RSS].limit = 1; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index b2fa8d553a35..4fa3a087afa8 100644 --- 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -22,6 +22,7 @@ #include "txgbe.h" #include "txgbe_hw.h" #include "txgbe_phy.h" +#include "txgbe_sriov.h" char txgbe_driver_name[] = "txgbe"; @@ -99,6 +100,11 @@ static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter) int physfns = 0; list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif /* When the devices on the bus don't all match our device ID, * we can't reliably determine the correct number of * functions. This can occur if a function has been direct @@ -1284,7 +1290,13 @@ static void txgbe_configure_msix(struct txgbe_adapter *adapter) u16 v_idx; /* Populate MSIX to EITR Select */ - wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + + wr32(&adapter->hw, TXGBE_PX_ITRSEL, eitrsel); + } else { + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + } /* Populate the IVAR table and set the ITR values to the * corresponding register. @@ -1602,6 +1614,9 @@ static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN)) txgbe_check_lsc(adapter); + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) + txgbe_msg_task(adapter); + if (eicr & TXGBE_PX_MISC_IC_INT_ERR) { netif_info(adapter, link, adapter->netdev, "Received unrecoverable ECC Err, initiating reset.\n"); @@ -2090,8 +2105,9 @@ void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter) * This allows us to avoid head of line blocking for security * and performance reasons. 
*/ - if ((adapter->num_rx_queues > 1 && - !(adapter->hw.fc.current_mode & txgbe_fc_tx_pause))) { + if (adapter->num_vfs || + (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & txgbe_fc_tx_pause))) { for (i = 0; i < adapter->num_rx_queues; i++) txgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); } else { @@ -2177,6 +2193,9 @@ static void txgbe_setup_reta(struct txgbe_adapter *adapter) u32 reta_entries = 128; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED && rss_i < 2) + rss_i = 2; + /* Fill out hash function seeds */ for (i = 0; i < 10; i++) wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); @@ -2402,7 +2421,7 @@ static void txgbe_setup_psrtype(struct txgbe_adapter *adapter) psrtype |= 1 << 29; for_each_set_bit(pool, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) - wr32(hw, TXGBE_RDB_PL_CFG(pool), psrtype); + wr32(hw, TXGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); } static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter) @@ -2487,10 +2506,20 @@ static int txgbe_vlan_rx_add_vid(struct net_device *netdev, { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); /* add VID to filter table */ if (hw->mac.ops.set_vfta) - TCALL(hw, mac.ops.set_vfta, vid, 0, true); + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, true); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), true); + } set_bit(vid, adapter->active_vlans); @@ -2502,10 +2531,20 @@ static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); /* remove VID from filter table */ - if (hw->mac.ops.set_vfta) - TCALL(hw, mac.ops.set_vfta, vid, 0, false); + if (hw->mac.ops.set_vfta) { + TCALL(hw, mac.ops.set_vfta, 
vid, pool_ndx, false); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), false); + } + } clear_bit(vid, adapter->active_vlans); @@ -2572,7 +2611,6 @@ static void txgbe_restore_vlan(struct txgbe_adapter *adapter) struct net_device *netdev = adapter->netdev; u16 vid; - txgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); txgbe_vlan_mode(netdev, netdev->features); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) @@ -2584,8 +2622,9 @@ static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw, { struct netdev_hw_addr *mc_ptr; u8 *addr = *mc_addr_ptr; + struct txgbe_adapter *adapter = hw->back; - *vmdq = 0; + *vmdq = VMDQ_P(0); mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); if (mc_ptr->list.next) { @@ -2636,6 +2675,10 @@ int txgbe_write_mc_addr_list(struct net_device *netdev) txgbe_addr_list_itr, true); } +#ifdef CONFIG_PCI_IOV + txgbe_restore_vf_multicasts(adapter); +#endif + return addr_count; } @@ -2680,7 +2723,7 @@ static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, struct txgbe_hw *hw = &adapter->hw; memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].pools = 1ULL; + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | TXGBE_MAC_STATE_IN_USE); TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr, @@ -2819,7 +2862,7 @@ void txgbe_set_rx_mode(struct net_device *netdev) /* Check for Promiscuous and All Multicast modes */ fctrl = rd32m(hw, TXGBE_PSR_CTL, ~(TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE)); - vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(0), + vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), ~(TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_ROPE | @@ -2865,7 +2908,7 @@ void txgbe_set_rx_mode(struct net_device *netdev) * sufficient space to store all the 
addresses then enable * unicast promiscuous mode */ - count = txgbe_write_uc_addr_list(netdev, 0); + count = txgbe_write_uc_addr_list(netdev, VMDQ_P(0)); if (count < 0) { vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; vmolr |= TXGBE_PSR_VM_L2CTL_UPE; @@ -2883,7 +2926,7 @@ void txgbe_set_rx_mode(struct net_device *netdev) wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); wr32(hw, TXGBE_PSR_CTL, fctrl); - wr32(hw, TXGBE_PSR_VM_L2CTL(0), vmolr); + wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) txgbe_vlan_strip_enable(adapter); @@ -2918,7 +2961,6 @@ void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter) adapter->vxlan_port = 0; if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return; - wr32(&adapter->hw, TXGBE_CFG_VXLAN, 0); } #define TXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ @@ -2951,6 +2993,9 @@ static int txgbe_hpbthresh(struct txgbe_adapter *adapter, int pb) /* Calculate delay value for device */ dv_id = TXGBE_DV(link, tc); + /* Loopback switch introduces additional latency */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + dv_id += TXGBE_B2BT(tc); /* Delay value is calculated in bit times convert to KB */ kb = TXGBE_BT2KB(dv_id); rx_pba = rd32(hw, TXGBE_RDB_PB_SZ(pb)) >> TXGBE_RDB_PB_SZ_SHIFT; @@ -3001,15 +3046,23 @@ static int txgbe_lpbthresh(struct txgbe_adapter *adapter, int __maybe_unused pb) static void txgbe_pbthresh_setup(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; - hw->fc.high_water = txgbe_hpbthresh(adapter, 0); - hw->fc.low_water = txgbe_lpbthresh(adapter, 0); + if (!num_tc) + num_tc = 1; - /* Low water marks must not be larger than high water marks */ - if (hw->fc.low_water > hw->fc.high_water) - hw->fc.low_water = 0; + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = txgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = txgbe_lpbthresh(adapter, i); - hw->fc.high_water = 0; + /* Low water marks must not be larger 
than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; } static void txgbe_configure_pb(struct txgbe_adapter *adapter) @@ -3038,7 +3091,7 @@ static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) if (!hlist_empty(&adapter->fdir_filter_list)) txgbe_fdir_set_input_mask(hw, &adapter->fdir_mask, - adapter->cloud_mode); + adapter->cloud_mode); hlist_for_each_entry_safe(filter, node, &adapter->fdir_filter_list, fdir_node) { @@ -3046,23 +3099,35 @@ static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) queue = TXGBE_RDB_FDIR_DROP_QUEUE; } else { u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); - if (ring >= adapter->num_rx_queues) { - netif_err(adapter, drv, adapter->netdev, - "FDIR restore failed, ring:%u\n", - ring); + if (!vf && ring >= adapter->num_rx_queues) { + e_err(drv, + "FDIR restore failed w/o vf, ring:%u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, + "FDIR restore failed vf:%hhu, ring:%u\n", + vf, ring); continue; } /* Map the ring onto the absolute queue index */ - queue = adapter->rx_ring[ring]->reg_idx; + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; } txgbe_fdir_write_perfect_filter(hw, - &filter->filter, - filter->sw_idx, - queue, - adapter->cloud_mode); + &filter->filter, + filter->sw_idx, + queue, + adapter->cloud_mode); } spin_unlock(&adapter->fdir_perfect_lock); @@ -3078,17 +3143,122 @@ static void txgbe_configure_isb(struct txgbe_adapter *adapter) wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); } +/** + * txgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code 
duplication and configure some + * settings required to switch bridge modes. + **/ +static void txgbe_configure_bridge_mode(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (adapter->flags & TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, 0); + else + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); +} + +static void txgbe_configure_virtualization(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 i; + + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return; + + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK | + TXGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << TXGBE_PSR_VM_CTL_POOL_SHIFT | + TXGBE_PSR_VM_CTL_REPLEN); + + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + for_each_set_bit(i, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) + wr32m(hw, TXGBE_PSR_VM_L2CTL(i), + TXGBE_PSR_VM_L2CTL_AUPE, TXGBE_PSR_VM_L2CTL_AUPE); + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return; + + /* configure default bridge settings */ + txgbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below.
+ */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + TXGBE_ETH_P_LLDP)); /* LLDP eth protocol type */ + + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vfinfo[i].spoofchk_enabled) + txgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); + + /* enable ethertype anti spoofing if hw supports it */ + TCALL(hw, mac.ops.set_ethertype_anti_spoofing, true, i); + } +} + +#ifdef CONFIG_PCI_IOV +void txgbe_sriov_reinit(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + txgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} +#endif + void txgbe_configure_port(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; - u32 value, i; + u32 value = 0; + u32 i; - value = TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; - wr32m(hw, TXGBE_CFG_PORT_CTL, - TXGBE_CFG_PORT_CTL_D_VLAN | - TXGBE_CFG_PORT_CTL_QINQ, - value); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); wr32(hw, TXGBE_CFG_TAG_TPID(0), ETH_P_8021Q | ETH_P_8021AD << 16); adapter->hw.tpid[0] = ETH_P_8021Q; @@ -3106,6 +3276,10 @@ static void txgbe_configure(struct txgbe_adapter *adapter) txgbe_configure_pb(adapter); + /* We must restore virtualization before VLANs or
else + * the VLVF registers will not be populated + */ + txgbe_configure_virtualization(adapter); txgbe_configure_port(adapter); txgbe_set_rx_mode(adapter->netdev); @@ -3254,6 +3428,8 @@ static void txgbe_up_complete(struct txgbe_adapter *adapter) /* Set PF Reset Done bit so PF/VF Mail Ops can work */ wr32m(hw, TXGBE_CFG_PORT_CTL, TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD); + /* update setting rx tx for all active vfs */ + txgbe_set_all_vfs(adapter); } void txgbe_reinit_locked(struct txgbe_adapter *adapter) @@ -3264,6 +3440,10 @@ void txgbe_reinit_locked(struct txgbe_adapter *adapter) while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); txgbe_down(adapter); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); clear_bit(__TXGBE_RESETTING, &adapter->state); } @@ -3313,7 +3493,7 @@ void txgbe_reset(struct txgbe_adapter *adapter) txgbe_mac_set_default_filter(adapter, old_addr); /* update SAN MAC vmdq pool selection */ - TCALL(hw, mac.ops.set_vmdq_san_mac, 0); + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) txgbe_ptp_reset(adapter); @@ -3503,6 +3683,20 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) dev_err(&adapter->pdev->dev, "%s: invalid bus lan id %d\n", __func__, hw->bus.lan_id); + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + txgbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + txgbe_set_all_vfs(adapter); + } if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { @@ -3611,18 +3805,28 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) adapter->tx_itr_setting = 1; 
adapter->atr_sample_rate = 20; + adapter->vf_mode = 63; adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags |= TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags2 |= TXGBE_FLAG2_RSC_CAPABLE; + adapter->flags |= TXGBE_FLAGS_SP_INIT; + fdir = min_t(int, TXGBE_MAX_FDIR_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_FDIR].limit = fdir; adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_64K; adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE; + adapter->ring_feature[RING_F_VMDQ].limit = 0; + adapter->num_vfs = 0; + /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); + TCALL(hw, mbx.ops.init_params); + /* default flow control settings */ hw->fc.requested_mode = txgbe_fc_full; hw->fc.current_mode = txgbe_fc_full; @@ -3638,6 +3842,8 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK; adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); set_bit(__TXGBE_DOWN, &adapter->state); @@ -3923,10 +4129,19 @@ static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter) static int txgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct txgbe_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; if (new_mtu < 68 || new_mtu > 9414) return -EINVAL; + /* we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. 
+ */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + netif_info(adapter, probe, netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -3982,14 +4197,16 @@ int txgbe_open(struct net_device *netdev) if (err) goto err_req_irq; - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); if (err) goto err_set_queues; - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); - if (err) - goto err_set_queues; + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); txgbe_ptp_init(adapter); @@ -4563,6 +4780,9 @@ static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + txgbe_ping_all_vfs(adapter); } /** @@ -4587,6 +4807,8 @@ static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) netif_info(adapter, drv, netdev, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); + /* ping all the active vfs to let them know link has changed */ + txgbe_ping_all_vfs(adapter); } static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) @@ -4603,6 +4825,34 @@ static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) return false; } +static bool txgbe_vf_tx_pending(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; 
i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + TXGBE_PX_TR_RPN(q_per_pool, i, j)); + t = rd32(hw, + TXGBE_PX_TR_WPN(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + /** * txgbe_watchdog_flush_tx - flush queues on link down * @adapter: pointer to the device adapter structure @@ -4610,7 +4860,8 @@ static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter) { if (!netif_carrier_ok(adapter->netdev)) { - if (txgbe_ring_tx_pending(adapter)) { + if (txgbe_ring_tx_pending(adapter) || + txgbe_vf_tx_pending(adapter)) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. @@ -4623,6 +4874,24 @@ static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter) } } +static void txgbe_spoof_check(struct txgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, TXGBE_TDM_SEC_DRP); + + /* ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. + */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + /** * txgbe_watchdog_subtask - check and bring link up * @adapter: pointer to the device adapter structure @@ -4642,6 +4911,10 @@ static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) else txgbe_watchdog_link_is_down(adapter); +#ifdef CONFIG_PCI_IOV + txgbe_spoof_check(adapter); +#endif /* CONFIG_PCI_IOV */ + txgbe_update_stats(adapter); txgbe_watchdog_flush_tx(adapter); @@ -5780,7 +6053,11 @@ netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, adapter->tx_hwtstamp_skipped++; } } - + /* Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
+ */ + if (adapter->flags & TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= TXGBE_TX_FLAGS_CC; /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; @@ -5858,11 +6135,14 @@ static int txgbe_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - txgbe_del_mac_filter(adapter, hw->mac.addr, 0); + txgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); txgbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); return 0; } @@ -5887,7 +6167,7 @@ static int txgbe_add_sanmac_netdev(struct net_device *dev) rtnl_unlock(); /* update SAN MAC vmdq pool selection */ - TCALL(hw, mac.ops.set_vmdq_san_mac, 0); + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); } return err; } @@ -5968,105 +6248,6 @@ void txgbe_do_reset(struct net_device *netdev) txgbe_reset(adapter); } -static netdev_features_t txgbe_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - struct txgbe_adapter *adapter = netdev_priv(netdev); - - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - /* Turn off LRO if not RSC capable */ - if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) - features &= ~NETIF_F_LRO; - - return features; -} - -static int txgbe_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct txgbe_adapter *adapter = netdev_priv(netdev); - bool need_reset = false; - - /* Make sure RSC matches LRO, reset if change */ - if (!(features & NETIF_F_LRO)) { - if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) - need_reset = true; - adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; - } else if 
((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && - !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { - if (adapter->rx_itr_setting == 1 || - adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { - adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; - need_reset = true; - } else if ((netdev->features ^ features) & NETIF_F_LRO) { - netif_info(adapter, probe, netdev, - "rx-usecs set too low, disabling RSC\n"); - } - } - - /* Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. - */ - switch (features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: - /* turn off ATR, enable perfect filters and reset */ - if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - need_reset = true; - - adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: - /* turn off perfect filters, enable ATR and reset */ - if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) - need_reset = true; - - adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; - break; - } - - if (features & NETIF_F_HW_VLAN_CTAG_RX) - txgbe_vlan_strip_enable(adapter); - else - txgbe_vlan_strip_disable(adapter); - - if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && - features & NETIF_F_RXCSUM)) - txgbe_clear_vxlan_port(adapter); - - if (features & NETIF_F_RXHASH) { - if (!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) { - wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, - TXGBE_RDB_RA_CTL_RSS_EN, TXGBE_RDB_RA_CTL_RSS_EN); - adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; - } - } else { - if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) { - wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, - TXGBE_RDB_RA_CTL_RSS_EN, ~TXGBE_RDB_RA_CTL_RSS_EN); - adapter->flags2 &= ~TXGBE_FLAG2_RSS_ENABLED; - } - } 
- - if (need_reset) - txgbe_do_reset(netdev); - - return 0; -} - static int txgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, @@ -6083,6 +6264,24 @@ static int txgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); + } +} + #define TXGBE_MAX_TUNNEL_HDR_LEN 80 static netdev_features_t txgbe_features_check(struct sk_buff *skb, struct net_device *dev, @@ -6136,8 +6335,14 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_get_stats64 = txgbe_get_stats64, .ndo_fdb_add = txgbe_ndo_fdb_add, .ndo_features_check = txgbe_features_check, - .ndo_set_features = txgbe_set_features, - .ndo_fix_features = txgbe_fix_features, + .ndo_set_vf_mac = txgbe_ndo_set_vf_mac, + .ndo_set_vf_trust = txgbe_ndo_set_vf_trust, + .ndo_set_vf_spoofchk = txgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = txgbe_ndo_set_vf_link_state, + .ndo_set_vf_vlan = txgbe_ndo_set_vf_vlan, + .ndo_set_vf_rate = txgbe_ndo_set_vf_bw, + .ndo_get_vf_config = txgbe_ndo_get_vf_config, + }; void txgbe_assign_netdev_ops(struct net_device *dev) @@ -6195,6 +6400,7 @@ static int txgbe_probe(struct pci_dev *pdev, char *info_string, *i_s_var; u8 part_str[TXGBE_PBANUM_LENGTH]; bool disable_dev = false; + u32 match; err = pci_enable_device_mem(pdev); if (err) @@ -6241,6 +6447,7 @@ static int txgbe_probe(struct pci_dev *pdev, adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; + hw->back = adapter; adapter->msg_enable = (1 << 
DEFAULT_DEBUG_LEVEL_SHIFT) - 1; adapter->io_addr = devm_ioremap(&pdev->dev, @@ -6285,6 +6492,26 @@ static int txgbe_probe(struct pci_dev *pdev, goto err_free_mac_table; } +#ifdef CONFIG_PCI_IOV + if (adapter->num_vfs > 0) { + netif_warn(adapter, probe, netdev, + "Enabling SR-IOV VFs using the max_vfs module parameter is deprecated.\n"); + netif_warn(adapter, probe, netdev, "Please use the pci sysfs interface instead. Ex:\n"); + netif_warn(adapter, probe, netdev, + "echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n", + adapter->num_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn) + ); + } + + match = min_t(u32, adapter->vf_mode, TXGBE_MAX_VFS_DRV_LIMIT); + pci_sriov_set_totalvfs(pdev, match); + txgbe_enable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ + netdev->features = NETIF_F_SG | NETIF_F_LRO | NETIF_F_TSO | @@ -6305,7 +6532,8 @@ static int txgbe_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXALL; - netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; netdev->features |= NETIF_F_HIGHDMA; @@ -6330,7 +6558,6 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); - /* make sure the EEPROM is good */ if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); @@ -6483,6 +6710,7 @@ static int txgbe_probe(struct pci_dev *pdev, "allocation for info string failed\n"); goto no_info_string; } + i_s_var = info_string; i_s_var += sprintf(info_string, "Enabled Features: "); i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", @@ -6499,6 +6727,12 @@ static int txgbe_probe(struct pci_dev *pdev, netif_info(adapter, probe, netdev, "%s\n", info_string); kfree(info_string); no_info_string: + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + int i; + + for (i = 0; i < 
adapter->num_vfs; i++) + txgbe_vf_configuration(pdev, (i | 0x10000000)); + } /* firmware requires blank driver version */ TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF); @@ -6574,6 +6808,10 @@ static void txgbe_remove(struct pci_dev *pdev) adapter->netdev_registered = false; } +#ifdef CONFIG_PCI_IOV + txgbe_disable_sriov(adapter); +#endif + txgbe_clear_interrupt_scheme(adapter); txgbe_release_hw_control(adapter); @@ -6613,6 +6851,7 @@ static struct pci_driver txgbe_driver = { .resume = txgbe_resume, #endif .shutdown = txgbe_shutdown, + .sriov_configure = txgbe_pci_sriov_configure, }; /** diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c new file mode 100644 index 000000000000..7d13dbd32322 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include "txgbe_type.h" +#include "txgbe.h" +#include "txgbe_mbx.h" + +/** + * txgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); + + return err; +} + +/** + * txgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = 0; 
+ + if (size > mbx->size) { + err = TXGBE_ERR_MBX; + ERROR_REPORT2(hw, TXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else { + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + } + + return err; +} + +/** + * txgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_msg, mbx_id); + + return err; +} + +/** + * txgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_ack, mbx_id); + + return err; +} + +/** + * txgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * txgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_msg, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + 
ERROR_REPORT2(hw, TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? 0 : TXGBE_ERR_MBX; +} + +/** + * txgbe_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_ack, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(hw, TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? 0 : TXGBE_ERR_MBX; +} + +/** + * txgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = txgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); +out: + return err; +} + +/** + * txgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +int txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return TXGBE_ERR_MBX; + + /* send msg */ + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = txgbe_poll_for_ack(hw, mbx_id); + + return err; +} + +/** + * txgbe_init_mbx_ops - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void txgbe_init_mbx_ops(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; +} + +/** + * txgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
+ **/ +u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw) +{ + u32 v2p_mailbox = rd32(hw, TXGBE_VXMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + /* read and clear mirrored mailbox flags */ + v2p_mailbox |= rd32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE); + wr32a(hw, TXGBE_VXMBMEM, TXGBE_VXMAILBOX_SIZE, 0); + hw->mbx.v2p_mailbox |= v2p_mailbox & TXGBE_VXMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * txgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +int txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask) +{ + u32 mailbox = txgbe_read_v2p_mailbox(hw); + + hw->mbx.v2p_mailbox &= ~mask; + + return (mailbox & mask ? 0 : TXGBE_ERR_MBX); +} + +/** + * txgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 __maybe_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + /* read clear the pf sts bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +int txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 __maybe_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + /* read clear the pf ack bit */ + if (!txgbe_check_for_bit_vf(hw, TXGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set 
the reset done bit or else false + **/ +int txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 __maybe_unused mbx_id) +{ + int err = TXGBE_ERR_MBX; + + if (!txgbe_check_for_bit_vf(hw, (TXGBE_VXMAILBOX_RSTD | + TXGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +int txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw) +{ + int err = TXGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = txgbe_read_v2p_mailbox(hw); + if (mailbox & TXGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(hw, TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * txgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 __maybe_unused mbx_id) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_vf(hw, 0); + txgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * txgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: 
The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +int txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 __maybe_unused mbx_id) +{ + int err = 0; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, TXGBE_VXMAILBOX, TXGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void txgbe_init_mbx_params_vf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = TXGBE_VF_MBX_INIT_DELAY; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_vf; + mbx->ops.write = txgbe_write_mbx_vf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_vf; + mbx->ops.check_for_ack = txgbe_check_for_ack_vf; + mbx->ops.check_for_rst = txgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) +{ + u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index)); + int err = TXGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, TXGBE_MBVFICR(index), mask); + } + + return err; +} + +/** + * txgbe_check_for_msg_pf - checks to see if the VF has 
sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) +{ + u32 reg_offset = (vf < 32) ? 
0 : 1; + u32 vf_shift = vf % 32; + u32 vflre = 0; + int err = TXGBE_ERR_MBX; + + vflre = rd32(hw, TXGBE_VFLRE(reg_offset)); + + if (vflre & (1 << vf_shift)) { + err = 0; + wr32(hw, TXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, TXGBE_PXMAILBOX(vf)); + if (mailbox & TXGBE_PXMAILBOX_PFU) + err = 0; + else + ERROR_REPORT2(hw, TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); + + return err; +} + +/** + * txgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_pf(hw, vf); + txgbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_STS); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; +} + +/** + * 
txgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_ACK); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void txgbe_init_mbx_params_pf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->udelay = 0; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_pf; + mbx->ops.write = txgbe_write_mbx_pf; + mbx->ops.read_posted = txgbe_read_posted_mbx; + mbx->ops.write_posted = txgbe_write_posted_mbx; + mbx->ops.check_for_msg = txgbe_check_for_msg_pf; + mbx->ops.check_for_ack = txgbe_check_for_ack_pf; + mbx->ops.check_for_rst = txgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h new file mode 
100644 index 000000000000..47ff20284eda --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_mbx.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_MBX_H_ +#define _TXGBE_MBX_H_ + +#define TXGBE_VXMAILBOX_SIZE (16 - 1) + +#define TXGBE_VXMAILBOX 0x00600 +#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD | \ + TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK) + +#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */ + +#define TXGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,63] */ +#define TXGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define TXGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define TXGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define TXGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define TXGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLRE(i) (0x004A0 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLREC(i) (0x004A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define TXGBE_MBVFICR(i) (0x00480 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* 
bits for VF messages */ +#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +#define TXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with this are the ACK */ +#define TXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with this are the NACK */ +#define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still clear to send requests */ +#define TXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT) + +enum txgbe_pfvf_api_rev { + txgbe_mbox_api_null, + txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + txgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define TXGBE_VF_RESET 0x01 /* VF requests reset */ +#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define 
TXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define TXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum txgbevf_xcast_modes { + TXGBEVF_XCAST_MODE_NONE = 0, + TXGBEVF_XCAST_MODE_MULTI, + TXGBEVF_XCAST_MODE_ALLMULTI, + TXGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define TXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define TXGBE_VF_MC_TYPE_WORD 3 + +#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +int txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +int 
txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); +int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id); +int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id); +int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id); +void txgbe_init_mbx_ops(struct txgbe_hw *hw); +void txgbe_init_mbx_params_vf(struct txgbe_hw *hw); +void txgbe_init_mbx_params_pf(struct txgbe_hw *hw); + +#endif /* _TXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c new file mode 100644 index 000000000000..053fa8bab40f --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.c @@ -0,0 +1,1669 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_type.h" +#include "txgbe_sriov.h" + +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf); + +#ifdef CONFIG_PCI_IOV +static int __txgbe_enable_sriov(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + u32 value = 0; + + adapter->flags |= TXGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + if (adapter->num_vfs != 1) { + if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= TXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + 
(TXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + adapter->mv_list = mv_list; + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. + */ + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (!adapter->vfinfo) + return -ENOMEM; + + /* enable L2 switch and replication */ + adapter->flags |= TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE; + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(TXGBE_FLAG2_RSC_CAPABLE | + TXGBE_FLAG2_RSC_ENABLED); + + /* enable spoof checking for all VFs */ + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = TXGBEVF_XCAST_MODE_NONE; + } + + return 0; +} + +#define TXGBE_BA4_ADDR(vfinfo, reg) \ + ((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg))) +static int txgbe_vf_backup(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; + + return 0; +} + +static int txgbe_vf_restore(struct txgbe_adapter *adapter, u16 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->b4_addr) + return -1; + + return 0; +} + +/** + * txgbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ 
+static void txgbe_get_vfs(struct txgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + struct vf_data_storage *vfinfo; + + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + + /*pci_dev_get(vfdev);*/ + vfinfo = &adapter->vfinfo[vf]; + vfinfo->vfdev = vfdev; + vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64); +#ifdef CONFIG_PCI_IOV + txgbe_vf_backup(adapter, vf); +#endif + ++vf; + } +} + +/** + * txgbe_pet_vfs - Release references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void txgbe_put_vfs(struct txgbe_adapter *adapter) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct vf_data_storage *vfinfo; + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + vfinfo = &adapter->vfinfo[vf]; + iounmap(vfinfo->b4_addr); + vfinfo->b4_addr = NULL; + vfinfo->vfdev = NULL; + /*pci_dev_put(vfdev);*/ + } +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void txgbe_enable_sriov(struct txgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. 
+ * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device\n"); + } else { + int err; + int match; + /* The sapphire supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + match = min_t(unsigned int, adapter->vf_mode, TXGBE_MAX_VFS_DRV_LIMIT); + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, match); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__txgbe_enable_sriov(adapter)) { + txgbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. 
+ */ + e_err(probe, "Unable to allocate memory for VF Data Storage SRIOV disabled\n"); + txgbe_disable_sriov(adapter); +} +#endif /* CONFIG_PCI_IOV */ + +int txgbe_disable_sriov(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + +#ifdef CONFIG_PCI_IOV + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ +#ifdef CONFIG_PCI_IOV + txgbe_put_vfs(adapter); +#endif + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + /* set default pool back to 0 */ + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK, 0); + TXGBE_WRITE_FLUSH(hw); + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + /* take a breather then clean up driver data */ + msleep(100); + + adapter->flags &= ~(TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + TXGBE_FLAG_SRIOV_REPLICATION_ENABLE); + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + return 0; +} + +static int txgbe_set_vf_multicasts(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) + >> TXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct 
txgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min_t(u16, entries, (u16)TXGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, TXGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + +void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(i)); + + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, TXGBE_PSR_MC_TBL(vector_reg), + 1 << vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= TXGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + 
txgbe_full_sync_mac_table(adapter); +} + +int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return TCALL(hw, mac.ops.set_vfta, vid, vf, (bool)add); +} + +static int txgbe_set_vf_lpe(struct txgbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* For sapphire we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + /* Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + fallthrough; + default: + /* If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if (pf_max_frame > ETH_FRAME_LEN || + max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), 
vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, TXGBE_MAC_WDG_TIMEOUT) & + TXGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) { + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, + max_frs - TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + + vmolr |= TXGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= TXGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~TXGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void txgbe_set_vmvir(struct txgbe_adapter *adapter, + u16 vid, u16 qos, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + TXGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(hw, TXGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void txgbe_clear_vmvir(struct txgbe_adapter *adapter, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void txgbe_vf_reset_event(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + txgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + txgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + txgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + txgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + TCALL(hw, 
mac.ops.set_vlan_anti_spoofing, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + txgbe_set_rx_mode(adapter->netdev); + + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = txgbe_mbox_api_10; +} + +int txgbe_set_vf_mac(struct txgbe_adapter *adapter, + u16 vf, unsigned char *mac_addr) +{ + s32 retval = 0; + + txgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, + ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} + +static int txgbe_negotiate_vf_api(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case txgbe_mbox_api_10: + case txgbe_mbox_api_11: + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int txgbe_get_vf_queues(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_20: + case txgbe_mbox_api_13: + case txgbe_mbox_api_12: + case txgbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[TXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[TXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + 
msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[TXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[TXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + if (adapter->vf_mode == 63) + msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; + else if (adapter->vf_mode == 31) + msgbuf[TXGBE_VF_DEF_QUEUE] = 4; + else + msgbuf[TXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int txgbe_set_vf_macvlan(struct txgbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + txgbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = txgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void txgbe_write_qde(struct txgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + + reg = rd32(hw, TXGBE_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (qde == 1) + reg |= qde << i; + else + reg &= qde << i; + } + + wr32(hw, TXGBE_RDM_PF_QDE(n), reg); +} + +static inline void txgbe_write_hide_vlan(struct txgbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0; + u32 i = vf * q_per_pool; + u32 n = i / 32; + + reg = rd32(hw, TXGBE_RDM_PF_HIDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (hide_vlan == 1) + reg |= hide_vlan << i; + else + reg &= hide_vlan << i; + } + + wr32(hw, TXGBE_RDM_PF_HIDE(n), reg); +} + +static int txgbe_vf_reset_msg(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = 
adapter->vfinfo[vf].vf_mac_addresses; + u32 reg = 0; + u32 reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + +#ifdef CONFIG_PCI_IOV + txgbe_vf_restore(adapter, vf); +#endif + + /* reset the filters for the device */ + txgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + txgbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* force drop enable for all VF Rx queues */ + txgbe_write_qde(adapter, vf, 1); + + /* set transmit and receive for vf */ + txgbe_set_vf_rx_tx(adapter, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + TXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg = (1 << vf_shift); + wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = TXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } + + /* Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int txgbe_set_vf_mac_addr(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if 
(adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + e_warn(drv, + "VF %d already has an administratively set MAC address\n", + vf); + + return -1; + } + + return txgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int txgbe_find_vlvf_entry(struct txgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int txgbe_set_vf_vlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> TXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & TXGBE_PSR_VLAN_SWC_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN configuration\n", + vf); + return 0; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = txgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. 
+ */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits, vlvf; + s32 reg_ndx; + + reg_ndx = txgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, TXGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, TXGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && !bits) + txgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif + return err; +} + +static int txgbe_set_vf_macvlan_msg(struct txgbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >> + TXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return 0; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } +#if defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) + /* If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
+ */ + if (adapter->vfinfo[vf].spoofchk_enabled) + txgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); +#endif /* defined(IFLA_VF_MAX) && defined(HAVE_VF_SPOOFCHK_CONFIGURE) */ + } + + err = txgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for it\n", + vf); + + return err < 0; +} + +static int txgbe_update_vf_xcast_mode(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + fallthrough; + case txgbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case TXGBEVF_XCAST_MODE_NONE: + disable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case TXGBEVF_XCAST_MODE_MULTI: + disable = TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE; + break; + case TXGBEVF_XCAST_MODE_ALLMULTI: + disable = TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | TXGBE_PSR_VM_L2CTL_MPE; + break; + case TXGBEVF_XCAST_MODE_PROMISC: + disable = 0; + enable = TXGBE_PSR_VM_L2CTL_BAM | TXGBE_PSR_VM_L2CTL_ROMPE | + TXGBE_PSR_VM_L2CTL_MPE | TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(hw, TXGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(hw, TXGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + 
msgbuf[1] = xcast_mode; + + return 0; +} + +static int txgbe_get_vf_link_state(struct txgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case txgbe_mbox_api_12: + case txgbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } + + *link_state = adapter->vfinfo[vf].link_enable; + + return 0; +} + +static int txgbe_rcv_msg_from_vf(struct txgbe_adapter *adapter, u16 vf) +{ + u16 mbx_size = TXGBE_VXMAILBOX_SIZE; + u32 msgbuf[TXGBE_VXMAILBOX_SIZE]; + struct txgbe_hw *hw = &adapter->hw; + s32 retval; + + retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + TXGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == TXGBE_VF_RESET) + return txgbe_vf_reset_msg(adapter, vf); + + /* until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
+ */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + txgbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case TXGBE_VF_SET_MAC_ADDR: + retval = txgbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_MULTICAST: + retval = txgbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_VLAN: + retval = txgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_SET_LPE: + if (msgbuf[1] > TXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); + return -EINVAL; + } + retval = txgbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case TXGBE_VF_SET_MACVLAN: + retval = txgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case TXGBE_VF_API_NEGOTIATE: + retval = txgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_QUEUES: + retval = txgbe_get_vf_queues(adapter, msgbuf, vf); + break; + case TXGBE_VF_UPDATE_XCAST_MODE: + retval = txgbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case TXGBE_VF_GET_LINK_STATE: + retval = txgbe_get_vf_link_state(adapter, msgbuf, vf); + break; + case TXGBE_VF_BACKUP: +#ifdef CONFIG_PCI_IOV + retval = txgbe_vf_backup(adapter, vf); +#endif + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = TXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS; + + txgbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void txgbe_rcv_ack_from_vf(struct txgbe_adapter *adapter, u16 vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 msg = TXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + txgbe_write_mbx(hw, &msg, 1, vf); +} + +void txgbe_msg_task(struct txgbe_adapter *adapter) +{ + 
struct txgbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!txgbe_check_for_rst(hw, vf)) + txgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!txgbe_check_for_msg(hw, vf)) + txgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!txgbe_check_for_ack(hw, vf)) + txgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void txgbe_disable_tx_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + wr32(hw, TXGBE_TDM_VF_TE(0), 0); + wr32(hw, TXGBE_TDM_VF_TE(1), 0); + + wr32(hw, TXGBE_RDM_VF_RE(0), 0); + wr32(hw, TXGBE_RDM_VF_RE(1), 0); +} + +static inline void txgbe_ping_vf(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, vf); +} + +void txgbe_ping_all_vfs(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = TXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= TXGBE_VT_MSGTYPE_CTS; + txgbe_write_mbx(hw, &ping, 1, i); + } +} + +/** + * txgbe_set_all_vfs - update vfs queues + * @adapter: Pointer to adapter struct + * + * Update setting transmit and receive queues for all vfs + **/ +void txgbe_set_all_vfs(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + txgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); + } +} + +int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure 
features */ + adapter->vfinfo[vf].clear_to_send = false; + txgbe_ping_vf(adapter, vf); + + netif_info(adapter, drv, netdev, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} + +static int txgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ + int err = 0; +#ifdef CONFIG_PCI_IOV + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = txgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. + */ + if ((num_vfs + adapter->num_vmdqs) > TXGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __txgbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + txgbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + txgbe_get_vfs(adapter); + txgbe_sriov_reinit(adapter); + +out: + return num_vfs; + +err_out: +#endif + + return err; +} + +static int txgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(dev); + int err; +#ifdef CONFIG_PCI_IOV + u32 current_flags = adapter->flags; +#endif + + err = txgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +#ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) + txgbe_sriov_reinit(adapter); +#endif + + return err; +} + +int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return 
txgbe_pci_sriov_disable(dev); + else + return txgbe_pci_sriov_enable(dev, num_vfs); +} + +int txgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) + return -EINVAL; + + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = txgbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); + } + + return retval; +} + +static int txgbe_enable_port_vlan(struct txgbe_adapter *adapter, + int vf, u16 vlan, u8 qos) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + txgbe_set_vmvir(adapter, vlan, qos, vf); + txgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + txgbe_write_qde(adapter, vf, 1); + txgbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__TXGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting 
to use the VF device.\n"); + } + +out: + return err; +} + +static int txgbe_disable_port_vlan(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + + err = txgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + txgbe_clear_vmvir(adapter, vf); + txgbe_set_vmolr(hw, vf, true); + TCALL(hw, mac.ops.set_vlan_anti_spoofing, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + txgbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + +int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) + +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if (vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
+ */ + if (adapter->vfinfo[vf].pf_vlan) + err = txgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = txgbe_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + err = txgbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} + +static void txgbe_set_vf_rate_limit(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_hw *hw = &adapter->hw; + u32 bcnrc_val; + u16 queue, queues_per_pool; + u16 max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + u16 min_tx_rate = adapter->vfinfo[vf].min_tx_rate; + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + max_tx_rate /= queues_per_pool; + bcnrc_val = TXGBE_TDM_RP_RATE_MAX(max_tx_rate); + + min_tx_rate /= queues_per_pool; + bcnrc_val |= TXGBE_TDM_RP_RATE_MIN(min_tx_rate); + + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. 
+ */ + wr32(hw, TXGBE_TDM_MMW, 0x14); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + wr32(hw, TXGBE_TDM_RP_IDX, reg_idx); + wr32(hw, TXGBE_TDM_RP_RATE, bcnrc_val); + if (max_tx_rate) + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, TXGBE_TDM_RP_CTL_RLEN); + else + wr32m(hw, TXGBE_TDM_RP_CTL, + TXGBE_TDM_RP_CTL_RLEN, 0); + } +} + +int txgbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 1 or 10 Gbps */ + if (adapter->link_speed < TXGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + /* store values */ + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + /* update hardware configuration */ + txgbe_set_vf_rate_limit(adapter, vf); + + return 0; +} + +int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, TXGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } else { + regval = (setting << (vf - 32)); + wr32m(hw, TXGBE_TDM_MAC_AS_H, + regval | (1 << (vf - 32)), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, TXGBE_TDM_VLAN_AS_H, + regval | (1 << (vf - 32)), regval); + } + } + return 0; +} + +/** + * txgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or 
reset correct transmit and receive for vf + **/ +static void txgbe_set_vf_rx_tx(struct txgbe_adapter *adapter, int vf) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = rd32(hw, TXGBE_TDM_VF_TE(reg_offset)); + reg_cur_rx = rd32(hw, TXGBE_RDM_VF_RE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), reg_req_rx); + } else { + reg_req_tx = 1 << vf_shift; + reg_req_rx = 1 << vf_shift; + /* Disable particular VF */ + if (reg_cur_tx & reg_req_tx) + wr32(hw, TXGBE_TDM_VFTE_CLR(reg_offset), reg_req_tx); + if (reg_cur_rx & reg_req_rx) + wr32(hw, TXGBE_RDM_VFRE_CLR(reg_offset), reg_req_rx); + } +} + +/** + * txgbe_set_vf_link_state - Set link state + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * @state: required link state + * + * Set a link force state on/off a single vf + **/ +void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state) +{ + adapter->vfinfo[vf].link_state = state; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (test_bit(__TXGBE_DOWN, &adapter->state)) + adapter->vfinfo[vf].link_enable = false; + else + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_ENABLE: + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + adapter->vfinfo[vf].link_enable = false; + break; + } + + txgbe_set_vf_rx_tx(adapter, vf); + + /* restart the VF */ + adapter->vfinfo[vf].clear_to_send = false; + txgbe_ping_vf(adapter, vf); +} + +/** + * txgbe_ndo_set_vf_link_state - Set link state + * @netdev: network interface device structure + * @vf: VF identifier + * @state: 
required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF link - invalid VF identifier %d\n", vf); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state %d - not supported\n", + vf, state); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state disable\n", vf); + txgbe_set_vf_link_state(adapter, vf, state); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d link state auto\n", vf); + txgbe_set_vf_link_state(adapter, vf, state); + break; + default: + dev_err(pci_dev_to_dev(adapter->pdev), + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } +out: + return ret; +} + +int txgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + + ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate; + + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; + ivi->linkstate = adapter->vfinfo[vf].link_state; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h new file mode 100644 index 000000000000..17d4205eada7 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_sriov.h @@ 
-0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_SRIOV_H_ +#define _TXGBE_SRIOV_H_ + +/* txgbe driver limit the max number of VFs could be enabled to + * 63 (TXGBE_MAX_VF_FUNCTIONS - 1) + */ +#define TXGBE_MAX_VFS_DRV_LIMIT (TXGBE_MAX_VF_FUNCTIONS - 1) + +void txgbe_restore_vf_multicasts(struct txgbe_adapter *adapter); +int txgbe_set_vf_vlan(struct txgbe_adapter *adapter, int add, int vid, u16 vf); +void txgbe_set_vmolr(struct txgbe_hw *hw, u16 vf, bool aupe); +void txgbe_msg_task(struct txgbe_adapter *adapter); +int txgbe_set_vf_mac(struct txgbe_adapter *adapter, + u16 vf, unsigned char *mac_addr); +void txgbe_disable_tx_rx(struct txgbe_adapter *adapter); +void txgbe_ping_all_vfs(struct txgbe_adapter *adapter); +void txgbe_set_all_vfs(struct txgbe_adapter *adapter); + +int txgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int txgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos, __be16 vlan_proto); + +int txgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); + +int txgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int txgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); +int txgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +int txgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); + +int txgbe_disable_sriov(struct txgbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +int txgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void txgbe_enable_sriov(struct txgbe_adapter *adapter); +#endif +int txgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +void txgbe_set_vf_link_state(struct txgbe_adapter *adapter, int vf, int state); + +/* These are defined in txgbe_type.h on behalf of the VF driver + * but we need them here unwrapped for the PF driver. 
+ */ +#define TXGBE_DEV_ID_SP_VF 0x1000 +#endif /* _TXGBE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 26197c356edc..bc6e99836206 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -51,6 +51,8 @@ /* Combined interface*/ #define TXGBE_ID_SFI_XAUI 0x50 +#define TXGBE_DCB_MAX_TRAFFIC_CLASS 8 + /* Revision ID */ #define TXGBE_SP_MPW 1 /* ETH PHY Registers */ @@ -493,6 +495,7 @@ struct txgbe_thermal_sensor_data { #define TXGBE_TDM_VLAN_AS_H 0x18074 #define TXGBE_TDM_TCP_FLG_L 0x18078 #define TXGBE_TDM_TCP_FLG_H 0x1807C +#define TXGBE_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4)) #define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */ /* TDM CTL BIT */ #define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ @@ -535,6 +538,7 @@ struct txgbe_thermal_sensor_data { #define TXGBE_RDM_ARB_CFG(_i) (0x12040 + ((_i) * 4)) /* 8 of these (0-7) */ #define TXGBE_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) #define TXGBE_RDM_PF_HIDE(_i) (0x12090 + ((_i) * 4)) +#define TXGBE_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) /* VFRE bitmask */ #define TXGBE_RDM_VF_RE_ENABLE_ALL 0xFFFFFFFFU @@ -1403,9 +1407,9 @@ enum TXGBE_MSCA_CMD_value { #define TXGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ #define TXGBE_PX_TR_CFG_THRE_SHIFT 8 -#define TXGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ +#define TXGBE_PX_TR_RPN(q_per_pool, vf_number, vf_q_index) \ (TXGBE_PX_TR_RP((q_per_pool) * (vf_number) + (vf_q_index))) -#define TXGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ +#define TXGBE_PX_TR_WPN(q_per_pool, vf_number, vf_q_index) \ (TXGBE_PX_TR_WP((q_per_pool) * (vf_number) + (vf_q_index))) /* Receive DMA Registers */ @@ -2370,8 +2374,8 @@ struct txgbe_bus_info { /* Flow control parameters */ struct txgbe_fc_info { - u32 high_water; /* Flow Ctrl High-water */ - u32 low_water; /* Flow Ctrl Low-water */ + u32 
high_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ u16 pause_time; /* Flow Control Pause timer */ bool disable_fc_autoneg; /* Do not autonegotiate FC */ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ @@ -2525,6 +2529,8 @@ struct txgbe_mac_operations { s32 (*clear_vfta)(struct txgbe_hw *hw); s32 (*set_vfta)(struct txgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); s32 (*init_uta_tables)(struct txgbe_hw *hw); + void (*set_ethertype_anti_spoofing)(struct txgbe_hw *hw, bool enable, int vf); + void (*set_vlan_anti_spoofing)(struct txgbe_hw *hw, bool enable, int pf); /* Flow Control */ s32 (*fc_enable)(struct txgbe_hw *hw); @@ -2604,6 +2610,37 @@ struct txgbe_phy_info { txgbe_physical_layer link_mode; }; +#include "txgbe_mbx.h" + +struct txgbe_mbx_operations { + void (*init_params)(struct txgbe_hw *hw); + s32 (*read)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf); + s32 (*write)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*check_for_msg)(struct txgbe_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct txgbe_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct txgbe_hw *hw, u16 mbx_id); +}; + +struct txgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct txgbe_mbx_info { + struct txgbe_mbx_operations ops; + struct txgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + enum txgbe_reset_type { TXGBE_LAN_RESET = 0, TXGBE_SW_RESET, @@ -2618,12 +2655,14 @@ enum txgbe_link_status { struct txgbe_hw { u8 __iomem *hw_addr; + void *back; struct txgbe_mac_info mac; struct txgbe_addr_filter_info addr_ctrl; struct txgbe_fc_info fc; struct txgbe_phy_info phy; struct txgbe_eeprom_info eeprom; struct txgbe_bus_info bus; + struct 
txgbe_mbx_info mbx; u16 device_id; u16 vendor_id; u16 subsystem_device_id; -- Gitee From 508515deaa2fd86a7c5f3770673a7f2ec9757e8a Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Tue, 18 Jul 2023 01:58:32 -0400 Subject: [PATCH 03/10] anolis: net: txgbe: add lldp support ANBZ: #5502 add lldp support for nic, user-space can use ethtool to get/set lldp private flags. Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 3 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 118 ++++++++++++++++++ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 77 ++++++++---- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 5 +- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 58 ++++++++- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 10 ++ 6 files changed, 242 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 6fb0af56b92e..59809d8e89f1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -459,6 +459,8 @@ struct txgbe_mac_addr { #define TXGBE_FLAG2_ECC_ERR_RESET BIT(29) #define TXGBE_FLAG2_PCIE_NEED_RECOVER BIT(31) +#define TXGBE_ETH_PRIV_FLAG_LLDP BIT(0) + /* preset defaults */ #define TXGBE_FLAGS_SP_INIT (TXGBE_FLAG_MSI_CAPABLE \ | TXGBE_FLAG_MSIX_CAPABLE \ @@ -658,6 +660,7 @@ struct txgbe_adapter { unsigned int num_vmdqs; /* does not include pools assigned to VFs */ unsigned int queues_per_pool; u8 default_up; + u64 eth_priv_flags; }; /* must account for pools assigned to VFs. */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index a96260fd2cc8..d2fe565d0a47 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -133,6 +133,24 @@ static const char txgbe_gstrings_test[][ETH_GSTRING_LEN] = { #define txgbe_isbackplane(type) \ ((type == txgbe_media_type_backplane) ? 
true : false) +struct txgbe_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u64 flag; + bool read_only; +}; + +#define TXGBE_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct txgbe_priv_flags txgbe_gstrings_priv_flags[] = { + TXGBE_PRIV_FLAG("lldp", TXGBE_ETH_PRIV_FLAG_LLDP, 0), +}; + +#define TXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(txgbe_gstrings_priv_flags) + static __u32 txgbe_backplane_type(struct txgbe_hw *hw) { __u32 mode = 0x00; @@ -1140,6 +1158,8 @@ static int txgbe_get_sset_count(struct net_device *netdev, int sset) } else { return TXGBE_STATS_LEN; } + case ETH_SS_PRIV_FLAGS: + return TXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1219,6 +1239,18 @@ static void txgbe_get_ethtool_stats(struct net_device *netdev, } } +static void txgbe_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + txgbe_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + static void txgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { @@ -1263,6 +1295,9 @@ static void txgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; } break; + case ETH_SS_PRIV_FLAGS: + txgbe_get_priv_flag_strings(netdev, data); + break; } } @@ -3169,6 +3204,87 @@ static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) return ret; } +/** + * txgbe_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the txgbe_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. 
+ **/ +static u32 txgbe_get_priv_flags(struct net_device *dev) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u32 i; + u32 ret_flags = 0; + + if (txgbe_is_lldp(hw)) + e_err(drv, "Can not get lldp flags from flash\n"); + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct txgbe_priv_flags *priv_flags; + + priv_flags = &txgbe_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->eth_priv_flags) + ret_flags |= BIT(i); + } + return ret_flags; +} + +/** + * txgbe_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int txgbe_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u32 orig_flags, new_flags, changed_flags; + bool reset_needed = 0; + u32 i; + s32 status = 0; + + orig_flags = adapter->eth_priv_flags; + new_flags = orig_flags; + + for (i = 0; i < TXGBE_PRIV_FLAGS_STR_LEN; i++) { + const struct txgbe_priv_flags *priv_flags; + + priv_flags = &txgbe_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + changed_flags = orig_flags ^ new_flags; + + if (!changed_flags) + return 0; + + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) + reset_needed = 1; + + if (changed_flags & TXGBE_ETH_PRIV_FLAG_LLDP) + status = txgbe_hic_write_lldp(&adapter->hw, + (u32)(new_flags & TXGBE_ETH_PRIV_FLAG_LLDP)); + + if (!status) + adapter->eth_priv_flags = new_flags; + + return status; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_link_ksettings = txgbe_get_link_ksettings, @@ -3193,6 +3309,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_strings = txgbe_get_strings, .set_phys_id = 
txgbe_set_phys_id, .get_sset_count = txgbe_get_sset_count, + .get_priv_flags = txgbe_get_priv_flags, + .set_priv_flags = txgbe_set_priv_flags, .get_ethtool_stats = txgbe_get_ethtool_stats, .get_coalesce = txgbe_get_coalesce, .set_coalesce = txgbe_set_coalesce, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 0f068b5e8bf9..1eece84422ae 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -2330,14 +2330,18 @@ u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr) return 0; } -u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr) +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data) { - u8 status = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + int ret = 0; - if (status) - return (u32)status; + ret = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (ret < 0) + return ret; + + *data = rd32(hw, SPI_H_DAT_REG_ADDR); + + return ret; - return rd32(hw, SPI_H_DAT_REG_ADDR); } /** @@ -4474,21 +4478,14 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) pci_wake_from_d3(adapter->pdev, false); } } else { - if (txgbe_mng_present(hw)) { - if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { - txgbe_reset_hostif(hw); - } - } else { - if (hw->bus.lan_id == 0) - reset = TXGBE_MIS_RST_LAN0_RST; - else - reset = TXGBE_MIS_RST_LAN1_RST; + if (hw->bus.lan_id == 0) + reset = TXGBE_MIS_RST_LAN0_RST; + else + reset = TXGBE_MIS_RST_LAN1_RST; + + wr32(hw, TXGBE_MIS_RST, reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); - wr32(hw, TXGBE_MIS_RST, - reset | rd32(hw, TXGBE_MIS_RST)); - TXGBE_WRITE_FLUSH(hw); - } usec_delay(10); if (hw->bus.lan_id == 0) @@ -4536,9 +4533,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) /*make sure phy power is up*/ msleep(100); - /* Store the permanent mac address */ - TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); - /* Store MAC 
address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. @@ -5863,3 +5857,42 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, return 0; } + +int txgbe_is_lldp(struct txgbe_hw *hw) +{ + u32 tmp = 0, lldp_flash_data = 0, i = 0; + struct txgbe_adapter *adapter = hw->back; + s32 status = 0; + + for (; i < 0x1000 / sizeof(u32); i++) { + status = txgbe_flash_read_dword(hw, TXGBE_LLDP_REG + i * 4, &tmp); + if (status) + return status; + if (tmp == U32_MAX) + break; + lldp_flash_data = tmp; + } + if (lldp_flash_data & BIT(hw->bus.lan_id)) + adapter->eth_priv_flags |= TXGBE_ETH_PRIV_FLAG_LLDP; + else + adapter->eth_priv_flags &= ~TXGBE_ETH_PRIV_FLAG_LLDP; + + return 0; +} + +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw, u32 open) +{ + int status; + struct txgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct txgbe_hic_write_lldp buffer; + + buffer.hdr.cmd = 0xf1 - open; + buffer.hdr.buf_len = 0x1; + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + buffer.hdr.checksum = FW_DEFAULT_CHECKSUM; + buffer.func = PCI_FUNC(pdev->devfn); + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + return status; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index afc2668d368a..375b2a772b6e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -224,6 +224,9 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, bool autoneg); u8 fmgr_cmd_op(struct txgbe_hw *hw, u32 cmd, u32 cmd_addr); -u32 txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr); +int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data); + +int txgbe_is_lldp(struct txgbe_hw *hw); +s32 txgbe_hic_write_lldp(struct txgbe_hw *hw, u32 open); #endif /* _TXGBE_HW_H_ */ diff --git 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 4fa3a087afa8..22d0dc7b4a66 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -3351,11 +3351,42 @@ static void txgbe_setup_gpie(struct txgbe_adapter *adapter) wr32(hw, TXGBE_PX_GPIE, gpie); } +static void reinit_gpio_int(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg; + + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } + + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); +} + static void txgbe_up_complete(struct txgbe_adapter *adapter) { struct txgbe_hw *hw = &adapter->hw; u32 links_reg; + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); + txgbe_get_hw_control(adapter); txgbe_setup_gpie(adapter); @@ -3494,6 +3525,8 @@ void txgbe_reset(struct txgbe_adapter *adapter) /* update SAN MAC vmdq pool selection */ TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + if (txgbe_is_lldp(hw)) + e_dev_err("Can not get lldp flags from flash\n"); if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) txgbe_ptp_reset(adapter); @@ -3699,7 +3732,8 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) } if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || - ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + ((hw->subsystem_device_id & 
TXGBE_WOL_MASK) == TXGBE_WOL_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) { /* disable mac transmiter */ wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, 0); } @@ -3712,6 +3746,9 @@ void txgbe_disable_device(struct txgbe_adapter *adapter) /* Disable the Tx DMA engine */ wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); + + /* workaround gpio int lost in lldp-on condition */ + reinit_gpio_int(adapter); } void txgbe_down(struct txgbe_adapter *adapter) @@ -3721,7 +3758,8 @@ void txgbe_down(struct txgbe_adapter *adapter) txgbe_disable_device(adapter); txgbe_reset(adapter); - if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))) + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) /* power down the optics for SFP+ fiber */ TCALL(&adapter->hw, mac.ops.disable_tx_laser); @@ -3766,7 +3804,7 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; } else { - ssid = txgbe_flash_read_dword(hw, 0xfffdc); + txgbe_flash_read_dword(hw, 0xfffdc, &ssid); if (ssid == 0x1) { netif_err(adapter, probe, adapter->netdev, "read of internal subsystem device id failed\n"); @@ -3843,6 +3881,8 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; adapter->num_vmdqs = 1; + if (txgbe_is_lldp(hw)) + e_dev_err("Can not get lldp flags from flash\n"); set_bit(0, &adapter->fwd_bitmask); set_bit(__TXGBE_DOWN, &adapter->state); @@ -4246,7 +4286,8 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter) txgbe_ptp_suspend(adapter); txgbe_disable_device(adapter); - if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) TCALL(hw, mac.ops.disable_tx_laser); txgbe_clean_all_tx_rings(adapter); 
txgbe_clean_all_rx_rings(adapter); @@ -4348,6 +4389,7 @@ static int txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) #endif netif_device_detach(netdev); + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); rtnl_lock(); if (netif_running(netdev)) @@ -6491,6 +6533,8 @@ static int txgbe_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "HW Init failed: %d\n", err); goto err_free_mac_table; } + /* Store the permanent mac address */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); #ifdef CONFIG_PCI_IOV if (adapter->num_vfs > 0) { @@ -6660,7 +6704,8 @@ static int txgbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, adapter); adapter->netdev_registered = true; - if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP || + adapter->eth_priv_flags & TXGBE_ETH_PRIV_FLAG_LLDP)) /* power down the optics for SFP+ fiber */ TCALL(hw, mac.ops.disable_tx_laser); @@ -6788,9 +6833,10 @@ static void txgbe_remove(struct pci_dev *pdev) struct txgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; + struct txgbe_hw *hw = &adapter->hw; netdev = adapter->netdev; - + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); txgbe_dbg_adapter_exit(adapter); set_bit(__TXGBE_REMOVING, &adapter->state); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index bc6e99836206..733fc3f0346a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -2678,6 +2678,13 @@ struct txgbe_hw { u16 oem_svid; }; +struct txgbe_hic_write_lldp { + struct txgbe_hic_hdr hdr; + u8 func; + u8 pad2; + u16 pad3; +}; + #define TCALL(hw, func, args...) (((hw)->func) \ ? 
(hw)->func((hw), ##args) : TXGBE_NOT_IMPLEMENTED) @@ -2743,6 +2750,9 @@ struct txgbe_hw { #define TXGBE_FAILED_READ_REG 0xffffffffU #define TXGBE_FAILED_READ_REG64 0xffffffffffffffffULL +#define TXGBE_LLDP_REG 0xf1000 +#define TXGBE_LLDP_ON 0x0000000f + static inline bool TXGBE_REMOVED(void __iomem *addr) { return unlikely(!addr); -- Gitee From 44b8aeb916bed1250f6461d33a57e37771d08fd7 Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Tue, 18 Jul 2023 02:02:48 -0400 Subject: [PATCH 04/10] anolis: net: txgbe: add protect for vr reset ANBZ: #5502 when transmitting packets, unpluging and pluging fiber optic cables, may cause tx can't work. so add protect for vr rst in setup link to ensure transmission. Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 61 +++++++++++++++++++ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 3 + .../net/ethernet/wangxun/txgbe/txgbe_type.h | 2 + 3 files changed, 66 insertions(+) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 1eece84422ae..c70e8c0fe689 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -1423,6 +1423,49 @@ s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) return 0; } +/** + * txgbe_disable_sec_tx_path - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the tx security block + **/ +s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw) +{ +#define TXGBE_MAX_SECTX_POLL 40 + + int i; + int secrxreg; + + wr32m(hw, TXGBE_TSC_CTL, TXGBE_TSC_CTL_TX_DIS, TXGBE_TSC_CTL_TX_DIS); + for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) { + secrxreg = rd32(hw, TXGBE_TSC_ST); + if (!(secrxreg & TXGBE_TSC_ST_SECTX_RDY)) + usec_delay(1000); + else + break; + } + + /* For informational purposes only */ + if (i >= TXGBE_MAX_SECTX_POLL) + ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE, "disable tx sec 
failed.\n"); + + return 0; +} + +/** + * txgbe_enable_sec_tx_path - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. + **/ +s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw) +{ + wr32m(hw, TXGBE_TSC_CTL, TXGBE_TSC_CTL_TX_DIS, 0); + TXGBE_WRITE_FLUSH(hw); + return 0; +} + /** * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM * @hw: pointer to hardware structure @@ -3156,6 +3199,8 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.start_hw = txgbe_start_hw; mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr; mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix; + mac->ops.disable_sec_tx_path = txgbe_disable_sec_tx_path; + mac->ops.enable_sec_tx_path = txgbe_enable_sec_tx_path; /* LEDs */ mac->ops.led_on = txgbe_led_on; @@ -3605,6 +3650,9 @@ s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); + + TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. Disable xpcs AN-73 */ if (!autoneg) @@ -3766,6 +3814,9 @@ s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) } out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + return status; } @@ -3797,6 +3848,8 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, u32 speed, bool autoneg) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); + TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. 
Disable xpcs AN-73 */ if (!autoneg) @@ -3979,6 +4032,9 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, u32 speed, bool autoneg) txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + return status; } @@ -4005,6 +4061,8 @@ static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); + TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. Disable xpcs AN-73 */ txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); @@ -4254,6 +4312,9 @@ static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed) } out: + TCALL(hw, mac.ops.enable_sec_tx_path); + wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + return status; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index 375b2a772b6e..09c34ffedf78 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -229,4 +229,7 @@ int txgbe_flash_read_dword(struct txgbe_hw *hw, u32 addr, u32 *data); int txgbe_is_lldp(struct txgbe_hw *hw); s32 txgbe_hic_write_lldp(struct txgbe_hw *hw, u32 open); +s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw); + #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 733fc3f0346a..4a4d779dbc61 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -2543,6 +2543,8 @@ struct txgbe_mac_operations { s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw); s32 (*disable_rx)(struct txgbe_hw *hw); s32 (*enable_rx)(struct txgbe_hw *hw); + s32 (*disable_sec_tx_path)(struct txgbe_hw *hw); + s32 (*enable_sec_tx_path)(struct 
txgbe_hw *hw); }; struct txgbe_phy_operations { -- Gitee From 9e71e04c3ff63058e23bd3f72d2db66a98c38657 Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Mon, 31 Jul 2023 02:15:10 -0400 Subject: [PATCH 05/10] anolis: net: txgbe: add rx fc count ANBZ: #5502 add ethtool show rx flow control frame count display. Signed-off-by: DuanqiangWen --- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 22d0dc7b4a66..1fa1b8010d49 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -4524,6 +4524,30 @@ static void txgbe_get_stats64(struct net_device *netdev, stats->rx_missed_errors = netdev->stats.rx_missed_errors; } +static void txgbe_update_xoff_rx_lfc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if (hw->fc.current_mode != txgbe_fc_full && + hw->fc.current_mode != txgbe_fc_rx_pause) + return; + + data = rd32(hw, TXGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__TXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); +} + /** * txgbe_update_stats - Update the board statistics counters. 
* @adapter: board private structure @@ -4608,6 +4632,8 @@ void txgbe_update_stats(struct txgbe_adapter *adapter) hwstats->gprc += rd32(hw, TXGBE_PX_GPRC); + txgbe_update_xoff_rx_lfc(adapter); + hwstats->o2bgptc += rd32(hw, TXGBE_TDM_OS2BMC_CNT); if (txgbe_check_mng_access(&adapter->hw)) { hwstats->o2bspc += rd32(hw, TXGBE_MNG_OS2BMC_CNT); -- Gitee From 1929a59d9deeb7c03867c00570145d51f361eb3f Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Tue, 1 Aug 2023 04:01:13 -0400 Subject: [PATCH 06/10] anolis: net: txgbe: fix rxhash bug ANBZ: #5502 ethtool -k to set rxhash off/on, but it is not effectable, beacause drivers don't implement ndo_fix/set_features interface. Signed-off-by: DuanqiangWen --- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 112 +++++++++++++++++- 1 file changed, 111 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 1fa1b8010d49..fc3328a9fe8b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -6388,6 +6388,115 @@ txgbe_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static netdev_features_t txgbe_fix_features(struct net_device *netdev, netdev_features_t features) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + else + features |= NETIF_F_HW_VLAN_STAG_RX; + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + else + features |= NETIF_F_HW_VLAN_STAG_TX; + + return features; +} + +static int txgbe_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct 
txgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if ((netdev->features ^ features) & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, falling back to GRO\n"); + } + } + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if VMDq is enabled */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } + + if (features & NETIF_F_HW_VLAN_CTAG_RX && features & NETIF_F_HW_VLAN_STAG_RX) + txgbe_vlan_strip_enable(adapter); + else + txgbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_RXHASH) { + if 
(!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, + TXGBE_RDB_RA_CTL_RSS_EN, TXGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, TXGBE_RDB_RA_CTL, + TXGBE_RDB_RA_CTL_RSS_EN, ~TXGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~TXGBE_FLAG2_RSS_ENABLED; + } + } + + if (need_reset) + txgbe_do_reset(netdev); + + return 0; +} + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -6410,7 +6519,8 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_set_vf_vlan = txgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = txgbe_ndo_set_vf_bw, .ndo_get_vf_config = txgbe_ndo_get_vf_config, - + .ndo_fix_features = txgbe_fix_features, + .ndo_set_features = txgbe_set_features, }; void txgbe_assign_netdev_ops(struct net_device *dev) -- Gitee From d89ad12ae92fc2e147fb5d51d71bd7f2634d6f98 Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Mon, 7 Aug 2023 21:37:07 -0400 Subject: [PATCH 07/10] anolis: net: txgbe: add 1000M autoneg on/off switch ANBZ: #5502 add support for ethtool to set link speed 1000Base-X, and add switch for on/off switch for 1000Base-X. Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe.h | 1 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 511 ++++++++++++++---- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 83 +-- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 2 + 4 files changed, 416 insertions(+), 181 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h index 59809d8e89f1..88c816a6468e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe.h @@ -661,6 +661,7 @@ struct txgbe_adapter { unsigned int queues_per_pool; u8 default_up; u64 eth_priv_flags; + u8 an37; }; /* must account for pools assigned to VFs. 
*/ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index d2fe565d0a47..c09916863b58 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -19,6 +19,7 @@ #define TXGBE_ALL_RAR_ENTRIES 16 #define TXGBE_RSS_INDIR_TBL_MAX 64 +#define ETHTOOL_LINK_MODE_SPEED_MASK 0xfffe903f enum {NETDEV_STATS, TXGBE_STATS}; @@ -151,94 +152,221 @@ static const struct txgbe_priv_flags txgbe_gstrings_priv_flags[] = { #define TXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(txgbe_gstrings_priv_flags) -static __u32 txgbe_backplane_type(struct txgbe_hw *hw) -{ - __u32 mode = 0x00; +static int txgbe_set_advertising_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd, + u32 advertised_speed) +{ + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); + } + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); - switch (hw->phy.link_mode) { - case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: - mode = SUPPORTED_10000baseKX4_Full; break; - case TXGBE_PHYSICAL_LAYER_10GBASE_KR: - mode = SUPPORTED_10000baseKR_Full; + case txgbe_sfp_type_sr: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); + + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); + break; - case TXGBE_PHYSICAL_LAYER_1000BASE_KX: - mode = SUPPORTED_1000baseKX_Full; + 
case txgbe_sfp_type_lr: + if (advertised_speed & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); + + if (advertised_speed & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); + + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseX_Full); break; default: - mode = (SUPPORTED_10000baseKX4_Full | - SUPPORTED_10000baseKR_Full | - SUPPORTED_1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); break; } - return mode; + + return 0; } -static int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) +static int txgbe_set_supported_1g_10gtypes(struct txgbe_hw *hw, + struct ethtool_link_ksettings *cmd) +{ + switch (hw->phy.sfp_type) { + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_act_lmt_core0: + case txgbe_sfp_type_da_act_lmt_core1: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); + break; + case txgbe_sfp_type_sr: + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); + break; + case txgbe_sfp_type_lr: + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + break; + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case 
txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseX_Full); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + break; + } + + return 0; +} + +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; u32 supported_link; u32 link_speed = 0; bool autoneg = false; - u32 supported, advertising; bool link_up; - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + autoneg = adapter->an37 ? 1 : 0; + /* set the supported link speeds */ - if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) - supported |= (txgbe_isbackplane(hw->phy.media_type)) ? - txgbe_backplane_type(hw) : SUPPORTED_10000baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) - supported |= (txgbe_isbackplane(hw->phy.media_type)) ? 
- SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_100_FULL) - supported |= SUPPORTED_100baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_10_FULL) - supported |= SUPPORTED_10baseT_Full; - - /* default advertised speed if phy.autoneg_advertised isn't set */ - advertising = supported; + if (hw->phy.media_type == txgbe_media_type_copper) { + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + if ((supported_link & TXGBE_LINK_SPEED_10GB_FULL) || + (supported_link & TXGBE_LINK_SPEED_1GB_FULL)) + txgbe_set_supported_1g_10gtypes(hw, cmd); + if (hw->phy.multispeed_fiber) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseX_Full); + } else { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKX4_Full); + } /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { - advertising = 0; - if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) - advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) - advertising |= (supported & ADVERTISED_MASK_10G); + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + 
hw->phy.autoneg_advertised); + } else { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + } + } if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { - if (supported & SUPPORTED_1000baseKX_Full) - advertising |= ADVERTISED_1000baseKX_Full; + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + hw->phy.autoneg_advertised); else - advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) - advertising |= ADVERTISED_10baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } else { - /* default modes in case phy.autoneg_advertised isn't set */ - if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) - advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) - advertising |= ADVERTISED_1000baseT_Full; - if (supported_link & TXGBE_LINK_SPEED_100_FULL) - advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.multispeed_fiber && !autoneg) { - if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) - advertising = ADVERTISED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_10GB_FULL); + } else { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKR_Full); + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseKX4_Full); + } + } + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + else if (hw->phy.media_type == txgbe_media_type_fiber) + txgbe_set_advertising_1g_10gtypes(hw, cmd, + TXGBE_LINK_SPEED_1GB_FULL); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } + if (supported_link & TXGBE_LINK_SPEED_100_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (supported_link & TXGBE_LINK_SPEED_10_FULL) { + if (hw->phy.media_type == txgbe_media_type_copper) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); } - if (supported_link & TXGBE_LINK_SPEED_10_FULL) - advertising |= ADVERTISED_10baseT_Full; } if (autoneg) { - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; } else { cmd->base.autoneg = AUTONEG_DISABLE; @@ -249,13 +377,13 @@ static int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_phy_tn: case txgbe_phy_aq: case txgbe_phy_cu_unknown: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case txgbe_phy_qt: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case txgbe_phy_nl: @@ -270,8 +398,8 @@ static int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_sfp_type_da_cu: case 
txgbe_sfp_type_da_cu_core0: case txgbe_sfp_type_da_cu_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_DA; break; case txgbe_sfp_type_sr: @@ -282,67 +410,84 @@ static int txgbe_get_link_ksettings(struct net_device *netdev, case txgbe_sfp_type_1g_sx_core1: case txgbe_sfp_type_1g_lx_core0: case txgbe_sfp_type_1g_lx_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case txgbe_sfp_type_not_present: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_NONE; break; case txgbe_sfp_type_1g_cu_core0: case txgbe_sfp_type_1g_cu_core1: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case txgbe_sfp_type_unknown: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_OTHER; break; } break; case txgbe_phy_xaui: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case txgbe_phy_unknown: case txgbe_phy_generic: case txgbe_phy_sfp_unsupported: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, 
FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_OTHER; break; } + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI && + (hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) { + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = -1; + cmd->base.duplex = -1; + + return 0; + } + } if (!in_interrupt()) { TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); } else { + /* this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ link_speed = adapter->link_speed; link_up = adapter->link_up; } - supported |= SUPPORTED_Pause; + /* Indicate pause support */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); switch (hw->fc.requested_mode) { case txgbe_fc_full: - advertising |= ADVERTISED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); break; case txgbe_fc_rx_pause: - advertising |= ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; case txgbe_fc_tx_pause: - advertising |= ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; default: - advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); } if (link_up) { @@ -367,57 +512,121 @@ static int txgbe_get_link_ksettings(struct net_device *netdev, cmd->base.speed = -1; cmd->base.duplex = -1; } - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + if (!(ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 
10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full)) && + (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full)) + ) { + if (!adapter->an37) + ethtool_link_ksettings_del_link_mode(cmd, advertising, Autoneg); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = adapter->an37; + } return 0; } static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) + const struct ethtool_link_ksettings *cmd) { struct txgbe_adapter *adapter = netdev_priv(netdev); struct txgbe_hw *hw = &adapter->hw; u32 advertised, old; s32 err = 0; - u32 supported, advertising; - - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - - if (hw->phy.multispeed_fiber) { + struct ethtool_link_ksettings temp_ks; + u32 curr_autoneg = 2; + + if (hw->phy.media_type == txgbe_media_type_copper || hw->phy.multispeed_fiber) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + /* To be compatible with test cases */ + if (hw->phy.media_type == txgbe_media_type_fiber) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 10000baseLR_Full); + } + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); + 
ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); + } + } /* this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (advertising & ~supported) + if (!bitmap_subset(cmd->link_modes.advertising, + temp_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; /* only allow one speed at a time if no autoneg */ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { - if (advertising == (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full)) + if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseSR_Full) && + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full)) | + (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full) && + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full))) return -EINVAL; } old = hw->phy.autoneg_advertised; advertised = 0; - if (advertising & ADVERTISED_10000baseT_Full) + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) advertised |= TXGBE_LINK_SPEED_10GB_FULL; - if (advertising & ADVERTISED_1000baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) advertised |= TXGBE_LINK_SPEED_1GB_FULL; - if (advertising & ADVERTISED_100baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) advertised |= TXGBE_LINK_SPEED_100_FULL; - if (advertising & ADVERTISED_10baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) advertised |= TXGBE_LINK_SPEED_10_FULL; - if (old == advertised) - return err; + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII || + (advertised == 
TXGBE_LINK_SPEED_1GB_FULL && + hw->phy.multispeed_fiber)) + adapter->an37 = cmd->base.autoneg ? 1 : 0; + + if (advertised == TXGBE_LINK_SPEED_1GB_FULL && + hw->phy.media_type != txgbe_media_type_copper) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + if (old == advertised && curr_autoneg == adapter->an37) + return err; + } else { + if (old == advertised) + return err; + } /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); @@ -425,45 +634,113 @@ static int txgbe_set_link_ksettings(struct net_device *netdev, hw->mac.autotry_restart = true; err = TCALL(hw, mac.ops.setup_link, advertised, true); if (err) { - netif_info(adapter, probe, adapter->netdev, - "setup link failed with code %d\n", err); + e_info(probe, "setup link failed with code %d\n", err); TCALL(hw, mac.ops.setup_link, old, true); } if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) TCALL(hw, mac.ops.flap_tx_laser); clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || - (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { if (!cmd->base.autoneg) { - if (advertising == (ADVERTISED_10000baseKR_Full | - ADVERTISED_1000baseKX_Full | - ADVERTISED_10000baseKX4_Full)) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full)) return -EINVAL; } else { err = txgbe_set_link_to_kr(hw, 1); return err; } advertised = 0; - if (advertising & ADVERTISED_10000baseKR_Full) { + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full)) { err = txgbe_set_link_to_kr(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - } else if 
(advertising & ADVERTISED_10000baseKX4_Full) { + return err; + } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full)) { err = txgbe_set_link_to_kx4(hw, 1); advertised |= TXGBE_LINK_SPEED_10GB_FULL; - } else if (advertising & ADVERTISED_1000baseKX_Full) { + return err; + } else if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full)) { advertised |= TXGBE_LINK_SPEED_1GB_FULL; err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); + return err; } + return err; } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = cmd->base.speed; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full)) { + if (cmd->base.autoneg == AUTONEG_ENABLE || + !ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } else if ((ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseKX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full))) { + memcpy(&temp_ks, cmd, sizeof(struct ethtool_link_ksettings)); + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) { + ethtool_link_ksettings_add_link_mode(&temp_ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(&temp_ks, supported, + 1000baseX_Full); + } - if (cmd->base.autoneg == AUTONEG_ENABLE || - advertising != ADVERTISED_10000baseT_Full || - (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - } + if (!bitmap_subset(cmd->link_modes.advertising, + temp_ks.link_modes.supported, + 
__ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + adapter->an37 = cmd->base.autoneg ? 1 : 0; + if (advertised == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + if (old == advertised && curr_autoneg == adapter->an37) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } + } return err; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index c70e8c0fe689..98477aaa2551 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -1423,49 +1423,6 @@ s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) return 0; } -/** - * txgbe_disable_sec_tx_path - Stops the transmit data path - * @hw: pointer to hardware structure - * - * Stops the transmit data path and waits for the HW to internally empty - * the tx security block - **/ -s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw) -{ -#define TXGBE_MAX_SECTX_POLL 40 - - int i; - int secrxreg; - - wr32m(hw, TXGBE_TSC_CTL, TXGBE_TSC_CTL_TX_DIS, TXGBE_TSC_CTL_TX_DIS); - for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) { - secrxreg = rd32(hw, TXGBE_TSC_ST); - if (!(secrxreg & TXGBE_TSC_ST_SECTX_RDY)) 
- usec_delay(1000); - else - break; - } - - /* For informational purposes only */ - if (i >= TXGBE_MAX_SECTX_POLL) - ERROR_REPORT2(hw, TXGBE_ERROR_INVALID_STATE, "disable tx sec failed.\n"); - - return 0; -} - -/** - * txgbe_enable_sec_Tx_path - Enables the transmit data path - * @hw: pointer to hardware structure - * - * Enables the transmit data path. - **/ -s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw) -{ - wr32m(hw, TXGBE_TSC_CTL, TXGBE_TSC_CTL_TX_DIS, 0); - TXGBE_WRITE_FLUSH(hw); - return 0; -} - /** * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM * @hw: pointer to hardware structure @@ -2644,6 +2601,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, u32 speedcnt = 0; u32 i = 0; bool autoneg, link_up = false; + struct txgbe_adapter *adapter = hw->back; /* Mask off requested but non-supported speeds */ status = TCALL(hw, mac.ops.get_link_capabilities, @@ -2701,6 +2659,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, } if (speed & TXGBE_LINK_SPEED_1GB_FULL) { + u32 curr_autoneg = 2; speedcnt++; if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; @@ -2711,7 +2670,14 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, if (status != 0) return status; - if (link_speed == TXGBE_LINK_SPEED_1GB_FULL && link_up) + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { + curr_autoneg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + curr_autoneg = !!(curr_autoneg & (0x1 << 12)); + } + + if (link_speed == TXGBE_LINK_SPEED_1GB_FULL && + link_up && + adapter->an37 == curr_autoneg) goto out; /* Allow module to change analog characteristics (10G->1G) */ @@ -3199,8 +3165,6 @@ s32 txgbe_init_ops(struct txgbe_hw *hw) mac->ops.start_hw = txgbe_start_hw; mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr; mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix; - mac->ops.disable_sec_tx_path = txgbe_disable_sec_tx_path; - mac->ops.enable_sec_tx_path = 
txgbe_enable_sec_tx_path; /* LEDs */ mac->ops.led_on = txgbe_led_on; @@ -3284,7 +3248,7 @@ s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) { *speed = TXGBE_LINK_SPEED_1GB_FULL; - *autoneg = false; + *autoneg = true; } else if (hw->phy.multispeed_fiber) { *speed = TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL; @@ -3540,6 +3504,7 @@ s32 txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, static s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) { u32 value; + struct txgbe_adapter *adapter = hw->back; txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002); /* for sgmii + external phy, set to 0x0105 (phy sgmii mode) */ @@ -3553,8 +3518,12 @@ static s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) } txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0200); value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); - value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9); + value = (value & ~0x1200) | (0x1 << 9); + if (adapter->an37) + value |= (0x1 << 12); + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value); + return 0; } @@ -3650,9 +3619,6 @@ s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); - - TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. Disable xpcs AN-73 */ if (!autoneg) @@ -3814,9 +3780,6 @@ s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) } out: - TCALL(hw, mac.ops.enable_sec_tx_path); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); - return status; } @@ -3848,8 +3811,6 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, u32 speed, bool autoneg) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); - TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. 
Disable xpcs AN-73 */ if (!autoneg) @@ -4032,9 +3993,6 @@ s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, u32 speed, bool autoneg) txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); out: - TCALL(hw, mac.ops.enable_sec_tx_path); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); - return status; } @@ -4061,8 +4019,6 @@ static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed) } wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, ~TXGBE_MAC_TX_CFG_TE); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, ~TXGBE_MAC_RX_CFG_RE); - TCALL(hw, mac.ops.disable_sec_tx_path); /* 2. Disable xpcs AN-73 */ txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); @@ -4312,9 +4268,6 @@ static s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, u32 speed) } out: - TCALL(hw, mac.ops.enable_sec_tx_path); - wr32m(hw, TXGBE_MAC_RX_CFG, TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); - return status; } @@ -4389,12 +4342,14 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, } else { txgbe_set_link_to_kx(hw, speed, 0); txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= speed; } } else if (txgbe_get_media_type(hw) == txgbe_media_type_fiber) { txgbe_set_link_to_sfi(hw, speed); if (speed == TXGBE_LINK_SPEED_1GB_FULL) { txgbe_setup_fc(hw); txgbe_set_sgmii_an37_ability(hw); + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; } } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index fc3328a9fe8b..e880d6a46e1e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -3880,6 +3880,8 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK; adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + adapter->an37 = 1; + adapter->num_vmdqs = 1; if (txgbe_is_lldp(hw)) e_dev_err("Can not get lldp flags from flash\n"); -- Gitee From 7fa97b5ac4479cb855ea7d0c3c191e9ac17e1fa7 Mon Sep 17 00:00:00 2001 From: DuanqiangWen 
Date: Wed, 9 Aug 2023 01:50:44 -0400 Subject: [PATCH 08/10] anolis: net: txgbe: add tx_multicast tx_broadcast statistics ANBZ: #5502 add ethtool -S ethx to show NIC tx broadcast packets count and tx multicast packets count. Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 6 ++++-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 5 +---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index c09916863b58..911f46b660d1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -57,8 +57,10 @@ static const struct txgbe_stats txgbe_gstrings_stats[] = { TXGBE_NETDEV_STAT("tx_errors", tx_errors), TXGBE_NETDEV_STAT("rx_dropped", rx_dropped), TXGBE_NETDEV_STAT("tx_dropped", tx_dropped), - TXGBE_NETDEV_STAT("multicast", multicast), - TXGBE_STAT("broadcast", stats.bprc), + TXGBE_NETDEV_STAT("rx_multicast", multicast), + TXGBE_STAT("rx_broadcast", stats.bprc), + TXGBE_STAT("tx_broadcast", stats.bptc), + TXGBE_STAT("tx_multicast", stats.mptc), TXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), TXGBE_NETDEV_STAT("collisions", collisions), TXGBE_NETDEV_STAT("rx_over_errors", rx_over_errors), diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 36ec587d6b94..2ca537ef1a28 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -17,11 +17,8 @@ s32 txgbe_check_reset_blocked(struct txgbe_hw *hw) u32 mmngc; mmngc = rd32(hw, TXGBE_MIS_ST); - if (mmngc & TXGBE_MIS_ST_MNG_VETO) { - ERROR_REPORT1(hw, TXGBE_ERROR_SOFTWARE, - "MNG_VETO bit detected.\n"); + if (mmngc & TXGBE_MIS_ST_MNG_VETO) return true; - } return false; } -- Gitee From 5fbb0a921343fafdc469f6566f39d08dca30effd Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Thu, 10 Aug 2023 22:51:09 -0400 
Subject: [PATCH 09/10] anolis: net: txgbe: fix set tx-frames-irq failed ANBZ: #5502 fix ethtool -c ethx tx-frames-irq num; previously the command returned "unsupported command". Signed-off-by: DuanqiangWen --- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 911f46b660d1..6efd79f9f6ce 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -3565,7 +3565,7 @@ static int txgbe_set_priv_flags(struct net_device *dev, u32 flags) } static const struct ethtool_ops txgbe_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, .get_drvinfo = txgbe_get_drvinfo, -- Gitee From 275e6e0765ebdfde8a8ddf74bb17fa97c300083b Mon Sep 17 00:00:00 2001 From: DuanqiangWen Date: Wed, 30 Aug 2023 02:15:36 -0400 
Signed-off-by: DuanqiangWen --- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 43 ++++++++++++++++--- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 6efd79f9f6ce..25b71e47efe9 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -2229,6 +2229,24 @@ static void txgbe_diag_test(struct net_device *netdev, set_bit(__TXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + e_warn(drv, "Please take VFS offline\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__TXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } /* Offline tests */ netif_info(adapter, hw, netdev, "offline testing starting\n"); @@ -2260,15 +2278,27 @@ static void txgbe_diag_test(struct net_device *netdev, if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { - txgbe_reset(adapter); - netif_info(adapter, hw, netdev, - "loopback testing starting\n"); - if (txgbe_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; + e_info(hw, "skip MAC loopback diagnostic when veto set\n"); + data[3] = 0; + goto skip_loopback; } - data[3] = 0; + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
+ */ + if (adapter->flags & (TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + txgbe_reset(adapter); + netif_info(adapter, hw, netdev, "loopback testing starting\n"); + if (txgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; +skip_loopback: txgbe_reset(adapter); /* clear testing bit and return adapter to previous state */ @@ -2293,6 +2323,7 @@ static void txgbe_diag_test(struct net_device *netdev, clear_bit(__TXGBE_TESTING, &adapter->state); } +skip_ol_tests: msleep_interruptible(4 * 1000); } -- Gitee