// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	IXGBEVF_STAT("tx_ipsec", tx_ipsec),
	IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

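/* Report link settings for the VF: speed and duplex come from the adapter's
 * cached link state; the VF has no PHY, so autonegotiation is reported as
 * disabled.
 */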
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	if (adapter->link_up) {
		__u32 speed = SPEED_10000;

		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

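/* Register dump for 'ethtool -d': a fixed set of IXGBE_REGS_LEN (45) VF
 * registers covering the general, interrupt, Rx DMA and Tx DMA blocks, with
 * the per-queue registers read for the first two queues.
 */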
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}

static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

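/* 'ethtool -i': report the driver name, PCI bus info and the number of
 * private flags exposed by this driver.
 */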
static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

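/* 'ethtool -G': clamp the requested descriptor counts to the supported range.
 * If the interface is down, only the ring counts are updated; otherwise
 * replacement rings are allocated and swapped in across an ixgbevf_down()/
 * ixgbevf_up() cycle so the old resources can be freed safely.
 */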
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, j, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
					     adapter->num_tx_queues +
					     adapter->num_xdp_queues));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->xdp_ring[j];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(array_size(sizeof(*rx_ring),
					     adapter->num_rx_queues));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];

			/* Clear copied XDP RX-queue info */
			memset(&rx_ring[i].xdp_rxq, 0,
			       sizeof(rx_ring[i].xdp_rxq));

			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
			*adapter->xdp_ring[j] = tx_ring[i];
		}
		adapter->xdp_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0;
		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

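/* Report how many strings each string set contains so user space can size its
 * buffers before requesting strings or statistics.
 */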
static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBEVF_PRIV_FLAGS_STR_LEN;
	default:
		return -EINVAL;
	}
}

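/* 'ethtool -S': copy the global stats first, then packet/byte counters for
 * each Tx, XDP and Rx ring, using the u64_stats seqcount to read consistent
 * pairs.
 */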
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}

	/* populate XDP queue data */
	for (j = 0; j < adapter->num_xdp_queues; j++) {
		ring = adapter->xdp_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}

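/* Provide the names for each string set; the ordering must match the values
 * written by ixgbevf_get_ethtool_stats() and ixgbevf_diag_test().
 */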
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_xdp_queues; i++) {
			sprintf(p, "xdp_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "xdp_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbevf_priv_flags_strings,
		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
		break;
	}
}

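/* Self-test helper: query the current link state through the MAC ops and
 * report 1 in *data (and as the return value) if the link is down.
 */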
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

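/* Write each test pattern (masked by 'write') to the register, read it back
 * and compare against the expected masked value, restoring the original
 * contents afterwards. Returns true and records the failing register offset
 * in *data on mismatch.
 */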
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

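/* Write a single value to the register and verify that the masked bits stick,
 * restoring the original contents afterwards.
 */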
static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

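/* Walk reg_test_vf[] and run the appropriate check for each entry; array
 * entries are spaced 0x40 bytes apart, table entries 4 or 8 bytes apart.
 */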
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}

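/* 'ethtool -t': data[0] carries the register test result, data[1] the link
 * test result. Offline testing closes a running interface, runs the register
 * test across a reset and then reopens it; online testing only checks link.
 */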
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* register test is not run online; report it as passed */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

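/* 'ethtool -r': the VF has no autonegotiation to restart, so just reinit the
 * interface if it is running.
 */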
static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

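/* 'ethtool -c': ITR settings of 0 and 1 are reported as-is (0 disables
 * moderation, 1 selects dynamic mode); larger values are stored left-shifted
 * by two and converted back to microseconds here.
 */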
static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

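/* 'ethtool -C': validate and store the new ITR settings, then program EITR on
 * every queue vector; vectors that carry both Tx and Rx use the Rx value.
 */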
static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}

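/* 'ethtool -n': only ETHTOOL_GRXRINGS (the number of Rx rings) is supported. */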
static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

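/* 'ethtool -x': X550-class VFs keep a local copy of the RSS key and
 * indirection table; older VFs have to fetch them from the PF over the
 * mailbox, which is done under mbx_lock.
 */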
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither the indirection table nor the hash key was
		 * requested, just return success without taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}

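/* Private flags: currently only "legacy-rx". Toggling it updates
 * adapter->flags and reinitializes a running interface so the Rx queues are
 * rebuilt with the new setting.
 */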
static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
		flags |= IXGBEVF_FLAGS_LEGACY_RX;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
	.get_priv_flags		= ixgbevf_get_priv_flags,
	.set_priv_flags		= ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}