// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

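/* Static key consulted in the XDP transmit path: when enabled, XDP Tx rings
 * may be shared between CPUs and therefore need per-ring locking. See the
 * users of ice_xdp_locking_key for the exact enable conditions.
 */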
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

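/* Driver-wide workqueues, allocated during module init: ice_wq runs the PF
 * service tasks, ice_lag_wq processes LAG (bonding) events.
 */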
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

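	/* The ring is circular: when the producer index (tail) has wrapped
	 * around behind the consumer index (head), add ring->count to
	 * account for the wrap.
	 */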
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

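	/* Scan the PF VSI Tx rings: a ring whose completed-packet count has
	 * not moved since the previous pass but which still has descriptors
	 * pending is treated as stalled and gets a software-triggered
	 * interrupt to revive the queue.
	 */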
	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If the packet counter has not changed, the queue
			 * is likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

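	/* The XOR of the cached and current netdev flags yields the set of
	 * flags (e.g. IFF_PROMISC, IFF_ALLMULTI) that changed since the last
	 * sync and need to be acted on below.
	 */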
	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If the filter was added successfully or already exists, do not
	 * treat it as an error. Instead, continue processing the rest of
	 * the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

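	/* Reset the VSI counts of the PF and VF scheduler aggregator nodes so
	 * they are repopulated when the VSIs are set up again.
	 */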
	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in SW structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents this from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on the safe side, reset orig_rss_size so that the normal
	 * flow of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done,
	 * rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

Jesse Brandeburg | 4326098 | 2019-08-02 01:25:31 -0700 | [diff] [blame] | 879 | /* check if autoneg completed; may be false if autoneg is not supported */ |
| 880 | if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) |
| 881 | an = "True"; |
| 882 | else |
| 883 | an = "False"; |
| 884 | |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 885 | /* Get FEC mode requested based on PHY caps last SW configuration */ |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 886 | caps = kzalloc(sizeof(*caps), GFP_KERNEL); |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 887 | if (!caps) { |
| 888 | fec_req = "Unknown"; |
Paul Greenwalt | 5ee3056 | 2020-07-09 09:16:10 -0700 | [diff] [blame] | 889 | an_advertised = "Unknown"; |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 890 | goto done; |
| 891 | } |
| 892 | |
| 893 | status = ice_aq_get_phy_caps(vsi->port_info, false, |
Anirudh Venkataramanan | d6730a8 | 2021-03-25 15:35:06 -0700 | [diff] [blame] | 894 | ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 895 | if (status) |
| 896 | netdev_info(vsi->netdev, "Get phy capability failed.\n"); |
| 897 | |
Paul Greenwalt | 5ee3056 | 2020-07-09 09:16:10 -0700 | [diff] [blame] | 898 | an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; |
| 899 | |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 900 | if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || |
| 901 | caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) |
| 902 | fec_req = "RS-FEC"; |
| 903 | else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || |
| 904 | caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) |
| 905 | fec_req = "FC-FEC/BASE-R"; |
| 906 | else |
| 907 | fec_req = "NONE"; |
| 908 | |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 909 | kfree(caps); |
Paul Greenwalt | f776b3a | 2019-04-16 10:34:52 -0700 | [diff] [blame] | 910 | |
| 911 | done: |
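| | /* Example of the resulting message with illustrative values: |
| | * "NIC Link is up 100 Gbps Full Duplex, Requested FEC: RS-FEC, Negotiated |
| | * FEC: RS-FEC, Autoneg Advertised: On, Autoneg Negotiated: True, Flow |
| | * Control: Rx/Tx" |
| | */ |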
Paul Greenwalt | 5ee3056 | 2020-07-09 09:16:10 -0700 | [diff] [blame] | 912 | netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", |
| 913 | speed, fec_req, fec, an_advertised, an, fc); |
Jesse Brandeburg | 2e0ab37 | 2019-08-02 01:25:32 -0700 | [diff] [blame] | 914 | ice_print_topo_conflict(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 915 | } |
| 916 | |
| 917 | /** |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 918 | * ice_vsi_link_event - update the VSI's netdev |
| 919 | * @vsi: the VSI on which the link event occurred |
| 920 | * @link_up: true if the link is up, false if it is down |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 921 | */ |
| 922 | static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) |
| 923 | { |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 924 | if (!vsi) |
| 925 | return; |
| 926 | |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 927 | if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 928 | return; |
| 929 | |
| 930 | if (vsi->type == ICE_VSI_PF) { |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 931 | if (link_up == netif_carrier_ok(vsi->netdev)) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 932 | return; |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 933 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 934 | if (link_up) { |
| 935 | netif_carrier_on(vsi->netdev); |
| 936 | netif_tx_wake_all_queues(vsi->netdev); |
| 937 | } else { |
| 938 | netif_carrier_off(vsi->netdev); |
| 939 | netif_tx_stop_all_queues(vsi->netdev); |
| 940 | } |
| 941 | } |
| 942 | } |
| 943 | |
| 944 | /** |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 945 | * ice_set_dflt_mib - send a default config MIB to the FW |
| 946 | * @pf: private PF struct |
| 947 | * |
| 948 | * This function sends a default configuration MIB to the FW. |
| 949 | * |
| 950 | * If this function errors out at any point, the driver is still able to |
| 951 | * function. The main impact is that LFC may not operate as expected. |
| 952 | * Therefore, an error here is only reported with a debug message, and the |
| 953 | * driver rebuild/re-enable continues regardless. |
| 954 | */ |
| 955 | static void ice_set_dflt_mib(struct ice_pf *pf) |
| 956 | { |
| 957 | struct device *dev = ice_pf_to_dev(pf); |
| 958 | u8 mib_type, *buf, *lldpmib = NULL; |
| 959 | u16 len, typelen, offset = 0; |
| 960 | struct ice_lldp_org_tlv *tlv; |
Bruce Allan | 12aae8f | 2020-10-12 15:53:26 -0700 | [diff] [blame] | 961 | struct ice_hw *hw = &pf->hw; |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 962 | u32 ouisubtype; |
| 963 | |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 964 | mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; |
| 965 | lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); |
| 966 | if (!lldpmib) { |
| 967 | dev_dbg(dev, "%s Failed to allocate MIB memory\n", |
| 968 | __func__); |
| 969 | return; |
| 970 | } |
| 971 | |
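| | /* The default MIB is built as three consecutive IEEE 802.1Qaz |
| | * organizationally specific TLVs (ETS CFG, ETS REC, PFC CFG). For each |
| | * TLV, typelen packs the TLV type with the information string length, |
| | * and offset accumulates the total LLDPDU size: 2 bytes of TLV header |
| | * plus len bytes of information (OUI/subtype and payload) per TLV. |
| | */ |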
| 972 | /* Add ETS CFG TLV */ |
| 973 | tlv = (struct ice_lldp_org_tlv *)lldpmib; |
| 974 | typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | |
| 975 | ICE_IEEE_ETS_TLV_LEN); |
| 976 | tlv->typelen = htons(typelen); |
| 977 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
| 978 | ICE_IEEE_SUBTYPE_ETS_CFG); |
| 979 | tlv->ouisubtype = htonl(ouisubtype); |
| 980 | |
| 981 | buf = tlv->tlvinfo; |
| 982 | buf[0] = 0; |
| 983 | |
| 984 | /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. |
| 985 | * Octets 5 - 12 are BW values, set octet 5 to 100% BW. |
| 986 | * Octets 13 - 20 are TSA values - leave as zeros |
| 987 | */ |
| 988 | buf[5] = 0x64; |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 989 | len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 990 | offset += len + 2; |
| 991 | tlv = (struct ice_lldp_org_tlv *) |
| 992 | ((char *)tlv + sizeof(tlv->typelen) + len); |
| 993 | |
| 994 | /* Add ETS REC TLV */ |
| 995 | buf = tlv->tlvinfo; |
| 996 | tlv->typelen = htons(typelen); |
| 997 | |
| 998 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
| 999 | ICE_IEEE_SUBTYPE_ETS_REC); |
| 1000 | tlv->ouisubtype = htonl(ouisubtype); |
| 1001 | |
| 1002 | /* First octet of buf is reserved |
| 1003 | * Octets 1 - 4 map UP to TC - all UPs map to zero |
| 1004 | * Octets 5 - 12 are BW values - set TC 0 to 100%. |
| 1005 | * Octets 13 - 20 are TSA value - leave as zeros |
| 1006 | */ |
| 1007 | buf[5] = 0x64; |
| 1008 | offset += len + 2; |
| 1009 | tlv = (struct ice_lldp_org_tlv *) |
| 1010 | ((char *)tlv + sizeof(tlv->typelen) + len); |
| 1011 | |
| 1012 | /* Add PFC CFG TLV */ |
| 1013 | typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | |
| 1014 | ICE_IEEE_PFC_TLV_LEN); |
| 1015 | tlv->typelen = htons(typelen); |
| 1016 | |
| 1017 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | |
| 1018 | ICE_IEEE_SUBTYPE_PFC_CFG); |
| 1019 | tlv->ouisubtype = htonl(ouisubtype); |
| 1020 | |
| 1021 | /* Octet 0: PFC capability of 8 TCs; octet 1 left as all zeros - PFC disabled */ |
| 1022 | buf[0] = 0x08; |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 1023 | len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 1024 | offset += len + 2; |
| 1025 | |
| 1026 | if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) |
| 1027 | dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); |
| 1028 | |
| 1029 | kfree(lldpmib); |
| 1030 | } |
| 1031 | |
| 1032 | /** |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 1033 | * ice_check_phy_fw_load - check if PHY FW load failed |
| 1034 | * @pf: pointer to PF struct |
| 1035 | * @link_cfg_err: bitmap from the link info structure |
| 1036 | * |
| 1037 | * check if external PHY FW load failed and print an error message if it did |
| 1038 | */ |
| 1039 | static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err) |
| 1040 | { |
| 1041 | if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) { |
| 1042 | clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); |
| 1043 | return; |
| 1044 | } |
| 1045 | |
| 1046 | if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) |
| 1047 | return; |
| 1048 | |
| 1049 | if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) { |
| 1050 | dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n"); |
| 1051 | set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); |
| 1052 | } |
| 1053 | } |
| 1054 | |
| 1055 | /** |
Anirudh Venkataramanan | c77849f5 | 2021-05-06 08:40:01 -0700 | [diff] [blame] | 1056 | * ice_check_module_power - check module power level |
| 1057 | * @pf: pointer to PF struct |
| 1058 | * @link_cfg_err: bitmap from the link info structure |
| 1059 | * |
| 1060 | * check module power level returned by a previous call to aq_get_link_info |
| 1061 | * and print error messages if module power level is not supported |
| 1062 | */ |
| 1063 | static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) |
| 1064 | { |
| 1065 | /* if module power level is supported, clear the flag */ |
| 1066 | if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT | |
| 1067 | ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) { |
| 1068 | clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); |
| 1069 | return; |
| 1070 | } |
| 1071 | |
| 1072 | /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the |
| 1073 | * above block didn't clear this bit, there's nothing to do |
| 1074 | */ |
| 1075 | if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) |
| 1076 | return; |
| 1077 | |
| 1078 | if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) { |
| 1079 | dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); |
| 1080 | set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); |
| 1081 | } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) { |
| 1082 | dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); |
| 1083 | set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); |
| 1084 | } |
| 1085 | } |
| 1086 | |
| 1087 | /** |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 1088 | * ice_check_link_cfg_err - check if link configuration failed |
| 1089 | * @pf: pointer to the PF struct |
| 1090 | * @link_cfg_err: bitmap from the link info structure |
| 1091 | * |
| 1092 | * Check the link_cfg_err bitmap from the link info structure and print an |
| 1093 | * error message for any link configuration failure it reports |
| 1094 | */ |
| 1095 | static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err) |
| 1096 | { |
| 1097 | ice_check_module_power(pf, link_cfg_err); |
| 1098 | ice_check_phy_fw_load(pf, link_cfg_err); |
| 1099 | } |
| 1100 | |
| 1101 | /** |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1102 | * ice_link_event - process the link event |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 1103 | * @pf: PF that the link event is associated with |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1104 | * @pi: port_info for the port that the link event is associated with |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1105 | * @link_up: true if the physical link is up and false if it is down |
| 1106 | * @link_speed: current link speed received from the link event |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1107 | * |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1108 | * Returns 0 on success and negative on failure |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1109 | */ |
| 1110 | static int |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1111 | ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, |
| 1112 | u16 link_speed) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1113 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1114 | struct device *dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1115 | struct ice_phy_info *phy_info; |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1116 | struct ice_vsi *vsi; |
| 1117 | u16 old_link_speed; |
| 1118 | bool old_link; |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 1119 | int status; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1120 | |
| 1121 | phy_info = &pi->phy; |
| 1122 | phy_info->link_info_old = phy_info->link_info; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1123 | |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1124 | old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1125 | old_link_speed = phy_info->link_info_old.link_speed; |
| 1126 | |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1127 | /* update the link info structures and re-enable link events, |
| 1128 | * don't bail on failure since other bookkeeping still needs to happen |
| 1129 | */ |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 1130 | status = ice_update_link_info(pi); |
| 1131 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 1132 | dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n", |
| 1133 | pi->lport, status, |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 1134 | ice_aq_str(pi->hw->adminq.sq_last_status)); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1135 | |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 1136 | ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); |
Anirudh Venkataramanan | c77849f5 | 2021-05-06 08:40:01 -0700 | [diff] [blame] | 1137 | |
Dave Ertman | 0ce6c34 | 2020-07-13 13:53:06 -0700 | [diff] [blame] | 1138 | /* Check if the link state is up after updating link info, and treat |
| 1139 | * this event as an UP event since the link is actually UP now. |
| 1140 | */ |
| 1141 | if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) |
| 1142 | link_up = true; |
| 1143 | |
Anirudh Venkataramanan | 208ff75 | 2019-08-08 07:39:33 -0700 | [diff] [blame] | 1144 | vsi = ice_get_main_vsi(pf); |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1145 | if (!vsi || !vsi->port_info) |
| 1146 | return -EINVAL; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1147 | |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1148 | /* turn off PHY if media was removed */ |
| 1149 | if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && |
| 1150 | !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { |
| 1151 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 1152 | ice_set_link(vsi, false); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1153 | } |
| 1154 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 1155 | /* if the old link state and speed are the same as the new, nothing to do */ |
| 1156 | if (link_up == old_link && link_speed == old_link_speed) |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 1157 | return 0; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 1158 | |
Jacob Keller | 6b1ff5d | 2022-12-05 11:52:43 -0800 | [diff] [blame] | 1159 | ice_ptp_link_change(pf, pf->hw.pf_id, link_up); |
Jacob Keller | 3a74962 | 2021-10-13 08:54:51 -0700 | [diff] [blame] | 1160 | |
Dave Ertman | 7d9c9b7 | 2020-07-13 13:53:04 -0700 | [diff] [blame] | 1161 | if (ice_is_dcb_active(pf)) { |
| 1162 | if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
| 1163 | ice_dcb_rebuild(pf); |
| 1164 | } else { |
| 1165 | if (link_up) |
| 1166 | ice_set_dflt_mib(pf); |
| 1167 | } |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1168 | ice_vsi_link_event(vsi, link_up); |
| 1169 | ice_print_link_msg(vsi, link_up); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1170 | |
Brett Creeley | 26a9152 | 2019-12-12 03:13:01 -0800 | [diff] [blame] | 1171 | ice_vc_notify_link_state(pf); |
Anirudh Venkataramanan | 53b8dec | 2018-09-19 17:43:00 -0700 | [diff] [blame] | 1172 | |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 1173 | return 0; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1174 | } |
| 1175 | |
| 1176 | /** |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1177 | * ice_watchdog_subtask - periodic tasks not using event driven scheduling |
| 1178 | * @pf: board private structure |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1179 | */ |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1180 | static void ice_watchdog_subtask(struct ice_pf *pf) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1181 | { |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1182 | int i; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1183 | |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1184 | /* if interface is down do nothing */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1185 | if (test_bit(ICE_DOWN, pf->state) || |
| 1186 | test_bit(ICE_CFG_BUSY, pf->state)) |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1187 | return; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1188 | |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1189 | /* make sure we don't do these things too often */ |
| 1190 | if (time_before(jiffies, |
| 1191 | pf->serv_tmr_prev + pf->serv_tmr_period)) |
| 1192 | return; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1193 | |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1194 | pf->serv_tmr_prev = jiffies; |
| 1195 | |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1196 | /* Update the stats for active netdevs so the network stack |
| 1197 | * can look at updated numbers whenever it cares to |
| 1198 | */ |
| 1199 | ice_update_pf_stats(pf); |
Brett Creeley | 80ed404a | 2019-02-08 12:50:54 -0800 | [diff] [blame] | 1200 | ice_for_each_vsi(pf, i) |
Anirudh Venkataramanan | 4f4be03 | 2018-10-18 08:37:09 -0700 | [diff] [blame] | 1201 | if (pf->vsi[i] && pf->vsi[i]->netdev) |
| 1202 | ice_update_vsi_stats(pf->vsi[i]); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1203 | } |
| 1204 | |
| 1205 | /** |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1206 | * ice_init_link_events - enable/initialize link events |
| 1207 | * @pi: pointer to the port_info instance |
| 1208 | * |
| 1209 | * Returns -EIO on failure, 0 on success |
| 1210 | */ |
| 1211 | static int ice_init_link_events(struct ice_port_info *pi) |
| 1212 | { |
| 1213 | u16 mask; |
| 1214 | |
| 1215 | mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 1216 | ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL | |
| 1217 | ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL)); |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1218 | |
| 1219 | if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1220 | dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1221 | pi->lport); |
| 1222 | return -EIO; |
| 1223 | } |
| 1224 | |
| 1225 | if (ice_aq_get_link_info(pi, true, NULL, NULL)) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1226 | dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1227 | pi->lport); |
| 1228 | return -EIO; |
| 1229 | } |
| 1230 | |
| 1231 | return 0; |
| 1232 | } |
| 1233 | |
| 1234 | /** |
| 1235 | * ice_handle_link_event - handle link event via ARQ |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 1236 | * @pf: PF that the link event is associated with |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1237 | * @event: event structure containing link status info |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1238 | */ |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1239 | static int |
| 1240 | ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1241 | { |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1242 | struct ice_aqc_get_link_status_data *link_data; |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1243 | struct ice_port_info *port_info; |
| 1244 | int status; |
| 1245 | |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1246 | link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1247 | port_info = pf->hw.port_info; |
| 1248 | if (!port_info) |
| 1249 | return -EINVAL; |
| 1250 | |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1251 | status = ice_link_event(pf, port_info, |
| 1252 | !!(link_data->link_info & ICE_AQ_LINK_UP), |
| 1253 | le16_to_cpu(link_data->link_speed)); |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1254 | if (status) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1255 | dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", |
| 1256 | status); |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1257 | |
| 1258 | return status; |
| 1259 | } |
| 1260 | |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1261 | /** |
Paul M Stillwell Jr | 9d3535e | 2023-12-12 21:07:14 -0800 | [diff] [blame] | 1262 | * ice_get_fwlog_data - copy the FW log data from ARQ event |
| 1263 | * @pf: PF that the FW log event is associated with |
| 1264 | * @event: event structure containing FW log data |
| 1265 | */ |
| 1266 | static void |
| 1267 | ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) |
| 1268 | { |
| 1269 | struct ice_fwlog_data *fwlog; |
| 1270 | struct ice_hw *hw = &pf->hw; |
| 1271 | |
| 1272 | fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail]; |
| 1273 | |
| 1274 | memset(fwlog->data, 0, PAGE_SIZE); |
| 1275 | fwlog->data_size = le16_to_cpu(event->desc.datalen); |
| 1276 | |
| 1277 | memcpy(fwlog->data, event->msg_buf, fwlog->data_size); |
| 1278 | ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size); |
| 1279 | |
| 1280 | if (ice_fwlog_ring_full(&hw->fwlog_ring)) { |
| 1281 | /* the rings are full so bump the head to create room */ |
| 1282 | ice_fwlog_ring_increment(&hw->fwlog_ring.head, |
| 1283 | hw->fwlog_ring.size); |
| 1284 | } |
| 1285 | } |
| 1286 | |
| 1287 | /** |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1288 | * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1289 | * @pf: pointer to the PF private structure |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1290 | * @task: intermediate helper storage and identifier for waiting |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1291 | * @opcode: the opcode to wait for |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1292 | * |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1293 | * Prepares to wait for a specific AdminQ completion event on the ARQ for |
| 1294 | * a given PF. The actual wait is done by a later call to ice_aq_wait_for_event(). |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1295 | * |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1296 | * Calls are separated to allow caller registering for event before sending |
| 1297 | * the command, which mitigates a race between registering and FW responding. |
| 1298 | * |
| 1299 | * To obtain only the descriptor contents, pass a task->event with a NULL |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1300 | * msg_buf. If the complete data buffer is desired, allocate the |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1301 | * task->event.msg_buf with enough space ahead of time. |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1302 | */ |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1303 | void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, |
| 1304 | u16 opcode) |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1305 | { |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1306 | INIT_HLIST_NODE(&task->entry); |
| 1307 | task->opcode = opcode; |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1308 | task->state = ICE_AQ_TASK_WAITING; |
| 1309 | |
| 1310 | spin_lock_bh(&pf->aq_wait_lock); |
| 1311 | hlist_add_head(&task->entry, &pf->aq_wait_list); |
| 1312 | spin_unlock_bh(&pf->aq_wait_lock); |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1313 | } |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1314 | |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1315 | /** |
| 1316 | * ice_aq_wait_for_event - Wait for an AdminQ event from firmware |
| 1317 | * @pf: pointer to the PF private structure |
| 1318 | * @task: ptr prepared by ice_aq_prep_for_event() |
| 1319 | * @timeout: how long to wait, in jiffies |
| 1320 | * |
| 1321 | * Waits for a specific AdminQ completion event on the ARQ for a given PF. The |
| 1322 | * current thread will be put to sleep until the specified event occurs or |
| 1323 | * until the given timeout is reached. |
| 1324 | * |
| 1325 | * Returns: zero on success, or a negative error code on failure. |
| 1326 | */ |
| 1327 | int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, |
| 1328 | unsigned long timeout) |
| 1329 | { |
| 1330 | enum ice_aq_task_state *state = &task->state; |
| 1331 | struct device *dev = ice_pf_to_dev(pf); |
| 1332 | unsigned long start = jiffies; |
| 1333 | long ret; |
| 1334 | int err; |
Jacob Keller | 1e8249c | 2020-10-07 10:54:45 -0700 | [diff] [blame] | 1335 | |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1336 | ret = wait_event_interruptible_timeout(pf->aq_wait_queue, |
| 1337 | *state != ICE_AQ_TASK_WAITING, |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1338 | timeout); |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1339 | switch (*state) { |
| 1340 | case ICE_AQ_TASK_NOT_PREPARED: |
| 1341 | WARN(1, "call to %s without ice_aq_prep_for_event()", __func__); |
| 1342 | err = -EINVAL; |
| 1343 | break; |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1344 | case ICE_AQ_TASK_WAITING: |
| 1345 | err = ret < 0 ? ret : -ETIMEDOUT; |
| 1346 | break; |
| 1347 | case ICE_AQ_TASK_CANCELED: |
| 1348 | err = ret < 0 ? ret : -ECANCELED; |
| 1349 | break; |
| 1350 | case ICE_AQ_TASK_COMPLETE: |
| 1351 | err = ret < 0 ? ret : 0; |
| 1352 | break; |
| 1353 | default: |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1354 | WARN(1, "Unexpected AdminQ wait task state %u", *state); |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1355 | err = -EINVAL; |
| 1356 | break; |
| 1357 | } |
| 1358 | |
Jacob Keller | 1e8249c | 2020-10-07 10:54:45 -0700 | [diff] [blame] | 1359 | dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", |
| 1360 | jiffies_to_msecs(jiffies - start), |
| 1361 | jiffies_to_msecs(timeout), |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1362 | task->opcode); |
Jacob Keller | 1e8249c | 2020-10-07 10:54:45 -0700 | [diff] [blame] | 1363 | |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1364 | spin_lock_bh(&pf->aq_wait_lock); |
| 1365 | hlist_del(&task->entry); |
| 1366 | spin_unlock_bh(&pf->aq_wait_lock); |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1367 | |
| 1368 | return err; |
| 1369 | } |
| 1370 | |
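| | /* Typical usage of the prep/wait pair (illustrative sketch only; the |
| | * opcode and the command-sending helper are hypothetical placeholders): |
| | * |
| | * struct ice_aq_task task; |
| | * int err; |
| | * |
| | * ice_aq_prep_for_event(pf, &task, ice_aqc_opc_example); |
| | * err = ice_send_example_aq_cmd(hw); |
| | * if (!err) |
| | * err = ice_aq_wait_for_event(pf, &task, HZ); |
| | * |
| | * On success, task.event.desc holds the completion descriptor. |
| | */ |
| | |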
| 1371 | /** |
| 1372 | * ice_aq_check_events - Check if any thread is waiting for an AdminQ event |
| 1373 | * @pf: pointer to the PF private structure |
| 1374 | * @opcode: the opcode of the event |
| 1375 | * @event: the event to check |
| 1376 | * |
| 1377 | * Loops over the current list of pending threads waiting for an AdminQ event. |
| 1378 | * For each matching task, copy the contents of the event into the task |
| 1379 | * structure and wake up the thread. |
| 1380 | * |
| 1381 | * If multiple threads wait for the same opcode, they will all be woken up. |
| 1382 | * |
| 1383 | * Note that event->msg_buf will only be duplicated if the event has a buffer |
| 1384 | * with enough space already allocated. Otherwise, only the descriptor and |
| 1385 | * message length will be copied. |
| 1388 | */ |
| 1389 | static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, |
| 1390 | struct ice_rq_event_info *event) |
| 1391 | { |
Przemek Kitszel | e1e8a14 | 2023-08-08 17:54:15 -0400 | [diff] [blame] | 1392 | struct ice_rq_event_info *task_ev; |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1393 | struct ice_aq_task *task; |
| 1394 | bool found = false; |
| 1395 | |
| 1396 | spin_lock_bh(&pf->aq_wait_lock); |
| 1397 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) { |
Przemek Kitszel | fb9840c | 2023-08-08 17:54:17 -0400 | [diff] [blame] | 1398 | if (task->state != ICE_AQ_TASK_WAITING) |
| 1399 | continue; |
| 1400 | if (task->opcode != opcode) |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1401 | continue; |
| 1402 | |
Przemek Kitszel | b214b98 | 2023-08-08 17:54:16 -0400 | [diff] [blame] | 1403 | task_ev = &task->event; |
Przemek Kitszel | e1e8a14 | 2023-08-08 17:54:15 -0400 | [diff] [blame] | 1404 | memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); |
| 1405 | task_ev->msg_len = event->msg_len; |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1406 | |
| 1407 | /* Only copy the data buffer if a destination was set and is large enough */ |
Przemek Kitszel | e1e8a14 | 2023-08-08 17:54:15 -0400 | [diff] [blame] | 1408 | if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) { |
| 1409 | memcpy(task_ev->msg_buf, event->msg_buf, |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1410 | event->buf_len); |
Przemek Kitszel | e1e8a14 | 2023-08-08 17:54:15 -0400 | [diff] [blame] | 1411 | task_ev->buf_len = event->buf_len; |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1412 | } |
| 1413 | |
| 1414 | task->state = ICE_AQ_TASK_COMPLETE; |
| 1415 | found = true; |
| 1416 | } |
| 1417 | spin_unlock_bh(&pf->aq_wait_lock); |
| 1418 | |
| 1419 | if (found) |
| 1420 | wake_up(&pf->aq_wait_queue); |
| 1421 | } |
| 1422 | |
| 1423 | /** |
| 1424 | * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks |
| 1425 | * @pf: the PF private structure |
| 1426 | * |
| 1427 | * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. |
| 1428 | * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. |
| 1429 | */ |
| 1430 | static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) |
| 1431 | { |
| 1432 | struct ice_aq_task *task; |
| 1433 | |
| 1434 | spin_lock_bh(&pf->aq_wait_lock); |
| 1435 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) |
| 1436 | task->state = ICE_AQ_TASK_CANCELED; |
| 1437 | spin_unlock_bh(&pf->aq_wait_lock); |
| 1438 | |
| 1439 | wake_up(&pf->aq_wait_queue); |
| 1440 | } |
| 1441 | |
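| | /* Watermark used by the VF mailbox overflow detection; passed below to |
| | * ice_vc_process_vf_msg() as data.async_watermark_val. |
| | */ |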
Jacob Keller | afc24d6 | 2023-02-22 09:09:17 -0800 | [diff] [blame] | 1442 | #define ICE_MBX_OVERFLOW_WATERMARK 64 |
| 1443 | |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1444 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1445 | * __ice_clean_ctrlq - helper function to clean controlq rings |
| 1446 | * @pf: ptr to struct ice_pf |
| 1447 | * @q_type: specific Control queue type |
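| | * |
| | * Returns: non-zero if the ICE_DFLT_IRQ_WORK budget was exhausted while |
| | * events were still pending, zero otherwise. |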
| 1448 | */ |
| 1449 | static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) |
| 1450 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1451 | struct device *dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1452 | struct ice_rq_event_info event; |
| 1453 | struct ice_hw *hw = &pf->hw; |
| 1454 | struct ice_ctl_q_info *cq; |
| 1455 | u16 pending, i = 0; |
| 1456 | const char *qtype; |
| 1457 | u32 oldval, val; |
| 1458 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1459 | /* Do not clean control queue if/when PF reset fails */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1460 | if (test_bit(ICE_RESET_FAILED, pf->state)) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1461 | return 0; |
| 1462 | |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1463 | switch (q_type) { |
| 1464 | case ICE_CTL_Q_ADMIN: |
| 1465 | cq = &hw->adminq; |
| 1466 | qtype = "Admin"; |
| 1467 | break; |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 1468 | case ICE_CTL_Q_SB: |
| 1469 | cq = &hw->sbq; |
| 1470 | qtype = "Sideband"; |
| 1471 | break; |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 1472 | case ICE_CTL_Q_MAILBOX: |
| 1473 | cq = &hw->mailboxq; |
| 1474 | qtype = "Mailbox"; |
Vignesh Sridhar | 0891c89 | 2021-03-02 10:12:00 -0800 | [diff] [blame] | 1475 | /* we are going to try to detect a malicious VF, so set the |
| 1476 | * state to begin detection |
| 1477 | */ |
| 1478 | hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 1479 | break; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1480 | default: |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1481 | dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1482 | return 0; |
| 1483 | } |
| 1484 | |
| 1485 | /* check for error indications - the PF_xx_AxQLEN register layouts for |
| 1486 | * FW/MBX/SB are identical, so just use the PF_FW_AxQLEN defines. |
| 1487 | */ |
| 1488 | val = rd32(hw, cq->rq.len); |
| 1489 | if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | |
| 1490 | PF_FW_ARQLEN_ARQCRIT_M)) { |
| 1491 | oldval = val; |
| 1492 | if (val & PF_FW_ARQLEN_ARQVFE_M) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1493 | dev_dbg(dev, "%s Receive Queue VF Error detected\n", |
| 1494 | qtype); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1495 | if (val & PF_FW_ARQLEN_ARQOVFL_M) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1496 | dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1497 | qtype); |
| 1498 | } |
| 1499 | if (val & PF_FW_ARQLEN_ARQCRIT_M) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1500 | dev_dbg(dev, "%s Receive Queue Critical Error detected\n", |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1501 | qtype); |
| 1502 | val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | |
| 1503 | PF_FW_ARQLEN_ARQCRIT_M); |
| 1504 | if (oldval != val) |
| 1505 | wr32(hw, cq->rq.len, val); |
| 1506 | } |
| 1507 | |
| 1508 | val = rd32(hw, cq->sq.len); |
| 1509 | if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | |
| 1510 | PF_FW_ATQLEN_ATQCRIT_M)) { |
| 1511 | oldval = val; |
| 1512 | if (val & PF_FW_ATQLEN_ATQVFE_M) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1513 | dev_dbg(dev, "%s Send Queue VF Error detected\n", |
| 1514 | qtype); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1515 | if (val & PF_FW_ATQLEN_ATQOVFL_M) { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1516 | dev_dbg(dev, "%s Send Queue Overflow Error detected\n", |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1517 | qtype); |
| 1518 | } |
| 1519 | if (val & PF_FW_ATQLEN_ATQCRIT_M) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1520 | dev_dbg(dev, "%s Send Queue Critical Error detected\n", |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1521 | qtype); |
| 1522 | val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | |
| 1523 | PF_FW_ATQLEN_ATQCRIT_M); |
| 1524 | if (oldval != val) |
| 1525 | wr32(hw, cq->sq.len, val); |
| 1526 | } |
| 1527 | |
| 1528 | event.buf_len = cq->rq_buf_size; |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 1529 | event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1530 | if (!event.msg_buf) |
| 1531 | return 0; |
| 1532 | |
| 1533 | do { |
Jacob Keller | afc24d6 | 2023-02-22 09:09:17 -0800 | [diff] [blame] | 1534 | struct ice_mbx_data data = {}; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1535 | u16 opcode; |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 1536 | int ret; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1537 | |
| 1538 | ret = ice_clean_rq_elem(hw, cq, &event, &pending); |
Tony Nguyen | d54699e | 2021-10-07 15:58:01 -0700 | [diff] [blame] | 1539 | if (ret == -EALREADY) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1540 | break; |
| 1541 | if (ret) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 1542 | dev_err(dev, "%s Receive Queue event error %d\n", qtype, |
| 1543 | ret); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1544 | break; |
| 1545 | } |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1546 | |
| 1547 | opcode = le16_to_cpu(event.desc.opcode); |
| 1548 | |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 1549 | /* Notify any thread that might be waiting for this event */ |
| 1550 | ice_aq_check_events(pf, opcode, &event); |
| 1551 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1552 | switch (opcode) { |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1553 | case ice_aqc_opc_get_link_status: |
Brett Creeley | c2a23e0 | 2019-02-28 15:26:01 -0800 | [diff] [blame] | 1554 | if (ice_handle_link_event(pf, &event)) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1555 | dev_err(dev, "Could not handle link event\n"); |
Brett Creeley | 250c3b3 | 2019-02-26 16:35:23 -0800 | [diff] [blame] | 1556 | break; |
Brett Creeley | 2309ae3 | 2020-01-22 07:21:31 -0800 | [diff] [blame] | 1557 | case ice_aqc_opc_event_lan_overflow: |
| 1558 | ice_vf_lan_overflow_event(pf, &event); |
| 1559 | break; |
Anirudh Venkataramanan | 1071a83 | 2018-09-19 17:42:59 -0700 | [diff] [blame] | 1560 | case ice_mbx_opc_send_msg_to_pf: |
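| | /* gather mailbox usage data for the VF overflow detection logic */ |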
Jacob Keller | afc24d6 | 2023-02-22 09:09:17 -0800 | [diff] [blame] | 1561 | data.num_msg_proc = i; |
| 1562 | data.num_pending_arq = pending; |
| 1563 | data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries; |
| 1564 | data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; |
| 1565 | |
Jacob Keller | be96815 | 2023-02-22 09:09:20 -0800 | [diff] [blame] | 1566 | ice_vc_process_vf_msg(pf, &event, &data); |
Anirudh Venkataramanan | 1071a83 | 2018-09-19 17:42:59 -0700 | [diff] [blame] | 1567 | break; |
Paul M Stillwell Jr | 9d3535e | 2023-12-12 21:07:14 -0800 | [diff] [blame] | 1568 | case ice_aqc_opc_fw_logs_event: |
| 1569 | ice_get_fwlog_data(pf, &event); |
| 1570 | break; |
Anirudh Venkataramanan | 00cc3f1 | 2019-02-28 15:24:26 -0800 | [diff] [blame] | 1571 | case ice_aqc_opc_lldp_set_mib_change: |
| 1572 | ice_dcb_process_lldp_set_mib_change(pf, &event); |
| 1573 | break; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1574 | default: |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1575 | dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 1576 | qtype, opcode); |
| 1577 | break; |
| 1578 | } |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1579 | } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); |
| 1580 | |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 1581 | kfree(event.msg_buf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1582 | |
| 1583 | return pending && (i == ICE_DFLT_IRQ_WORK); |
| 1584 | } |
| 1585 | |
| 1586 | /** |
Anirudh Venkataramanan | 3d6b640 | 2018-08-09 06:28:56 -0700 | [diff] [blame] | 1587 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu |
| 1588 | * @hw: pointer to hardware info |
| 1589 | * @cq: control queue information |
| 1590 | * |
| 1591 | * returns true if there are pending messages in a queue, false if there aren't |
| 1592 | */ |
| 1593 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) |
| 1594 | { |
| 1595 | u16 ntu; |
| 1596 | |
| 1597 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); |
| 1598 | return cq->rq.next_to_clean != ntu; |
| 1599 | } |
| 1600 | |
| 1601 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1602 | * ice_clean_adminq_subtask - clean the AdminQ rings |
| 1603 | * @pf: board private structure |
| 1604 | */ |
| 1605 | static void ice_clean_adminq_subtask(struct ice_pf *pf) |
| 1606 | { |
| 1607 | struct ice_hw *hw = &pf->hw; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1608 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1609 | if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1610 | return; |
| 1611 | |
| 1612 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) |
| 1613 | return; |
| 1614 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1615 | clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1616 | |
Anirudh Venkataramanan | 3d6b640 | 2018-08-09 06:28:56 -0700 | [diff] [blame] | 1617 | /* There might be a situation where new messages arrive to a control |
| 1618 | * queue between processing the last message and clearing the |
| 1619 | * EVENT_PENDING bit. So before exiting, check queue head again (using |
| 1620 | * ice_ctrlq_pending) and process new messages if any. |
| 1621 | */ |
| 1622 | if (ice_ctrlq_pending(hw, &hw->adminq)) |
| 1623 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1624 | |
| 1625 | ice_flush(hw); |
| 1626 | } |
| 1627 | |
| 1628 | /** |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 1629 | * ice_clean_mailboxq_subtask - clean the MailboxQ rings |
| 1630 | * @pf: board private structure |
| 1631 | */ |
| 1632 | static void ice_clean_mailboxq_subtask(struct ice_pf *pf) |
| 1633 | { |
| 1634 | struct ice_hw *hw = &pf->hw; |
| 1635 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1636 | if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 1637 | return; |
| 1638 | |
| 1639 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) |
| 1640 | return; |
| 1641 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1642 | clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 1643 | |
| 1644 | if (ice_ctrlq_pending(hw, &hw->mailboxq)) |
| 1645 | __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); |
| 1646 | |
| 1647 | ice_flush(hw); |
| 1648 | } |
| 1649 | |
| 1650 | /** |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 1651 | * ice_clean_sbq_subtask - clean the Sideband Queue rings |
| 1652 | * @pf: board private structure |
| 1653 | */ |
| 1654 | static void ice_clean_sbq_subtask(struct ice_pf *pf) |
| 1655 | { |
| 1656 | struct ice_hw *hw = &pf->hw; |
| 1657 | |
Grzegorz Nitka | 7a15668 | 2023-12-06 20:29:18 +0100 | [diff] [blame] | 1658 | /* if mac_type is not generic, sideband is not supported |
| 1659 | * and there's nothing to do here |
| 1660 | */ |
| 1661 | if (!ice_is_generic_mac(hw)) { |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 1662 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); |
| 1663 | return; |
| 1664 | } |
| 1665 | |
| 1666 | if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) |
| 1667 | return; |
| 1668 | |
| 1669 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) |
| 1670 | return; |
| 1671 | |
| 1672 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); |
| 1673 | |
| 1674 | if (ice_ctrlq_pending(hw, &hw->sbq)) |
| 1675 | __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); |
| 1676 | |
| 1677 | ice_flush(hw); |
| 1678 | } |
| 1679 | |
| 1680 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1681 | * ice_service_task_schedule - schedule the service task to wake up |
| 1682 | * @pf: board private structure |
| 1683 | * |
| 1684 | * If not already scheduled, this puts the task into the work queue. |
| 1685 | */ |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 1686 | void ice_service_task_schedule(struct ice_pf *pf) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1687 | { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1688 | if (!test_bit(ICE_SERVICE_DIS, pf->state) && |
| 1689 | !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && |
| 1690 | !test_bit(ICE_NEEDS_RESTART, pf->state)) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1691 | queue_work(ice_wq, &pf->serv_task); |
| 1692 | } |
| 1693 | |
| 1694 | /** |
| 1695 | * ice_service_task_complete - finish up the service task |
| 1696 | * @pf: board private structure |
| 1697 | */ |
| 1698 | static void ice_service_task_complete(struct ice_pf *pf) |
| 1699 | { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1700 | WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1701 | |
| 1702 | /* force memory (pf->state) to sync before next service task */ |
| 1703 | smp_mb__before_atomic(); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1704 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1705 | } |
| 1706 | |
| 1707 | /** |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 1708 | * ice_service_task_stop - stop service task and cancel works |
| 1709 | * @pf: board private structure |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 1710 | * |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1711 | * Return 0 if the ICE_SERVICE_DIS bit was not already set, |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 1712 | * 1 otherwise. |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 1713 | */ |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 1714 | static int ice_service_task_stop(struct ice_pf *pf) |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 1715 | { |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 1716 | int ret; |
| 1717 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1718 | ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 1719 | |
| 1720 | if (pf->serv_tmr.function) |
| 1721 | del_timer_sync(&pf->serv_tmr); |
| 1722 | if (pf->serv_task.func) |
| 1723 | cancel_work_sync(&pf->serv_task); |
| 1724 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1725 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 1726 | return ret; |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 1727 | } |
| 1728 | |
| 1729 | /** |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 1730 | * ice_service_task_restart - restart service task and schedule works |
| 1731 | * @pf: board private structure |
| 1732 | * |
| 1733 | * This function is needed for suspend and resume flows (e.g. the WoL scenario) |
| 1734 | */ |
| 1735 | static void ice_service_task_restart(struct ice_pf *pf) |
| 1736 | { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1737 | clear_bit(ICE_SERVICE_DIS, pf->state); |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 1738 | ice_service_task_schedule(pf); |
| 1739 | } |
| 1740 | |
| 1741 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 1742 | * ice_service_timer - timer callback to schedule service task |
| 1743 | * @t: pointer to timer_list |
| 1744 | */ |
| 1745 | static void ice_service_timer(struct timer_list *t) |
| 1746 | { |
| 1747 | struct ice_pf *pf = from_timer(pf, t, serv_tmr); |
| 1748 | |
| 1749 | mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); |
| 1750 | ice_service_task_schedule(pf); |
| 1751 | } |
| 1752 | |
| 1753 | /** |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1754 | * ice_mdd_maybe_reset_vf - reset VF after MDD event |
| 1755 | * @pf: pointer to the PF structure |
| 1756 | * @vf: pointer to the VF structure |
| 1757 | * @reset_vf_tx: whether Tx MDD has occurred |
| 1758 | * @reset_vf_rx: whether Rx MDD has occurred |
| 1759 | * |
| 1760 | * Since the queue can get stuck on VF MDD events, the PF can be configured to |
| 1761 | * automatically reset the VF by enabling the private ethtool flag |
| 1762 | * mdd-auto-reset-vf. |
| 1763 | */ |
| 1764 | static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, |
| 1765 | bool reset_vf_tx, bool reset_vf_rx) |
| 1766 | { |
| 1767 | struct device *dev = ice_pf_to_dev(pf); |
| 1768 | |
| 1769 | if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) |
| 1770 | return; |
| 1771 | |
| 1772 | /* VF MDD event counters will be cleared by reset, so print the event |
| 1773 | * prior to reset. |
| 1774 | */ |
| 1775 | if (reset_vf_tx) |
| 1776 | ice_print_vf_tx_mdd_event(vf); |
| 1777 | |
| 1778 | if (reset_vf_rx) |
| 1779 | ice_print_vf_rx_mdd_event(vf); |
| 1780 | |
| 1781 | dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", |
| 1782 | pf->hw.pf_id, vf->vf_id); |
| 1783 | ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); |
| 1784 | } |
| 1785 | |
| 1786 | /** |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1787 | * ice_handle_mdd_event - handle malicious driver detect event |
| 1788 | * @pf: pointer to the PF structure |
| 1789 | * |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1790 | * Called from service task. OICR interrupt handler indicates MDD event. |
| 1791 | * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log |
| 1792 | * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events |
| 1793 | * disable the queue, the PF can be configured to reset the VF using ethtool |
| 1794 | * private flag mdd-auto-reset-vf. |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1795 | */ |
| 1796 | static void ice_handle_mdd_event(struct ice_pf *pf) |
| 1797 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1798 | struct device *dev = ice_pf_to_dev(pf); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1799 | struct ice_hw *hw = &pf->hw; |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1800 | struct ice_vf *vf; |
| 1801 | unsigned int bkt; |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1802 | u32 reg; |
| 1803 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1804 | if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1805 | /* Since the VF MDD event logging is rate limited, check if |
| 1806 | * there are pending MDD events. |
| 1807 | */ |
| 1808 | ice_print_vfs_mdd_events(pf); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1809 | return; |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1810 | } |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1811 | |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1812 | /* find what triggered an MDD event */ |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1813 | reg = rd32(hw, GL_MDET_TX_PQM); |
| 1814 | if (reg & GL_MDET_TX_PQM_VALID_M) { |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 1815 | u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg); |
| 1816 | u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg); |
| 1817 | u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg); |
| 1818 | u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1819 | |
| 1820 | if (netif_msg_tx_err(pf)) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1821 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1822 | event, queue, pf_num, vf_num); |
| 1823 | wr32(hw, GL_MDET_TX_PQM, 0xffffffff); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1824 | } |
| 1825 | |
Paul Greenwalt | ba1124f | 2023-10-25 14:41:52 -0700 | [diff] [blame] | 1826 | reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1827 | if (reg & GL_MDET_TX_TCLAN_VALID_M) { |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 1828 | u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg); |
| 1829 | u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg); |
| 1830 | u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg); |
| 1831 | u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1832 | |
Ben Shelton | 1d8bd99 | 2020-02-06 01:20:12 -0800 | [diff] [blame] | 1833 | if (netif_msg_tx_err(pf)) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1834 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1835 | event, queue, pf_num, vf_num); |
Paul Greenwalt | ba1124f | 2023-10-25 14:41:52 -0700 | [diff] [blame] | 1836 | wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1837 | } |
| 1838 | |
| 1839 | reg = rd32(hw, GL_MDET_RX); |
| 1840 | if (reg & GL_MDET_RX_VALID_M) { |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 1841 | u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg); |
| 1842 | u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg); |
| 1843 | u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg); |
| 1844 | u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1845 | |
| 1846 | if (netif_msg_rx_err(pf)) |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 1847 | dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1848 | event, queue, pf_num, vf_num); |
| 1849 | wr32(hw, GL_MDET_RX, 0xffffffff); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1850 | } |
| 1851 | |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1852 | /* check to see if this PF caused an MDD event */ |
| 1853 | reg = rd32(hw, PF_MDET_TX_PQM); |
| 1854 | if (reg & PF_MDET_TX_PQM_VALID_M) { |
| 1855 | wr32(hw, PF_MDET_TX_PQM, 0xFFFF); |
| 1856 | if (netif_msg_tx_err(pf)) |
| 1857 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1858 | } |
| 1859 | |
Paul Greenwalt | ba1124f | 2023-10-25 14:41:52 -0700 | [diff] [blame] | 1860 | reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw)); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1861 | if (reg & PF_MDET_TX_TCLAN_VALID_M) { |
Paul Greenwalt | ba1124f | 2023-10-25 14:41:52 -0700 | [diff] [blame] | 1862 | wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1863 | if (netif_msg_tx_err(pf)) |
| 1864 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); |
| 1865 | } |
| 1866 | |
| 1867 | reg = rd32(hw, PF_MDET_RX); |
| 1868 | if (reg & PF_MDET_RX_VALID_M) { |
| 1869 | wr32(hw, PF_MDET_RX, 0xFFFF); |
| 1870 | if (netif_msg_rx_err(pf)) |
| 1871 | dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); |
| 1872 | } |
| 1873 | |
| 1874 | /* Check to see if one of the VFs caused an MDD event, and then |
| 1875 | * increment counters and set print pending |
| 1876 | */ |
Jacob Keller | 3d5985a | 2022-02-16 13:37:38 -0800 | [diff] [blame] | 1877 | mutex_lock(&pf->vfs.table_lock); |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1878 | ice_for_each_vf(pf, bkt, vf) { |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1879 | bool reset_vf_tx = false, reset_vf_rx = false; |
| 1880 | |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1881 | reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1882 | if (reg & VP_MDET_TX_PQM_VALID_M) { |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1883 | wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1884 | vf->mdd_tx_events.count++; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1885 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1886 | if (netif_msg_tx_err(pf)) |
| 1887 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1888 | vf->vf_id); |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1889 | |
| 1890 | reset_vf_tx = true; |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1891 | } |
| 1892 | |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1893 | reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1894 | if (reg & VP_MDET_TX_TCLAN_VALID_M) { |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1895 | wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1896 | vf->mdd_tx_events.count++; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1897 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1898 | if (netif_msg_tx_err(pf)) |
| 1899 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1900 | vf->vf_id); |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1901 | |
| 1902 | reset_vf_tx = true; |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1903 | } |
| 1904 | |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1905 | reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1906 | if (reg & VP_MDET_TX_TDPU_VALID_M) { |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1907 | wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1908 | vf->mdd_tx_events.count++; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1909 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1910 | if (netif_msg_tx_err(pf)) |
| 1911 | dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1912 | vf->vf_id); |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1913 | |
| 1914 | reset_vf_tx = true; |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1915 | } |
| 1916 | |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1917 | reg = rd32(hw, VP_MDET_RX(vf->vf_id)); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1918 | if (reg & VP_MDET_RX_VALID_M) { |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1919 | wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1920 | vf->mdd_rx_events.count++; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 1921 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1922 | if (netif_msg_rx_err(pf)) |
| 1923 | dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", |
Jacob Keller | c4c2c7d | 2022-02-16 13:37:35 -0800 | [diff] [blame] | 1924 | vf->vf_id); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1925 | |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1926 | reset_vf_rx = true; |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1927 | } |
Marcin Szycik | cc2a9d6 | 2024-04-04 16:04:51 +0200 | [diff] [blame] | 1928 | |
| 1929 | if (reset_vf_tx || reset_vf_rx) |
| 1930 | ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, |
| 1931 | reset_vf_rx); |
Anirudh Venkataramanan | 7c4bc1f | 2018-09-19 17:43:01 -0700 | [diff] [blame] | 1932 | } |
Jacob Keller | 3d5985a | 2022-02-16 13:37:38 -0800 | [diff] [blame] | 1933 | mutex_unlock(&pf->vfs.table_lock); |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 1934 | |
| 1935 | ice_print_vfs_mdd_events(pf); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 1936 | } |
| 1937 | |
| 1938 | /** |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1939 | * ice_force_phys_link_state - Force the physical link state |
| 1940 | * @vsi: VSI to force the physical link state to up/down |
| 1941 | * @link_up: true/false indicates to set the physical link to up/down |
| 1942 | * |
| 1943 | * Force the physical link state by getting the current PHY capabilities from |
| 1944 | * hardware and setting the PHY config based on the determined capabilities. If |
| 1945 | * link changes a link event will be triggered because both the Enable Automatic |
| 1946 | * Link Update and LESM Enable bits are set when setting the PHY capabilities. |
| 1947 | * |
| 1948 | * Returns 0 on success, negative on failure |
| 1949 | */ |
| 1950 | static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) |
| 1951 | { |
| 1952 | struct ice_aqc_get_phy_caps_data *pcaps; |
| 1953 | struct ice_aqc_set_phy_cfg_data *cfg; |
| 1954 | struct ice_port_info *pi; |
| 1955 | struct device *dev; |
| 1956 | int retcode; |
| 1957 | |
| 1958 | if (!vsi || !vsi->port_info || !vsi->back) |
| 1959 | return -EINVAL; |
| 1960 | if (vsi->type != ICE_VSI_PF) |
| 1961 | return 0; |
| 1962 | |
Anirudh Venkataramanan | 9a94684 | 2020-02-06 01:20:09 -0800 | [diff] [blame] | 1963 | dev = ice_pf_to_dev(vsi->back); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1964 | |
| 1965 | pi = vsi->port_info; |
| 1966 | |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 1967 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1968 | if (!pcaps) |
| 1969 | return -ENOMEM; |
| 1970 | |
Anirudh Venkataramanan | d6730a8 | 2021-03-25 15:35:06 -0700 | [diff] [blame] | 1971 | retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1972 | NULL); |
| 1973 | if (retcode) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 1974 | dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1975 | vsi->vsi_num, retcode); |
| 1976 | retcode = -EIO; |
| 1977 | goto out; |
| 1978 | } |
| 1979 | |
| 1980 | /* No change in link */ |
| 1981 | if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && |
| 1982 | link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) |
| 1983 | goto out; |
| 1984 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 1985 | /* Use the current user PHY configuration. The current user PHY |
| 1986 | * configuration is initialized during probe from PHY capabilities |
| 1987 | * software mode, and updated on set PHY configuration. |
| 1988 | */ |
| 1989 | cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1990 | if (!cfg) { |
| 1991 | retcode = -ENOMEM; |
| 1992 | goto out; |
| 1993 | } |
| 1994 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 1995 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 1996 | if (link_up) |
| 1997 | cfg->caps |= ICE_AQ_PHY_ENA_LINK; |
| 1998 | else |
| 1999 | cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; |
| 2000 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2001 | retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2002 | if (retcode) { |
| 2003 | dev_err(dev, "Failed to set phy config, VSI %d error %d\n", |
| 2004 | vsi->vsi_num, retcode); |
| 2005 | retcode = -EIO; |
| 2006 | } |
| 2007 | |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 2008 | kfree(cfg); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2009 | out: |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 2010 | kfree(pcaps); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2011 | return retcode; |
| 2012 | } |
| 2013 | |
| 2014 | /** |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2015 | * ice_init_nvm_phy_type - Initialize the NVM PHY type |
| 2016 | * @pi: port info structure |
| 2017 | * |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2018 | * Initialize nvm_phy_type_[low|high] for link lenient mode support |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2019 | */ |
| 2020 | static int ice_init_nvm_phy_type(struct ice_port_info *pi) |
| 2021 | { |
| 2022 | struct ice_aqc_get_phy_caps_data *pcaps; |
| 2023 | struct ice_pf *pf = pi->hw->back; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2024 | int err; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2025 | |
| 2026 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
| 2027 | if (!pcaps) |
| 2028 | return -ENOMEM; |
| 2029 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2030 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, |
| 2031 | pcaps, NULL); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2032 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2033 | if (err) { |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2034 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2035 | goto out; |
| 2036 | } |
| 2037 | |
| 2038 | pf->nvm_phy_type_hi = pcaps->phy_type_high; |
| 2039 | pf->nvm_phy_type_lo = pcaps->phy_type_low; |
| 2040 | |
| 2041 | out: |
| 2042 | kfree(pcaps); |
| 2043 | return err; |
| 2044 | } |
| 2045 | |
| 2046 | /** |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2047 | * ice_init_link_dflt_override - Initialize link default override |
| 2048 | * @pi: port info structure |
Bruce Allan | b4e813d | 2020-07-09 09:16:08 -0700 | [diff] [blame] | 2049 | * |
| 2050 | * Initialize link default override and PHY total port shutdown during probe |
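 *
 * For illustration only (interface name assumed, not taken from this driver),
 * the link-down-on-close private flag that Total Port Shutdown overrides is
 * normally controlled with:
 *
 *   ethtool --set-priv-flags eth0 link-down-on-close on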
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2051 | */ |
| 2052 | static void ice_init_link_dflt_override(struct ice_port_info *pi) |
| 2053 | { |
| 2054 | struct ice_link_default_override_tlv *ldo; |
| 2055 | struct ice_pf *pf = pi->hw->back; |
| 2056 | |
| 2057 | ldo = &pf->link_dflt_override; |
Bruce Allan | b4e813d | 2020-07-09 09:16:08 -0700 | [diff] [blame] | 2058 | if (ice_get_link_default_override(ldo, pi)) |
| 2059 | return; |
| 2060 | |
| 2061 | if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) |
| 2062 | return; |
| 2063 | |
| 2064 | /* Enable Total Port Shutdown (override/replace link-down-on-close |
| 2065 | * ethtool private flag) for ports with Port Disable bit set. |
| 2066 | */ |
| 2067 | set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); |
| 2068 | set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2069 | } |
| 2070 | |
| 2071 | /** |
| 2072 | * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings |
| 2073 | * @pi: port info structure |
| 2074 | * |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2075 | * If default override is enabled, initialize the user PHY cfg speed and FEC |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2076 | * settings using the default override mask from the NVM. |
| 2077 | * |
| 2078 | * The PHY should only be configured with the default override settings the |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2079 | * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2080 | * is used to indicate that the user PHY cfg default override is initialized |
| 2081 | * and the PHY has not been configured with the default override settings. The |
| 2082 | * state is set here, and cleared in ice_configure_phy the first time the PHY is |
| 2083 | * configured. |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2084 | * |
| 2085 | * This function should be called only if the FW doesn't support default |
| 2086 | * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2087 | */ |
| 2088 | static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) |
| 2089 | { |
| 2090 | struct ice_link_default_override_tlv *ldo; |
| 2091 | struct ice_aqc_set_phy_cfg_data *cfg; |
| 2092 | struct ice_phy_info *phy = &pi->phy; |
| 2093 | struct ice_pf *pf = pi->hw->back; |
| 2094 | |
| 2095 | ldo = &pf->link_dflt_override; |
| 2096 | |
| 2097 | /* If link default override is enabled, use it to mask NVM PHY capabilities |
| 2098 | * for speed and FEC default configuration. |
| 2099 | */ |
| 2100 | cfg = &phy->curr_user_phy_cfg; |
| 2101 | |
| 2102 | if (ldo->phy_type_low || ldo->phy_type_high) { |
| 2103 | cfg->phy_type_low = pf->nvm_phy_type_lo & |
| 2104 | cpu_to_le64(ldo->phy_type_low); |
| 2105 | cfg->phy_type_high = pf->nvm_phy_type_hi & |
| 2106 | cpu_to_le64(ldo->phy_type_high); |
| 2107 | } |
| 2108 | cfg->link_fec_opt = ldo->fec_options; |
| 2109 | phy->curr_user_fec_req = ICE_FEC_AUTO; |
| 2110 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2111 | set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2112 | } |
| 2113 | |
| 2114 | /** |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2115 | * ice_init_phy_user_cfg - Initialize the PHY user configuration |
| 2116 | * @pi: port info structure |
| 2117 | * |
| 2118 | * Initialize the current user PHY configuration, speed, FEC, and FC requested |
| 2119 | * mode to default. The PHY defaults are taken from the get PHY capabilities |
| 2120 | * (topology with media) response, so call when media is first available. An error is returned if |
| 2121 | * called when media is not available. The PHY initialization completed state is |
| 2122 | * set here. |
| 2123 | * |
| 2124 | * These configurations are used when setting PHY |
| 2125 | * configuration. The user PHY configuration is updated on set PHY |
| 2126 | * configuration. Returns 0 on success, negative on failure |
| 2127 | */ |
| 2128 | static int ice_init_phy_user_cfg(struct ice_port_info *pi) |
| 2129 | { |
| 2130 | struct ice_aqc_get_phy_caps_data *pcaps; |
| 2131 | struct ice_phy_info *phy = &pi->phy; |
| 2132 | struct ice_pf *pf = pi->hw->back; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2133 | int err; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2134 | |
| 2135 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
| 2136 | return -EIO; |
| 2137 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2138 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
| 2139 | if (!pcaps) |
| 2140 | return -ENOMEM; |
| 2141 | |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2142 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2143 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, |
| 2144 | pcaps, NULL); |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2145 | else |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2146 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, |
| 2147 | pcaps, NULL); |
| 2148 | if (err) { |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2149 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2150 | goto err_out; |
| 2151 | } |
| 2152 | |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2153 | ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); |
| 2154 | |
| 2155 | /* check if lenient mode is supported and enabled */ |
Anirudh Venkataramanan | dc6aaa1 | 2021-03-25 15:35:14 -0700 | [diff] [blame] | 2156 | if (ice_fw_supports_link_override(pi->hw) && |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2157 | !(pcaps->module_compliance_enforcement & |
| 2158 | ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { |
| 2159 | set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); |
| 2160 | |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2161 | /* if the FW supports default PHY configuration mode, then the driver |
| 2162 | * does not have to apply link override settings. If not, |
| 2163 | * initialize user PHY configuration with link override values |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2164 | */ |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2165 | if (!ice_fw_supports_report_dflt_cfg(pi->hw) && |
| 2166 | (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2167 | ice_init_phy_cfg_dflt_override(pi); |
| 2168 | goto out; |
| 2169 | } |
| 2170 | } |
| 2171 | |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2172 | /* if link default override is not enabled, set user flow control and |
| 2173 | * FEC settings based on what get_phy_caps returned |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2174 | */ |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2175 | phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, |
| 2176 | pcaps->link_fec_options); |
| 2177 | phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); |
| 2178 | |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2179 | out: |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2180 | phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2181 | set_bit(ICE_PHY_INIT_COMPLETE, pf->state); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2182 | err_out: |
| 2183 | kfree(pcaps); |
| 2184 | return err; |
| 2185 | } |
| 2186 | |
| 2187 | /** |
| 2188 | * ice_configure_phy - configure PHY |
| 2189 | * @vsi: VSI of PHY |
| 2190 | * |
| 2191 | * Set the PHY configuration. If the current PHY configuration is the same as |
| 2192 | * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise |
| 2193 | * configure based on the PHY capabilities reported for the topology with media. |
| 2194 | */ |
| 2195 | static int ice_configure_phy(struct ice_vsi *vsi) |
| 2196 | { |
| 2197 | struct device *dev = ice_pf_to_dev(vsi->back); |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2198 | struct ice_port_info *pi = vsi->port_info; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2199 | struct ice_aqc_get_phy_caps_data *pcaps; |
| 2200 | struct ice_aqc_set_phy_cfg_data *cfg; |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2201 | struct ice_phy_info *phy = &pi->phy; |
| 2202 | struct ice_pf *pf = vsi->back; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2203 | int err; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2204 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2205 | /* Ensure we have media as we cannot configure a medialess port */ |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2206 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
Katarzyna Wieczerzycka | 6a8d8bb5 | 2023-12-15 12:01:56 +0100 | [diff] [blame] | 2207 | return -ENOMEDIUM; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2208 | |
| 2209 | ice_print_topo_conflict(vsi); |
| 2210 | |
Anirudh Venkataramanan | 4fc5fbe | 2021-07-16 15:16:39 -0700 | [diff] [blame] | 2211 | if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && |
| 2212 | phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2213 | return -EPERM; |
| 2214 | |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2215 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2216 | return ice_force_phys_link_state(vsi, true); |
| 2217 | |
| 2218 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
| 2219 | if (!pcaps) |
| 2220 | return -ENOMEM; |
| 2221 | |
| 2222 | /* Get current PHY config */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2223 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
| 2224 | NULL); |
| 2225 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 2226 | dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2227 | vsi->vsi_num, err); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2228 | goto done; |
| 2229 | } |
| 2230 | |
| 2231 | /* If PHY enable link is configured and configuration has not changed, |
| 2232 | * there's nothing to do |
| 2233 | */ |
| 2234 | if (pcaps->caps & ICE_AQC_PHY_EN_LINK && |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2235 | ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2236 | goto done; |
| 2237 | |
| 2238 | /* Use PHY topology as baseline for configuration */ |
| 2239 | memset(pcaps, 0, sizeof(*pcaps)); |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2240 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2241 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, |
| 2242 | pcaps, NULL); |
Anirudh Venkataramanan | 0a02944 | 2021-03-25 15:35:12 -0700 | [diff] [blame] | 2243 | else |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2244 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, |
| 2245 | pcaps, NULL); |
| 2246 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 2247 | dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2248 | vsi->vsi_num, err); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2249 | goto done; |
| 2250 | } |
| 2251 | |
| 2252 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); |
| 2253 | if (!cfg) { |
| 2254 | err = -ENOMEM; |
| 2255 | goto done; |
| 2256 | } |
| 2257 | |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2258 | ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2259 | |
| 2260 | /* Speed - If default override pending, use curr_user_phy_cfg set in |
| 2261 | * ice_init_phy_cfg_dflt_override(). |
| 2262 | */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2263 | if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2264 | vsi->back->state)) { |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2265 | cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; |
| 2266 | cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; |
Paul Greenwalt | ea78ce4 | 2020-07-09 09:16:07 -0700 | [diff] [blame] | 2267 | } else { |
| 2268 | u64 phy_low = 0, phy_high = 0; |
| 2269 | |
| 2270 | ice_update_phy_type(&phy_low, &phy_high, |
| 2271 | pi->phy.curr_user_speed_req); |
| 2272 | cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); |
| 2273 | cfg->phy_type_high = pcaps->phy_type_high & |
| 2274 | cpu_to_le64(phy_high); |
| 2275 | } |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2276 | |
| 2277 | /* Can't provide what was requested; use PHY capabilities */ |
| 2278 | if (!cfg->phy_type_low && !cfg->phy_type_high) { |
| 2279 | cfg->phy_type_low = pcaps->phy_type_low; |
| 2280 | cfg->phy_type_high = pcaps->phy_type_high; |
| 2281 | } |
| 2282 | |
| 2283 | /* FEC */ |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2284 | ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2285 | |
| 2286 | /* Can't provide what was requested; use PHY capabilities */ |
| 2287 | if (cfg->link_fec_opt != |
| 2288 | (cfg->link_fec_opt & pcaps->link_fec_options)) { |
| 2289 | cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; |
| 2290 | cfg->link_fec_opt = pcaps->link_fec_options; |
| 2291 | } |
| 2292 | |
| 2293 | /* Flow Control - always supported; no need to check against |
| 2294 | * capabilities |
| 2295 | */ |
Anirudh Venkataramanan | efc1edd | 2021-03-25 15:35:15 -0700 | [diff] [blame] | 2296 | ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2297 | |
| 2298 | /* Enable link and link update */ |
| 2299 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; |
| 2300 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2301 | err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 2302 | if (err) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 2303 | dev_err(dev, "Failed to set phy config, VSI %d error %d\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 2304 | vsi->vsi_num, err); |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2305 | |
| 2306 | kfree(cfg); |
| 2307 | done: |
| 2308 | kfree(pcaps); |
| 2309 | return err; |
| 2310 | } |
| 2311 | |
| 2312 | /** |
| 2313 | * ice_check_media_subtask - Check for media |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2314 | * @pf: pointer to PF struct |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2315 | * |
| 2316 | * If media is available, then initialize the PHY user configuration if it has |
| 2317 | * not been done yet, and configure the PHY if the interface is up. |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2318 | */ |
| 2319 | static void ice_check_media_subtask(struct ice_pf *pf) |
| 2320 | { |
| 2321 | struct ice_port_info *pi; |
| 2322 | struct ice_vsi *vsi; |
| 2323 | int err; |
| 2324 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2325 | /* No need to check for media if it's already present */ |
| 2326 | if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2327 | return; |
| 2328 | |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2329 | vsi = ice_get_main_vsi(pf); |
| 2330 | if (!vsi) |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2331 | return; |
| 2332 | |
| 2333 | /* Refresh link info and check if media is present */ |
| 2334 | pi = vsi->port_info; |
| 2335 | err = ice_update_link_info(pi); |
| 2336 | if (err) |
| 2337 | return; |
| 2338 | |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 2339 | ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); |
Anirudh Venkataramanan | c77849f5 | 2021-05-06 08:40:01 -0700 | [diff] [blame] | 2340 | |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2341 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2342 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2343 | ice_init_phy_user_cfg(pi); |
| 2344 | |
| 2345 | /* PHY settings are reset on media insertion; reconfigure the |
| 2346 | * PHY to preserve the settings. |
| 2347 | */ |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 2348 | if (test_bit(ICE_VSI_DOWN, vsi->state) && |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2349 | test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2350 | return; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 2351 | |
| 2352 | err = ice_configure_phy(vsi); |
| 2353 | if (!err) |
| 2354 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2355 | |
| 2356 | /* A Link Status Event will be generated; the event handler |
| 2357 | * will complete bringing the interface up |
| 2358 | */ |
| 2359 | } |
| 2360 | } |
| 2361 | |
| 2362 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 2363 | * ice_service_task - manage and run subtasks |
| 2364 | * @work: pointer to work_struct contained by the PF struct |
| 2365 | */ |
| 2366 | static void ice_service_task(struct work_struct *work) |
| 2367 | { |
| 2368 | struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); |
| 2369 | unsigned long start_time = jiffies; |
| 2370 | |
| 2371 | /* subtasks */ |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 2372 | |
| 2373 | /* process reset requests first */ |
| 2374 | ice_reset_subtask(pf); |
| 2375 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 2376 | /* bail if a reset/recovery cycle is pending or rebuild failed */ |
Dave Ertman | 5df7e45 | 2018-09-19 17:23:11 -0700 | [diff] [blame] | 2377 | if (ice_is_reset_in_progress(pf->state) || |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2378 | test_bit(ICE_SUSPENDED, pf->state) || |
| 2379 | test_bit(ICE_NEEDS_RESTART, pf->state)) { |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 2380 | ice_service_task_complete(pf); |
| 2381 | return; |
| 2382 | } |
| 2383 | |
Alexander Lobakin | 32d53c0 | 2022-03-23 13:43:52 +0100 | [diff] [blame] | 2384 | if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { |
| 2385 | struct iidc_event *event; |
| 2386 | |
| 2387 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 2388 | if (event) { |
| 2389 | set_bit(IIDC_EVENT_CRIT_ERR, event->type); |
| 2390 | /* report the entire OICR value to AUX driver */ |
| 2391 | swap(event->reg, pf->oicr_err_reg); |
| 2392 | ice_send_event_to_aux(pf, event); |
| 2393 | kfree(event); |
| 2394 | } |
| 2395 | } |
| 2396 | |
Dave Ertman | 248401cb | 2023-03-10 11:48:33 -0800 | [diff] [blame] | 2397 | /* Unplug the aux dev if requested; if an unplug request came in |
| 2398 | * while a plug request was being processed, this handles it |
| 2399 | */ |
| 2400 | if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) |
| 2401 | ice_unplug_aux_dev(pf); |
Dave Ertman | 5dbbbd0 | 2022-01-20 16:27:56 -0800 | [diff] [blame] | 2402 | |
Dave Ertman | 248401cb | 2023-03-10 11:48:33 -0800 | [diff] [blame] | 2403 | /* Plug aux device per request */ |
| 2404 | if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) |
| 2405 | ice_plug_aux_dev(pf); |
Ivan Vecera | 5cb1ebd | 2022-03-10 18:16:41 +0100 | [diff] [blame] | 2406 | |
Dave Ertman | 97b0129 | 2022-02-18 12:39:25 -0800 | [diff] [blame] | 2407 | if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { |
| 2408 | struct iidc_event *event; |
| 2409 | |
| 2410 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 2411 | if (event) { |
| 2412 | set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); |
| 2413 | ice_send_event_to_aux(pf, event); |
| 2414 | kfree(event); |
| 2415 | } |
| 2416 | } |
| 2417 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 2418 | ice_clean_adminq_subtask(pf); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 2419 | ice_check_media_subtask(pf); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 2420 | ice_check_for_hang_subtask(pf); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 2421 | ice_sync_fltr_subtask(pf); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 2422 | ice_handle_mdd_event(pf); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 2423 | ice_watchdog_subtask(pf); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 2424 | |
| 2425 | if (ice_is_safe_mode(pf)) { |
| 2426 | ice_service_task_complete(pf); |
| 2427 | return; |
| 2428 | } |
| 2429 | |
| 2430 | ice_process_vflr_event(pf); |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 2431 | ice_clean_mailboxq_subtask(pf); |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 2432 | ice_clean_sbq_subtask(pf); |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 2433 | ice_sync_arfs_fltrs(pf); |
Qi Zhang | d621831 | 2021-03-09 11:08:10 +0800 | [diff] [blame] | 2434 | ice_flush_fdir_ctx(pf); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2435 | |
| 2436 | /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 2437 | ice_service_task_complete(pf); |
| 2438 | |
| 2439 | /* If the tasks have taken longer than one service timer period |
| 2440 | * or there is more work to be done, reset the service timer to |
| 2441 | * schedule the service task now. |
| 2442 | */ |
| 2443 | if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2444 | test_bit(ICE_MDD_EVENT_PENDING, pf->state) || |
| 2445 | test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || |
| 2446 | test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || |
| 2447 | test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 2448 | test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2449 | test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 2450 | mod_timer(&pf->serv_tmr, jiffies); |
| 2451 | } |
| 2452 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 2453 | /** |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 2454 | * ice_set_ctrlq_len - helper function to set controlq length |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 2455 | * @hw: pointer to the HW instance |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 2456 | */ |
| 2457 | static void ice_set_ctrlq_len(struct ice_hw *hw) |
| 2458 | { |
| 2459 | hw->adminq.num_rq_entries = ICE_AQ_LEN; |
| 2460 | hw->adminq.num_sq_entries = ICE_AQ_LEN; |
| 2461 | hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; |
| 2462 | hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; |
Lukasz Czapnik | c8a1071 | 2020-02-27 10:15:00 -0800 | [diff] [blame] | 2463 | hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; |
Brett Creeley | 1183621 | 2019-07-25 01:55:38 -0700 | [diff] [blame] | 2464 | hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; |
Anirudh Venkataramanan | 75d2b25 | 2018-09-19 17:42:54 -0700 | [diff] [blame] | 2465 | hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
| 2466 | hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 2467 | hw->sbq.num_rq_entries = ICE_SBQ_LEN; |
| 2468 | hw->sbq.num_sq_entries = ICE_SBQ_LEN; |
| 2469 | hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; |
| 2470 | hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 2471 | } |
| 2472 | |
| 2473 | /** |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 2474 | * ice_schedule_reset - schedule a reset |
| 2475 | * @pf: board private structure |
| 2476 | * @reset: reset being requested |
| 2477 | */ |
| 2478 | int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) |
| 2479 | { |
| 2480 | struct device *dev = ice_pf_to_dev(pf); |
| 2481 | |
| 2482 | /* bail out if earlier reset has failed */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2483 | if (test_bit(ICE_RESET_FAILED, pf->state)) { |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 2484 | dev_dbg(dev, "earlier reset has failed\n"); |
| 2485 | return -EIO; |
| 2486 | } |
| 2487 | /* bail if reset/recovery already in progress */ |
| 2488 | if (ice_is_reset_in_progress(pf->state)) { |
| 2489 | dev_dbg(dev, "Reset already in progress\n"); |
| 2490 | return -EBUSY; |
| 2491 | } |
| 2492 | |
| 2493 | switch (reset) { |
| 2494 | case ICE_RESET_PFR: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2495 | set_bit(ICE_PFR_REQ, pf->state); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 2496 | break; |
| 2497 | case ICE_RESET_CORER: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2498 | set_bit(ICE_CORER_REQ, pf->state); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 2499 | break; |
| 2500 | case ICE_RESET_GLOBR: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 2501 | set_bit(ICE_GLOBR_REQ, pf->state); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 2502 | break; |
| 2503 | default: |
| 2504 | return -EINVAL; |
| 2505 | } |
| 2506 | |
| 2507 | ice_service_task_schedule(pf); |
| 2508 | return 0; |
| 2509 | } |
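
/* Usage sketch (not part of the driver): a caller that has detected a fatal
 * condition can request a PF reset and let the service task perform it.
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "PF reset could not be scheduled\n");
 */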
| 2510 | |
| 2511 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2512 | * ice_irq_affinity_notify - Callback for affinity changes |
| 2513 | * @notify: context as to what irq was changed |
| 2514 | * @mask: the new affinity mask |
| 2515 | * |
| 2516 | * This is a callback function used by the irq_set_affinity_notifier function |
| 2517 | * so that we may register to receive changes to the irq affinity masks. |
| 2518 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 2519 | static void |
| 2520 | ice_irq_affinity_notify(struct irq_affinity_notify *notify, |
| 2521 | const cpumask_t *mask) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2522 | { |
| 2523 | struct ice_q_vector *q_vector = |
| 2524 | container_of(notify, struct ice_q_vector, affinity_notify); |
| 2525 | |
| 2526 | cpumask_copy(&q_vector->affinity_mask, mask); |
| 2527 | } |
| 2528 | |
| 2529 | /** |
| 2530 | * ice_irq_affinity_release - Callback for affinity notifier release |
| 2531 | * @ref: internal core kernel usage |
| 2532 | * |
| 2533 | * This is a callback function used by the irq_set_affinity_notifier function |
| 2534 | * to inform the current notification subscriber that they will no longer |
| 2535 | * receive notifications. |
| 2536 | */ |
| 2537 | static void ice_irq_affinity_release(struct kref __always_unused *ref) {} |
| 2538 | |
| 2539 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2540 | * ice_vsi_ena_irq - Enable IRQ for the given VSI |
| 2541 | * @vsi: the VSI being configured |
| 2542 | */ |
| 2543 | static int ice_vsi_ena_irq(struct ice_vsi *vsi) |
| 2544 | { |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 2545 | struct ice_hw *hw = &vsi->back->hw; |
| 2546 | int i; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2547 | |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 2548 | ice_for_each_q_vector(vsi, i) |
| 2549 | ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2550 | |
| 2551 | ice_flush(hw); |
| 2552 | return 0; |
| 2553 | } |
| 2554 | |
| 2555 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2556 | * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI |
| 2557 | * @vsi: the VSI being configured |
| 2558 | * @basename: name for the vector |
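 *
 * The per-vector IRQ names are built from @basename; e.g. a basename of
 * "ice-eth0" (hypothetical) yields "ice-eth0-TxRx-0", "ice-eth0-TxRx-1", ...
 * for vectors that carry both Tx and Rx rings.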
| 2559 | */ |
| 2560 | static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) |
| 2561 | { |
| 2562 | int q_vectors = vsi->num_q_vectors; |
| 2563 | struct ice_pf *pf = vsi->back; |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 2564 | struct device *dev; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2565 | int rx_int_idx = 0; |
| 2566 | int tx_int_idx = 0; |
| 2567 | int vector, err; |
| 2568 | int irq_num; |
| 2569 | |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 2570 | dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2571 | for (vector = 0; vector < q_vectors; vector++) { |
| 2572 | struct ice_q_vector *q_vector = vsi->q_vectors[vector]; |
| 2573 | |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 2574 | irq_num = q_vector->irq.virq; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2575 | |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2576 | if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2577 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2578 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); |
| 2579 | tx_int_idx++; |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2580 | } else if (q_vector->rx.rx_ring) { |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2581 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2582 | "%s-%s-%d", basename, "rx", rx_int_idx++); |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2583 | } else if (q_vector->tx.tx_ring) { |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2584 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2585 | "%s-%s-%d", basename, "tx", tx_int_idx++); |
| 2586 | } else { |
| 2587 | /* skip this unused q_vector */ |
| 2588 | continue; |
| 2589 | } |
Jacob Keller | b03d519 | 2022-02-16 13:37:29 -0800 | [diff] [blame] | 2590 | if (vsi->type == ICE_VSI_CTRL && vsi->vf) |
Qi Zhang | da62c5f | 2021-03-09 11:08:03 +0800 | [diff] [blame] | 2591 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, |
| 2592 | IRQF_SHARED, q_vector->name, |
| 2593 | q_vector); |
| 2594 | else |
| 2595 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, |
| 2596 | 0, q_vector->name, q_vector); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2597 | if (err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 2598 | netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", |
| 2599 | err); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2600 | goto free_q_irqs; |
| 2601 | } |
| 2602 | |
| 2603 | /* register for affinity change notifications */ |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 2604 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { |
| 2605 | struct irq_affinity_notify *affinity_notify; |
| 2606 | |
| 2607 | affinity_notify = &q_vector->affinity_notify; |
| 2608 | affinity_notify->notify = ice_irq_affinity_notify; |
| 2609 | affinity_notify->release = ice_irq_affinity_release; |
| 2610 | irq_set_affinity_notifier(irq_num, affinity_notify); |
| 2611 | } |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2612 | |
| 2613 | /* assign the mask for this irq */ |
Michal Schmidt | dee5576 | 2024-06-07 14:22:34 -0700 | [diff] [blame] | 2614 | irq_update_affinity_hint(irq_num, &q_vector->affinity_mask); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2615 | } |
| 2616 | |
Alexander Lobakin | d7442f5 | 2022-04-04 18:15:09 +0200 | [diff] [blame] | 2617 | err = ice_set_cpu_rx_rmap(vsi); |
| 2618 | if (err) { |
| 2619 | netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", |
| 2620 | vsi->vsi_num, ERR_PTR(err)); |
| 2621 | goto free_q_irqs; |
| 2622 | } |
| 2623 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2624 | vsi->irqs_ready = true; |
| 2625 | return 0; |
| 2626 | |
| 2627 | free_q_irqs: |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 2628 | while (vector--) { |
| 2629 | irq_num = vsi->q_vectors[vector]->irq.virq; |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 2630 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) |
| 2631 | irq_set_affinity_notifier(irq_num, NULL); |
Michal Schmidt | dee5576 | 2024-06-07 14:22:34 -0700 | [diff] [blame] | 2632 | irq_update_affinity_hint(irq_num, NULL); |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 2633 | devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 2634 | } |
| 2635 | return err; |
| 2636 | } |
| 2637 | |
| 2638 | /** |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2639 | * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP |
| 2640 | * @vsi: VSI to setup Tx rings used by XDP |
| 2641 | * |
| 2642 | * Return 0 on success and negative value on error |
| 2643 | */ |
| 2644 | static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) |
| 2645 | { |
Anirudh Venkataramanan | 9a94684 | 2020-02-06 01:20:09 -0800 | [diff] [blame] | 2646 | struct device *dev = ice_pf_to_dev(vsi->back); |
Maciej Fijalkowski | 9610bd9 | 2021-08-19 14:00:02 +0200 | [diff] [blame] | 2647 | struct ice_tx_desc *tx_desc; |
| 2648 | int i, j; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2649 | |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 2650 | ice_for_each_xdp_txq(vsi, i) { |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2651 | u16 xdp_q_idx = vsi->alloc_txq + i; |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 2652 | struct ice_ring_stats *ring_stats; |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2653 | struct ice_tx_ring *xdp_ring; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2654 | |
| 2655 | xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2656 | if (!xdp_ring) |
| 2657 | goto free_xdp_rings; |
| 2658 | |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 2659 | ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); |
| 2660 | if (!ring_stats) { |
| 2661 | ice_free_tx_ring(xdp_ring); |
| 2662 | goto free_xdp_rings; |
| 2663 | } |
| 2664 | |
| 2665 | xdp_ring->ring_stats = ring_stats; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2666 | xdp_ring->q_index = xdp_q_idx; |
| 2667 | xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2668 | xdp_ring->vsi = vsi; |
| 2669 | xdp_ring->netdev = NULL; |
| 2670 | xdp_ring->dev = dev; |
| 2671 | xdp_ring->count = vsi->num_tx_desc; |
Ciara Loftus | b1d95cc | 2020-06-09 13:19:45 +0000 | [diff] [blame] | 2672 | WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2673 | if (ice_setup_tx_ring(xdp_ring)) |
| 2674 | goto free_xdp_rings; |
| 2675 | ice_set_ring_xdp(xdp_ring); |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 2676 | spin_lock_init(&xdp_ring->tx_lock); |
Maciej Fijalkowski | 9610bd9 | 2021-08-19 14:00:02 +0200 | [diff] [blame] | 2677 | for (j = 0; j < xdp_ring->count; j++) { |
| 2678 | tx_desc = ICE_TX_DESC(xdp_ring, j); |
Maciej Fijalkowski | e19778e | 2022-03-17 19:36:29 +0100 | [diff] [blame] | 2679 | tx_desc->cmd_type_offset_bsz = 0; |
Maciej Fijalkowski | 9610bd9 | 2021-08-19 14:00:02 +0200 | [diff] [blame] | 2680 | } |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2681 | } |
| 2682 | |
| 2683 | return 0; |
| 2684 | |
| 2685 | free_xdp_rings: |
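/* i indexes the ring that failed; walk back and release the rings
 * (and their stats) that were fully set up before the failure.
 */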
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 2686 | for (; i >= 0; i--) { |
| 2687 | if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { |
| 2688 | kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); |
| 2689 | vsi->xdp_rings[i]->ring_stats = NULL; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2690 | ice_free_tx_ring(vsi->xdp_rings[i]); |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 2691 | } |
| 2692 | } |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2693 | return -ENOMEM; |
| 2694 | } |
| 2695 | |
| 2696 | /** |
| 2697 | * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI |
| 2698 | * @vsi: VSI to set the bpf prog on |
| 2699 | * @prog: the bpf prog pointer |
| 2700 | */ |
| 2701 | static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) |
| 2702 | { |
| 2703 | struct bpf_prog *old_prog; |
| 2704 | int i; |
| 2705 | |
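/* Swap the VSI program pointer atomically so concurrent readers see either
 * the old or the new program, then propagate the new pointer to every Rx
 * ring; the old program is released only after that.
 */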
| 2706 | old_prog = xchg(&vsi->xdp_prog, prog); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2707 | ice_for_each_rxq(vsi, i) |
| 2708 | WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); |
Maciej Fijalkowski | 46974842 | 2023-06-15 13:33:26 +0200 | [diff] [blame] | 2709 | |
| 2710 | if (old_prog) |
| 2711 | bpf_prog_put(old_prog); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2712 | } |
| 2713 | |
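/* Pick the XDP Tx ring for Rx queue @qid: when the XDP locking static key is
 * enabled the XDP rings are shared, so one is selected by modulo; otherwise
 * return the XDP ring attached to the queue's own interrupt vector.
 */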
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2714 | static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2715 | { |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2716 | struct ice_q_vector *q_vector; |
| 2717 | struct ice_tx_ring *ring; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2718 | |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 2719 | if (static_key_enabled(&ice_xdp_locking_key)) |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2720 | return vsi->xdp_rings[qid % vsi->num_xdp_txq]; |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 2721 | |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2722 | q_vector = vsi->rx_rings[qid]->q_vector; |
| 2723 | ice_for_each_tx_ring(ring, q_vector->tx) |
| 2724 | if (ice_ring_is_xdp(ring)) |
| 2725 | return ring; |
| 2726 | |
| 2727 | return NULL; |
| 2728 | } |
| 2729 | |
| 2730 | /** |
| 2731 | * ice_map_xdp_rings - Map XDP rings to interrupt vectors |
| 2732 | * @vsi: the VSI with XDP rings being configured |
| 2733 | * |
| 2734 | * Map XDP rings to interrupt vectors and perform the configuration steps |
| 2735 | * dependent on the mapping. |
| 2736 | */ |
| 2737 | void ice_map_xdp_rings(struct ice_vsi *vsi) |
| 2738 | { |
| 2739 | int xdp_rings_rem = vsi->num_xdp_txq; |
| 2740 | int v_idx, q_idx; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2741 | |
| 2742 | /* follow the logic from ice_vsi_map_rings_to_vectors */ |
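	/* distribute the XDP rings evenly over the vectors, e.g. 8 XDP
	 * rings on 3 vectors map as 3/3/2: vector 0 gets rings 0-2,
	 * vector 1 gets rings 3-5 and vector 2 gets rings 6-7
	 */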
| 2743 | ice_for_each_q_vector(vsi, v_idx) { |
| 2744 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
| 2745 | int xdp_rings_per_v, q_id, q_base; |
| 2746 | |
| 2747 | xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, |
| 2748 | vsi->num_q_vectors - v_idx); |
| 2749 | q_base = vsi->num_xdp_txq - xdp_rings_rem; |
| 2750 | |
| 2751 | for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2752 | struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2753 | |
| 2754 | xdp_ring->q_vector = q_vector; |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2755 | xdp_ring->next = q_vector->tx.tx_ring; |
| 2756 | q_vector->tx.tx_ring = xdp_ring; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2757 | } |
| 2758 | xdp_rings_rem -= xdp_rings_per_v; |
| 2759 | } |
| 2760 | |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2761 | ice_for_each_rxq(vsi, q_idx) { |
| 2762 | vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, |
| 2763 | q_idx); |
| 2764 | ice_tx_xsk_pool(vsi, q_idx); |
Maciej Fijalkowski | 9ead7e7 | 2022-08-11 20:21:49 +0200 | [diff] [blame] | 2765 | } |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2766 | } |
| 2767 | |
| 2768 | /** |
 | 2769 |  * ice_prepare_xdp_rings - Allocate, configure and set up Tx rings for XDP
| 2770 | * @vsi: VSI to bring up Tx rings used by XDP |
| 2771 | * @prog: bpf program that will be assigned to VSI |
| 2772 | * @cfg_type: create from scratch or restore the existing configuration |
| 2773 | * |
 | 2774 |  * Return 0 on success and a negative value on error
| 2775 | */ |
| 2776 | int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, |
| 2777 | enum ice_xdp_cfg cfg_type) |
| 2778 | { |
| 2779 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
| 2780 | struct ice_pf *pf = vsi->back; |
| 2781 | struct ice_qs_cfg xdp_qs_cfg = { |
| 2782 | .qs_mutex = &pf->avail_q_mutex, |
| 2783 | .pf_map = pf->avail_txqs, |
| 2784 | .pf_map_size = pf->max_pf_txqs, |
| 2785 | .q_count = vsi->num_xdp_txq, |
| 2786 | .scatter_count = ICE_MAX_SCATTER_TXQS, |
| 2787 | .vsi_map = vsi->txq_map, |
| 2788 | .vsi_map_offset = vsi->alloc_txq, |
| 2789 | .mapping_mode = ICE_VSI_MAP_CONTIG |
| 2790 | }; |
| 2791 | struct device *dev; |
| 2792 | int status, i; |
| 2793 | |
| 2794 | dev = ice_pf_to_dev(pf); |
| 2795 | vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, |
| 2796 | sizeof(*vsi->xdp_rings), GFP_KERNEL); |
| 2797 | if (!vsi->xdp_rings) |
| 2798 | return -ENOMEM; |
| 2799 | |
| 2800 | vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; |
| 2801 | if (__ice_vsi_get_qs(&xdp_qs_cfg)) |
| 2802 | goto err_map_xdp; |
| 2803 | |
| 2804 | if (static_key_enabled(&ice_xdp_locking_key)) |
| 2805 | netdev_warn(vsi->netdev, |
| 2806 | "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); |
| 2807 | |
| 2808 | if (ice_xdp_alloc_setup_rings(vsi)) |
| 2809 | goto clear_xdp_rings; |
Maciej Fijalkowski | 9ead7e7 | 2022-08-11 20:21:49 +0200 | [diff] [blame] | 2810 | |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2811 | 	/* omit the scheduler update if in the reset path; XDP queues will be
 | 2812 | 	 * taken into account at the end of ice_vsi_rebuild, where
 | 2813 | 	 * ice_cfg_vsi_lan is called
| 2814 | */ |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2815 | if (cfg_type == ICE_XDP_CFG_PART) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2816 | return 0; |
| 2817 | |
Larysa Zaremba | f3df404 | 2024-06-03 14:42:34 -0700 | [diff] [blame] | 2818 | ice_map_xdp_rings(vsi); |
| 2819 | |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2820 | /* tell the Tx scheduler that right now we have |
| 2821 | * additional queues |
| 2822 | */ |
| 2823 | for (i = 0; i < vsi->tc_cfg.numtc; i++) |
| 2824 | max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; |
| 2825 | |
| 2826 | status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
| 2827 | max_txqs); |
| 2828 | if (status) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 2829 | dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", |
| 2830 | status); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2831 | goto clear_xdp_rings; |
| 2832 | } |
Marta Plantykow | f65ee53 | 2021-10-26 18:47:19 +0200 | [diff] [blame] | 2833 | |
 | 2834 | 	/* assign the prog only when it's not already present on the VSI;
 | 2835 | 	 * this flow is exercised by both the ethtool -L and ndo_bpf paths;
 | 2836 | 	 * a VSI rebuild under ethtool -L could otherwise hit a bpf_prog
 | 2837 | 	 * refcount issue, as we would swap in the same bpf_prog pointer
 | 2838 | 	 * already held in vsi->xdp_prog and call bpf_prog_put on it
 | 2839 | 	 * because it would be treated as an 'old_prog'; for ndo_bpf this
 | 2840 | 	 * is not harmful as dev_xdp_install bumps the refcount before
 | 2841 | 	 * calling the op exposed by the driver
 | 2842 | 	 */
| 2843 | if (!ice_is_xdp_ena_vsi(vsi)) |
| 2844 | ice_vsi_assign_bpf_prog(vsi, prog); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2845 | |
| 2846 | return 0; |
| 2847 | clear_xdp_rings: |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 2848 | ice_for_each_xdp_txq(vsi, i) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2849 | if (vsi->xdp_rings[i]) { |
| 2850 | kfree_rcu(vsi->xdp_rings[i], rcu); |
| 2851 | vsi->xdp_rings[i] = NULL; |
| 2852 | } |
| 2853 | |
| 2854 | err_map_xdp: |
| 2855 | mutex_lock(&pf->avail_q_mutex); |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 2856 | ice_for_each_xdp_txq(vsi, i) { |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2857 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
| 2858 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
| 2859 | } |
| 2860 | mutex_unlock(&pf->avail_q_mutex); |
| 2861 | |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 2862 | devm_kfree(dev, vsi->xdp_rings); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2863 | return -ENOMEM; |
| 2864 | } |
| 2865 | |
| 2866 | /** |
| 2867 | * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings |
| 2868 | * @vsi: VSI to remove XDP rings |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2869 | * @cfg_type: disable XDP permanently or allow it to be restored later |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2870 | * |
| 2871 | * Detach XDP rings from irq vectors, clean up the PF bitmap and free |
| 2872 | * resources |
| 2873 | */ |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2874 | int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2875 | { |
| 2876 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; |
| 2877 | struct ice_pf *pf = vsi->back; |
| 2878 | int i, v_idx; |
| 2879 | |
 | 2880 | 	/* q_vectors are freed in the reset path so there's no point in detaching
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2881 | * rings |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2882 | */ |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2883 | if (cfg_type == ICE_XDP_CFG_PART) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2884 | goto free_qmap; |
| 2885 | |
| 2886 | ice_for_each_q_vector(vsi, v_idx) { |
| 2887 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2888 | struct ice_tx_ring *ring; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2889 | |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2890 | ice_for_each_tx_ring(ring, q_vector->tx) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2891 | if (!ring->tx_buf || !ice_ring_is_xdp(ring)) |
| 2892 | break; |
| 2893 | |
| 2894 | /* restore the value of last node prior to XDP setup */ |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2895 | q_vector->tx.tx_ring = ring; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2896 | } |
| 2897 | |
| 2898 | free_qmap: |
| 2899 | mutex_lock(&pf->avail_q_mutex); |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 2900 | ice_for_each_xdp_txq(vsi, i) { |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2901 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
| 2902 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; |
| 2903 | } |
| 2904 | mutex_unlock(&pf->avail_q_mutex); |
| 2905 | |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 2906 | ice_for_each_xdp_txq(vsi, i) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2907 | if (vsi->xdp_rings[i]) { |
Maciej Fijalkowski | f9124c6 | 2022-03-17 19:36:27 +0100 | [diff] [blame] | 2908 | if (vsi->xdp_rings[i]->desc) { |
| 2909 | synchronize_rcu(); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2910 | ice_free_tx_ring(vsi->xdp_rings[i]); |
Maciej Fijalkowski | f9124c6 | 2022-03-17 19:36:27 +0100 | [diff] [blame] | 2911 | } |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 2912 | kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); |
| 2913 | vsi->xdp_rings[i]->ring_stats = NULL; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2914 | kfree_rcu(vsi->xdp_rings[i], rcu); |
| 2915 | vsi->xdp_rings[i] = NULL; |
| 2916 | } |
| 2917 | |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 2918 | devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2919 | vsi->xdp_rings = NULL; |
| 2920 | |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 2921 | if (static_key_enabled(&ice_xdp_locking_key)) |
| 2922 | static_branch_dec(&ice_xdp_locking_key); |
| 2923 | |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 2924 | if (cfg_type == ICE_XDP_CFG_PART) |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2925 | return 0; |
| 2926 | |
| 2927 | ice_vsi_assign_bpf_prog(vsi, NULL); |
| 2928 | |
| 2929 | /* notify Tx scheduler that we destroyed XDP queues and bring |
| 2930 | * back the old number of child nodes |
| 2931 | */ |
| 2932 | for (i = 0; i < vsi->tc_cfg.numtc; i++) |
| 2933 | max_txqs[i] = vsi->num_txq; |
| 2934 | |
Marta Plantykow | c8f135c | 2020-05-15 17:42:15 -0700 | [diff] [blame] | 2935 | /* change number of XDP Tx queues to 0 */ |
| 2936 | vsi->num_xdp_txq = 0; |
| 2937 | |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2938 | return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
| 2939 | max_txqs); |
| 2940 | } |
| 2941 | |
| 2942 | /** |
Michal Swiatkowski | c7a2190 | 2020-11-02 04:37:27 -0500 | [diff] [blame] | 2943 |  * ice_vsi_rx_napi_schedule - Schedule NAPI on Rx queues of the VSI
| 2944 | * @vsi: VSI to schedule napi on |
| 2945 | */ |
| 2946 | static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) |
| 2947 | { |
| 2948 | int i; |
| 2949 | |
| 2950 | ice_for_each_rxq(vsi, i) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 2951 | struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; |
Michal Swiatkowski | c7a2190 | 2020-11-02 04:37:27 -0500 | [diff] [blame] | 2952 | |
Maciej Fijalkowski | ebc33a3 | 2024-07-26 20:17:14 +0200 | [diff] [blame] | 2953 | if (READ_ONCE(rx_ring->xsk_pool)) |
Michal Swiatkowski | c7a2190 | 2020-11-02 04:37:27 -0500 | [diff] [blame] | 2954 | napi_schedule(&rx_ring->q_vector->napi); |
| 2955 | } |
| 2956 | } |
| 2957 | |
| 2958 | /** |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 2959 |  * ice_vsi_determine_xdp_res - figure out how many XDP Tx queues the VSI can have
 | 2960 |  * @vsi: VSI to determine the count of XDP Tx queues for
 | 2961 |  *
 | 2962 |  * Returns 0 if the available Tx queue count is at least half of the CPU count,
 | 2963 |  * -ENOMEM otherwise
| 2964 | */ |
| 2965 | int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) |
| 2966 | { |
| 2967 | u16 avail = ice_get_avail_txq_count(vsi->back); |
| 2968 | u16 cpus = num_possible_cpus(); |
| 2969 | |
| 2970 | if (avail < cpus / 2) |
| 2971 | return -ENOMEM; |
| 2972 | |
| 2973 | vsi->num_xdp_txq = min_t(u16, avail, cpus); |
| 2974 | |
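	/* with fewer XDP Tx rings than CPUs, a ring can be shared by several
	 * CPUs, so enable the static key that makes the XDP Tx path take the
	 * per-ring lock
	 */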
| 2975 | if (vsi->num_xdp_txq < cpus) |
| 2976 | static_branch_inc(&ice_xdp_locking_key); |
| 2977 | |
| 2978 | return 0; |
| 2979 | } |
| 2980 | |
| 2981 | /** |
Maciej Fijalkowski | 60bc72b | 2023-01-31 21:45:00 +0100 | [diff] [blame] | 2982 | * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP |
| 2983 | * @vsi: Pointer to VSI structure |
| 2984 | */ |
| 2985 | static int ice_max_xdp_frame_size(struct ice_vsi *vsi) |
| 2986 | { |
| 2987 | if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) |
| 2988 | return ICE_RXBUF_1664; |
| 2989 | else |
| 2990 | return ICE_RXBUF_3072; |
| 2991 | } |
| 2992 | |
| 2993 | /** |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 2994 | * ice_xdp_setup_prog - Add or remove XDP eBPF program |
| 2995 | * @vsi: VSI to setup XDP for |
| 2996 | * @prog: XDP program |
| 2997 | * @extack: netlink extended ack |
| 2998 | */ |
| 2999 | static int |
| 3000 | ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, |
| 3001 | struct netlink_ext_ack *extack) |
| 3002 | { |
Maciej Fijalkowski | 60bc72b | 2023-01-31 21:45:00 +0100 | [diff] [blame] | 3003 | unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3004 | bool if_running = netif_running(vsi->netdev); |
| 3005 | int ret = 0, xdp_ring_err = 0; |
| 3006 | |
Maciej Fijalkowski | 2fba7dc | 2023-01-31 21:45:03 +0100 | [diff] [blame] | 3007 | if (prog && !prog->aux->xdp_has_frags) { |
| 3008 | if (frame_size > ice_max_xdp_frame_size(vsi)) { |
| 3009 | NL_SET_ERR_MSG_MOD(extack, |
| 3010 | "MTU is too large for linear frames and XDP prog does not support frags"); |
| 3011 | return -EOPNOTSUPP; |
| 3012 | } |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3013 | } |
| 3014 | |
Maciej Fijalkowski | 46974842 | 2023-06-15 13:33:26 +0200 | [diff] [blame] | 3015 | /* hot swap progs and avoid toggling link */ |
| 3016 | if (ice_is_xdp_ena_vsi(vsi) == !!prog) { |
| 3017 | ice_vsi_assign_bpf_prog(vsi, prog); |
| 3018 | return 0; |
| 3019 | } |
| 3020 | |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3021 | /* need to stop netdev while setting up the program for Rx rings */ |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 3022 | if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3023 | ret = ice_down(vsi); |
| 3024 | if (ret) { |
Jesse Brandeburg | af23635 | 2020-02-13 13:31:26 -0800 | [diff] [blame] | 3025 | NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3026 | return ret; |
| 3027 | } |
| 3028 | } |
| 3029 | |
| 3030 | if (!ice_is_xdp_ena_vsi(vsi) && prog) { |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 3031 | xdp_ring_err = ice_vsi_determine_xdp_res(vsi); |
| 3032 | if (xdp_ring_err) { |
| 3033 | NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); |
| 3034 | } else { |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 3035 | xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, |
| 3036 | ICE_XDP_CFG_FULL); |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 3037 | if (xdp_ring_err) |
| 3038 | NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); |
| 3039 | } |
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 3040 | xdp_features_set_redirect_target(vsi->netdev, true); |
Przemyslaw Patynowski | 7e753eb | 2022-08-11 12:09:22 +0200 | [diff] [blame] | 3041 | /* reallocate Rx queues that are used for zero-copy */ |
| 3042 | xdp_ring_err = ice_realloc_zc_buf(vsi, true); |
| 3043 | if (xdp_ring_err) |
| 3044 | NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3045 | } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { |
Marek Majtyka | 66c0e13 | 2023-02-01 11:24:18 +0100 | [diff] [blame] | 3046 | xdp_features_clear_redirect_target(vsi->netdev); |
Larysa Zaremba | 744d197 | 2024-06-03 14:42:33 -0700 | [diff] [blame] | 3047 | xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3048 | if (xdp_ring_err) |
Jesse Brandeburg | af23635 | 2020-02-13 13:31:26 -0800 | [diff] [blame] | 3049 | NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); |
Przemyslaw Patynowski | 7e753eb | 2022-08-11 12:09:22 +0200 | [diff] [blame] | 3050 | /* reallocate Rx queues that were used for zero-copy */ |
| 3051 | xdp_ring_err = ice_realloc_zc_buf(vsi, false); |
| 3052 | if (xdp_ring_err) |
| 3053 | NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3054 | } |
| 3055 | |
| 3056 | if (if_running) |
| 3057 | ret = ice_up(vsi); |
| 3058 | |
Michal Swiatkowski | c7a2190 | 2020-11-02 04:37:27 -0500 | [diff] [blame] | 3059 | if (!ret && prog) |
| 3060 | ice_vsi_rx_napi_schedule(vsi); |
Krzysztof Kazimierczak | 2d4238f | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3061 | |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3062 | return (ret || xdp_ring_err) ? -ENOMEM : 0; |
| 3063 | } |
| 3064 | |
| 3065 | /** |
Maciej Fijalkowski | ebc5399 | 2021-05-20 08:34:59 +0200 | [diff] [blame] | 3066 | * ice_xdp_safe_mode - XDP handler for safe mode |
| 3067 | * @dev: netdevice |
| 3068 | * @xdp: XDP command |
| 3069 | */ |
| 3070 | static int ice_xdp_safe_mode(struct net_device __always_unused *dev, |
| 3071 | struct netdev_bpf *xdp) |
| 3072 | { |
| 3073 | NL_SET_ERR_MSG_MOD(xdp->extack, |
| 3074 | "Please provide working DDP firmware package in order to use XDP\n" |
| 3075 | "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); |
| 3076 | return -EOPNOTSUPP; |
| 3077 | } |
| 3078 | |
| 3079 | /** |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3080 | * ice_xdp - implements XDP handler |
| 3081 | * @dev: netdevice |
| 3082 | * @xdp: XDP command |
| 3083 | */ |
| 3084 | static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
| 3085 | { |
| 3086 | struct ice_netdev_priv *np = netdev_priv(dev); |
| 3087 | struct ice_vsi *vsi = np->vsi; |
| 3088 | |
| 3089 | if (vsi->type != ICE_VSI_PF) { |
Jesse Brandeburg | af23635 | 2020-02-13 13:31:26 -0800 | [diff] [blame] | 3090 | NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3091 | return -EINVAL; |
| 3092 | } |
| 3093 | |
| 3094 | switch (xdp->command) { |
| 3095 | case XDP_SETUP_PROG: |
| 3096 | return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); |
Magnus Karlsson | 1742b3d | 2020-08-28 10:26:15 +0200 | [diff] [blame] | 3097 | case XDP_SETUP_XSK_POOL: |
| 3098 | return ice_xsk_pool_setup(vsi, xdp->xsk.pool, |
Krzysztof Kazimierczak | 2d4238f | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3099 | xdp->xsk.queue_id); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 3100 | default: |
| 3101 | return -EINVAL; |
| 3102 | } |
| 3103 | } |
| 3104 | |
| 3105 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3106 | * ice_ena_misc_vector - enable the non-queue interrupts |
| 3107 | * @pf: board private structure |
| 3108 | */ |
| 3109 | static void ice_ena_misc_vector(struct ice_pf *pf) |
| 3110 | { |
| 3111 | struct ice_hw *hw = &pf->hw; |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3112 | u32 pf_intr_start_offset; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3113 | u32 val; |
| 3114 | |
Paul Greenwalt | 9d5c5a5 | 2020-02-13 13:31:16 -0800 | [diff] [blame] | 3115 | /* Disable anti-spoof detection interrupt to prevent spurious event |
 | 3116 | 	 * interrupts during a function reset. Anti-spoof functionality is
| 3117 | * still supported. |
| 3118 | */ |
| 3119 | val = rd32(hw, GL_MDCK_TX_TDPU); |
| 3120 | val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; |
| 3121 | wr32(hw, GL_MDCK_TX_TDPU, val); |
| 3122 | |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3123 | /* clear things first */ |
| 3124 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
| 3125 | rd32(hw, PFINT_OICR); /* read to clear */ |
| 3126 | |
Bruce Allan | 3bcd7fa | 2018-08-09 06:28:59 -0700 | [diff] [blame] | 3127 | val = (PFINT_OICR_ECC_ERR_M | |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3128 | PFINT_OICR_MAL_DETECT_M | |
| 3129 | PFINT_OICR_GRST_M | |
| 3130 | PFINT_OICR_PCI_EXCEPTION_M | |
Anirudh Venkataramanan | 007676b | 2018-09-19 17:42:57 -0700 | [diff] [blame] | 3131 | PFINT_OICR_VFLR_M | |
Bruce Allan | 3bcd7fa | 2018-08-09 06:28:59 -0700 | [diff] [blame] | 3132 | PFINT_OICR_HMC_ERR_M | |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 3133 | PFINT_OICR_PE_PUSH_M | |
Bruce Allan | 3bcd7fa | 2018-08-09 06:28:59 -0700 | [diff] [blame] | 3134 | PFINT_OICR_PE_CRITERR_M); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3135 | |
| 3136 | wr32(hw, PFINT_OICR_ENA, val); |
| 3137 | |
| 3138 | /* SW_ITR_IDX = 0, but don't change INTENA */ |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3139 | wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3140 | GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3141 | |
| 3142 | if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) |
| 3143 | return; |
| 3144 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; |
| 3145 | wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), |
| 3146 | GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); |
| 3147 | } |
| 3148 | |
| 3149 | /** |
| 3150 | * ice_ll_ts_intr - ll_ts interrupt handler |
| 3151 | * @irq: interrupt number |
 | 3152 |  * @data: pointer to the PF structure
| 3153 | */ |
| 3154 | static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data) |
| 3155 | { |
| 3156 | struct ice_pf *pf = data; |
| 3157 | u32 pf_intr_start_offset; |
| 3158 | struct ice_ptp_tx *tx; |
| 3159 | unsigned long flags; |
| 3160 | struct ice_hw *hw; |
| 3161 | u32 val; |
| 3162 | u8 idx; |
| 3163 | |
| 3164 | hw = &pf->hw; |
| 3165 | tx = &pf->ptp.port.tx; |
| 3166 | spin_lock_irqsave(&tx->lock, flags); |
| 3167 | ice_ptp_complete_tx_single_tstamp(tx); |
| 3168 | |
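	/* request the next pending Tx timestamp, if any; find_next_bit_wrap()
	 * returns tx->len when no other index is in use
	 */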
| 3169 | idx = find_next_bit_wrap(tx->in_use, tx->len, |
| 3170 | tx->last_ll_ts_idx_read + 1); |
| 3171 | if (idx != tx->len) |
| 3172 | ice_ptp_req_tx_single_tstamp(tx, idx); |
| 3173 | spin_unlock_irqrestore(&tx->lock, flags); |
| 3174 | |
| 3175 | val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | |
| 3176 | (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); |
| 3177 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; |
| 3178 | wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), |
| 3179 | val); |
| 3180 | |
| 3181 | return IRQ_HANDLED; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3182 | } |
| 3183 | |
| 3184 | /** |
| 3185 | * ice_misc_intr - misc interrupt handler |
| 3186 | * @irq: interrupt number |
 | 3187 |  * @data: pointer to the PF structure
| 3188 | */ |
| 3189 | static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) |
| 3190 | { |
| 3191 | struct ice_pf *pf = (struct ice_pf *)data; |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3192 | irqreturn_t ret = IRQ_HANDLED; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3193 | struct ice_hw *hw = &pf->hw; |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3194 | struct device *dev; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3195 | u32 oicr, ena_mask; |
| 3196 | |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3197 | dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3198 | set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
| 3199 | set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 3200 | set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3201 | |
| 3202 | oicr = rd32(hw, PFINT_OICR); |
| 3203 | ena_mask = rd32(hw, PFINT_OICR_ENA); |
| 3204 | |
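	/* ena_mask starts out as the currently enabled causes; every cause
	 * handled below is cleared from it so that only unexpected causes are
	 * left for the "unhandled interrupt" check at the end
	 */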
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 3205 | if (oicr & PFINT_OICR_SWINT_M) { |
| 3206 | ena_mask &= ~PFINT_OICR_SWINT_M; |
| 3207 | pf->sw_int_count++; |
| 3208 | } |
| 3209 | |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 3210 | if (oicr & PFINT_OICR_MAL_DETECT_M) { |
| 3211 | ena_mask &= ~PFINT_OICR_MAL_DETECT_M; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3212 | set_bit(ICE_MDD_EVENT_PENDING, pf->state); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 3213 | } |
Anirudh Venkataramanan | 007676b | 2018-09-19 17:42:57 -0700 | [diff] [blame] | 3214 | if (oicr & PFINT_OICR_VFLR_M) { |
Brett Creeley | f844d52 | 2020-02-27 10:14:55 -0800 | [diff] [blame] | 3215 | /* disable any further VFLR event notifications */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3216 | if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { |
Brett Creeley | f844d52 | 2020-02-27 10:14:55 -0800 | [diff] [blame] | 3217 | u32 reg = rd32(hw, PFINT_OICR_ENA); |
| 3218 | |
| 3219 | reg &= ~PFINT_OICR_VFLR_M; |
| 3220 | wr32(hw, PFINT_OICR_ENA, reg); |
| 3221 | } else { |
| 3222 | ena_mask &= ~PFINT_OICR_VFLR_M; |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3223 | set_bit(ICE_VFLR_EVENT_PENDING, pf->state); |
Brett Creeley | f844d52 | 2020-02-27 10:14:55 -0800 | [diff] [blame] | 3224 | } |
Anirudh Venkataramanan | 007676b | 2018-09-19 17:42:57 -0700 | [diff] [blame] | 3225 | } |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 3226 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3227 | if (oicr & PFINT_OICR_GRST_M) { |
| 3228 | u32 reset; |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 3229 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3230 | /* we have a reset warning */ |
| 3231 | ena_mask &= ~PFINT_OICR_GRST_M; |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 3232 | reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, |
| 3233 | rd32(hw, GLGEN_RSTAT)); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3234 | |
| 3235 | if (reset == ICE_RESET_CORER) |
| 3236 | pf->corer_count++; |
| 3237 | else if (reset == ICE_RESET_GLOBR) |
| 3238 | pf->globr_count++; |
Brett Creeley | ca4929b | 2018-09-19 17:23:18 -0700 | [diff] [blame] | 3239 | else if (reset == ICE_RESET_EMPR) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3240 | pf->empr_count++; |
Brett Creeley | ca4929b | 2018-09-19 17:23:18 -0700 | [diff] [blame] | 3241 | else |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3242 | dev_dbg(dev, "Invalid reset type %d\n", reset); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3243 | |
| 3244 | /* If a reset cycle isn't already in progress, we set a bit in |
| 3245 | * pf->state so that the service task can start a reset/rebuild. |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3246 | */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3247 | if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3248 | if (reset == ICE_RESET_CORER) |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3249 | set_bit(ICE_CORER_RECV, pf->state); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3250 | else if (reset == ICE_RESET_GLOBR) |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3251 | set_bit(ICE_GLOBR_RECV, pf->state); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3252 | else |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3253 | set_bit(ICE_EMPR_RECV, pf->state); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3254 | |
Anirudh Venkataramanan | fd2a981 | 2018-08-09 06:29:47 -0700 | [diff] [blame] | 3255 | 			/* There are a couple of different bits at play here.
| 3256 | * hw->reset_ongoing indicates whether the hardware is |
| 3257 | * in reset. This is set to true when a reset interrupt |
| 3258 | * is received and set back to false after the driver |
| 3259 | * has determined that the hardware is out of reset. |
| 3260 | * |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3261 | * ICE_RESET_OICR_RECV in pf->state indicates |
Anirudh Venkataramanan | fd2a981 | 2018-08-09 06:29:47 -0700 | [diff] [blame] | 3262 | * that a post reset rebuild is required before the |
| 3263 | * driver is operational again. This is set above. |
| 3264 | * |
| 3265 | * As this is the start of the reset/rebuild cycle, set |
| 3266 | * both to indicate that. |
| 3267 | */ |
| 3268 | hw->reset_ongoing = true; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3269 | } |
| 3270 | } |
| 3271 | |
Jacob Keller | ea9b847 | 2021-06-09 09:39:53 -0700 | [diff] [blame] | 3272 | if (oicr & PFINT_OICR_TSYN_TX_M) { |
| 3273 | ena_mask &= ~PFINT_OICR_TSYN_TX_M; |
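		/* with the low latency timestamp read capability (and a
		 * nominal PF state) the timestamp can be completed right here
		 * in hard irq context; otherwise the work is deferred to the
		 * threaded handler
		 */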
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3274 | if (ice_pf_state_is_nominal(pf) && |
| 3275 | pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { |
| 3276 | struct ice_ptp_tx *tx = &pf->ptp.port.tx; |
| 3277 | unsigned long flags; |
| 3278 | u8 idx; |
| 3279 | |
| 3280 | spin_lock_irqsave(&tx->lock, flags); |
| 3281 | idx = find_next_bit_wrap(tx->in_use, tx->len, |
| 3282 | tx->last_ll_ts_idx_read + 1); |
| 3283 | if (idx != tx->len) |
| 3284 | ice_ptp_req_tx_single_tstamp(tx, idx); |
| 3285 | spin_unlock_irqrestore(&tx->lock, flags); |
| 3286 | } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3287 | set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3288 | ret = IRQ_WAKE_THREAD; |
| 3289 | } |
Jacob Keller | ea9b847 | 2021-06-09 09:39:53 -0700 | [diff] [blame] | 3290 | } |
| 3291 | |
Maciej Machnikowski | 172db5f | 2021-06-16 09:35:22 -0700 | [diff] [blame] | 3292 | if (oicr & PFINT_OICR_TSYN_EVNT_M) { |
| 3293 | u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; |
| 3294 | u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); |
| 3295 | |
Maciej Machnikowski | 172db5f | 2021-06-16 09:35:22 -0700 | [diff] [blame] | 3296 | ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3297 | |
Jacob Keller | 42d40bb | 2023-09-08 14:37:14 -0700 | [diff] [blame] | 3298 | if (ice_pf_src_tmr_owned(pf)) { |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3299 | /* Save EVENTs from GLTSYN register */ |
| 3300 | pf->ptp.ext_ts_irq |= gltsyn_stat & |
| 3301 | (GLTSYN_STAT_EVENT0_M | |
| 3302 | GLTSYN_STAT_EVENT1_M | |
| 3303 | GLTSYN_STAT_EVENT2_M); |
| 3304 | |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3305 | ice_ptp_extts_event(pf); |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3306 | } |
Maciej Machnikowski | 172db5f | 2021-06-16 09:35:22 -0700 | [diff] [blame] | 3307 | } |
| 3308 | |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 3309 | #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) |
| 3310 | if (oicr & ICE_AUX_CRIT_ERR) { |
Alexander Lobakin | 32d53c0 | 2022-03-23 13:43:52 +0100 | [diff] [blame] | 3311 | pf->oicr_err_reg |= oicr; |
| 3312 | set_bit(ICE_AUX_ERR_PENDING, pf->state); |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 3313 | ena_mask &= ~ICE_AUX_CRIT_ERR; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3314 | } |
| 3315 | |
Md Fahad Iqbal Polash | 8d7189d | 2019-02-28 15:25:58 -0800 | [diff] [blame] | 3316 | /* Report any remaining unexpected interrupts */ |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3317 | oicr &= ena_mask; |
| 3318 | if (oicr) { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3319 | dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3320 | /* If a critical error is pending there is no choice but to |
| 3321 | * reset the device. |
| 3322 | */ |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 3323 | if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3324 | PFINT_OICR_ECC_ERR_M)) { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 3325 | set_bit(ICE_PFR_REQ, pf->state); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3326 | } |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3327 | } |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3328 | ice_service_task_schedule(pf); |
| 3329 | if (ret == IRQ_HANDLED) |
| 3330 | ice_irq_dynamic_ena(hw, NULL, NULL); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3331 | |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3332 | return ret; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3333 | } |
| 3334 | |
| 3335 | /** |
Karol Kolacinski | 1229b33 | 2022-09-16 13:17:28 -0700 | [diff] [blame] | 3336 | * ice_misc_intr_thread_fn - misc interrupt thread function |
| 3337 | * @irq: interrupt number |
 | 3338 |  * @data: pointer to the PF structure
| 3339 | */ |
| 3340 | static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) |
| 3341 | { |
Karol Kolacinski | 1229b33 | 2022-09-16 13:17:28 -0700 | [diff] [blame] | 3342 | struct ice_pf *pf = data; |
Jacob Keller | 0ec38df | 2023-06-01 14:15:07 -0700 | [diff] [blame] | 3343 | struct ice_hw *hw; |
| 3344 | |
| 3345 | hw = &pf->hw; |
Karol Kolacinski | 1229b33 | 2022-09-16 13:17:28 -0700 | [diff] [blame] | 3346 | |
Jacob Keller | 30f1587 | 2022-11-18 14:27:29 -0800 | [diff] [blame] | 3347 | if (ice_is_reset_in_progress(pf->state)) |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3348 | goto skip_irq; |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3349 | |
| 3350 | if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { |
Jacob Keller | 9a8648c | 2023-06-01 14:15:06 -0700 | [diff] [blame] | 3351 | /* Process outstanding Tx timestamps. If there is more work, |
| 3352 | * re-arm the interrupt to trigger again. |
| 3353 | */ |
| 3354 | if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { |
| 3355 | wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); |
| 3356 | ice_flush(hw); |
| 3357 | } |
Karol Kolacinski | 6e8b2c8 | 2023-06-01 14:15:03 -0700 | [diff] [blame] | 3358 | } |
Jacob Keller | 30f1587 | 2022-11-18 14:27:29 -0800 | [diff] [blame] | 3359 | |
Karol Kolacinski | 00d5000 | 2023-11-29 13:40:22 +0100 | [diff] [blame] | 3360 | skip_irq: |
Jacob Keller | 0ec38df | 2023-06-01 14:15:07 -0700 | [diff] [blame] | 3361 | ice_irq_dynamic_ena(hw, NULL, NULL); |
| 3362 | |
Jacob Keller | 30f1587 | 2022-11-18 14:27:29 -0800 | [diff] [blame] | 3363 | return IRQ_HANDLED; |
Karol Kolacinski | 1229b33 | 2022-09-16 13:17:28 -0700 | [diff] [blame] | 3364 | } |
| 3365 | |
| 3366 | /** |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3367 | * ice_dis_ctrlq_interrupts - disable control queue interrupts |
| 3368 | * @hw: pointer to HW structure |
| 3369 | */ |
| 3370 | static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) |
| 3371 | { |
| 3372 | /* disable Admin queue Interrupt causes */ |
| 3373 | wr32(hw, PFINT_FW_CTL, |
| 3374 | rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); |
| 3375 | |
| 3376 | /* disable Mailbox queue Interrupt causes */ |
| 3377 | wr32(hw, PFINT_MBX_CTL, |
| 3378 | rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); |
| 3379 | |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 3380 | wr32(hw, PFINT_SB_CTL, |
| 3381 | rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); |
| 3382 | |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3383 | /* disable Control queue Interrupt causes */ |
| 3384 | wr32(hw, PFINT_OICR_CTL, |
| 3385 | rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); |
| 3386 | |
| 3387 | ice_flush(hw); |
| 3388 | } |
| 3389 | |
| 3390 | /** |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3391 |  * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
| 3392 | * @pf: board private structure |
| 3393 | */ |
| 3394 | static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) |
| 3395 | { |
| 3396 | int irq_num = pf->ll_ts_irq.virq; |
| 3397 | |
| 3398 | synchronize_irq(irq_num); |
| 3399 | devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); |
| 3400 | |
| 3401 | ice_free_irq(pf, pf->ll_ts_irq); |
| 3402 | } |
| 3403 | |
| 3404 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3405 | * ice_free_irq_msix_misc - Unroll misc vector setup |
| 3406 | * @pf: board private structure |
| 3407 | */ |
| 3408 | static void ice_free_irq_msix_misc(struct ice_pf *pf) |
| 3409 | { |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3410 | int misc_irq_num = pf->oicr_irq.virq; |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3411 | struct ice_hw *hw = &pf->hw; |
| 3412 | |
| 3413 | ice_dis_ctrlq_interrupts(hw); |
| 3414 | |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3415 | /* disable OICR interrupt */ |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3416 | wr32(hw, PFINT_OICR_ENA, 0); |
| 3417 | ice_flush(hw); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3418 | |
Piotr Raczynski | 0501893 | 2023-05-15 21:03:14 +0200 | [diff] [blame] | 3419 | synchronize_irq(misc_irq_num); |
| 3420 | devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3421 | |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3422 | ice_free_irq(pf, pf->oicr_irq); |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3423 | if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) |
| 3424 | ice_free_irq_msix_ll_ts(pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3425 | } |
| 3426 | |
| 3427 | /** |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3428 | * ice_ena_ctrlq_interrupts - enable control queue interrupts |
| 3429 | * @hw: pointer to HW structure |
Brett Creeley | b07833a | 2019-02-28 15:25:59 -0800 | [diff] [blame] | 3430 | * @reg_idx: HW vector index to associate the control queue interrupts with |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3431 | */ |
Brett Creeley | b07833a | 2019-02-28 15:25:59 -0800 | [diff] [blame] | 3432 | static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3433 | { |
| 3434 | u32 val; |
| 3435 | |
Brett Creeley | b07833a | 2019-02-28 15:25:59 -0800 | [diff] [blame] | 3436 | val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3437 | PFINT_OICR_CTL_CAUSE_ENA_M); |
| 3438 | wr32(hw, PFINT_OICR_CTL, val); |
| 3439 | |
| 3440 | /* enable Admin queue Interrupt causes */ |
Brett Creeley | b07833a | 2019-02-28 15:25:59 -0800 | [diff] [blame] | 3441 | val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3442 | PFINT_FW_CTL_CAUSE_ENA_M); |
| 3443 | wr32(hw, PFINT_FW_CTL, val); |
| 3444 | |
| 3445 | /* enable Mailbox queue Interrupt causes */ |
Brett Creeley | b07833a | 2019-02-28 15:25:59 -0800 | [diff] [blame] | 3446 | val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3447 | PFINT_MBX_CTL_CAUSE_ENA_M); |
| 3448 | wr32(hw, PFINT_MBX_CTL, val); |
| 3449 | |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3450 | if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { |
| 3451 | /* enable Sideband queue Interrupt causes */ |
| 3452 | val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | |
| 3453 | PFINT_SB_CTL_CAUSE_ENA_M); |
| 3454 | wr32(hw, PFINT_SB_CTL, val); |
| 3455 | } |
Jacob Keller | 8f5ee3c | 2021-06-09 09:39:46 -0700 | [diff] [blame] | 3456 | |
Brett Creeley | 0e04e8e | 2019-02-08 12:50:34 -0800 | [diff] [blame] | 3457 | ice_flush(hw); |
| 3458 | } |
| 3459 | |
| 3460 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3461 |  * ice_req_irq_msix_misc - Set up the misc vector to handle non-queue events
| 3462 | * @pf: board private structure |
| 3463 | * |
| 3464 | * This sets up the handler for MSIX 0, which is used to manage the |
Anirudh Venkataramanan | df17b7e | 2018-10-26 11:44:46 -0700 | [diff] [blame] | 3465 | * non-queue interrupts, e.g. AdminQ and errors. This is not used |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3466 | * when in MSI or Legacy interrupt mode. |
| 3467 | */ |
| 3468 | static int ice_req_irq_msix_misc(struct ice_pf *pf) |
| 3469 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3470 | struct device *dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3471 | struct ice_hw *hw = &pf->hw; |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3472 | u32 pf_intr_start_offset; |
| 3473 | struct msi_map irq; |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3474 | int err = 0; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3475 | |
| 3476 | if (!pf->int_name[0]) |
| 3477 | snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 3478 | dev_driver_string(dev), dev_name(dev)); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3479 | |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3480 | if (!pf->int_name_ll_ts[0]) |
| 3481 | snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, |
| 3482 | "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3483 | /* Do not request IRQ but do enable OICR interrupt since settings are |
| 3484 | * lost during reset. Note that this function is called only during |
| 3485 | * rebuild path and not while reset is in progress. |
| 3486 | */ |
Dave Ertman | 5df7e45 | 2018-09-19 17:23:11 -0700 | [diff] [blame] | 3487 | if (ice_is_reset_in_progress(pf->state)) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3488 | goto skip_req_irq; |
| 3489 | |
Brett Creeley | cbe66bf | 2019-04-16 10:30:44 -0700 | [diff] [blame] | 3490 | /* reserve one vector in irq_tracker for misc interrupts */ |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3491 | irq = ice_alloc_irq(pf, false); |
| 3492 | if (irq.index < 0) |
| 3493 | return irq.index; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3494 | |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3495 | pf->oicr_irq = irq; |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3496 | err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, |
| 3497 | ice_misc_intr_thread_fn, 0, |
| 3498 | pf->int_name, pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3499 | if (err) { |
Karol Kolacinski | 1229b33 | 2022-09-16 13:17:28 -0700 | [diff] [blame] | 3500 | dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3501 | pf->int_name, err); |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3502 | ice_free_irq(pf, pf->oicr_irq); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3503 | return err; |
| 3504 | } |
| 3505 | |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3506 | /* reserve one vector in irq_tracker for ll_ts interrupt */ |
| 3507 | if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) |
| 3508 | goto skip_req_irq; |
| 3509 | |
| 3510 | irq = ice_alloc_irq(pf, false); |
| 3511 | if (irq.index < 0) |
| 3512 | return irq.index; |
| 3513 | |
| 3514 | pf->ll_ts_irq = irq; |
| 3515 | err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, |
| 3516 | pf->int_name_ll_ts, pf); |
| 3517 | if (err) { |
| 3518 | dev_err(dev, "devm_request_irq for %s failed: %d\n", |
| 3519 | pf->int_name_ll_ts, err); |
| 3520 | ice_free_irq(pf, pf->ll_ts_irq); |
| 3521 | return err; |
| 3522 | } |
| 3523 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 3524 | skip_req_irq: |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3525 | ice_ena_misc_vector(pf); |
| 3526 | |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3527 | ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); |
Karol Kolacinski | 82e71b2 | 2023-11-29 13:40:23 +0100 | [diff] [blame] | 3528 | 	/* This enables the LL TS interrupt */
| 3529 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; |
| 3530 | if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) |
| 3531 | wr32(hw, PFINT_SB_CTL, |
| 3532 | ((pf->ll_ts_irq.index + pf_intr_start_offset) & |
| 3533 | PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M); |
Piotr Raczynski | 4aad533 | 2023-05-15 21:03:17 +0200 | [diff] [blame] | 3534 | wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), |
Brett Creeley | 63f545e | 2018-12-19 10:03:29 -0800 | [diff] [blame] | 3535 | ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3536 | |
| 3537 | ice_flush(hw); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 3538 | ice_irq_dynamic_ena(hw, NULL, NULL); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3539 | |
| 3540 | return 0; |
| 3541 | } |
| 3542 | |
| 3543 | /** |
Anirudh Venkataramanan | df0f847 | 2018-09-19 17:23:09 -0700 | [diff] [blame] | 3544 | * ice_napi_add - register NAPI handler for the VSI |
| 3545 | * @vsi: VSI for which NAPI handler is to be registered |
| 3546 | * |
| 3547 | * This function is only called in the driver's load path. Registering the NAPI |
| 3548 | * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, |
| 3549 | * reset/rebuild, etc.) |
| 3550 | */ |
| 3551 | static void ice_napi_add(struct ice_vsi *vsi) |
| 3552 | { |
| 3553 | int v_idx; |
| 3554 | |
| 3555 | if (!vsi->netdev) |
| 3556 | return; |
| 3557 | |
Amritha Nambiar | 91fdbce | 2023-12-01 15:28:40 -0800 | [diff] [blame] | 3558 | ice_for_each_q_vector(vsi, v_idx) { |
Anirudh Venkataramanan | df0f847 | 2018-09-19 17:23:09 -0700 | [diff] [blame] | 3559 | netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, |
Jakub Kicinski | b48b89f | 2022-09-27 06:27:53 -0700 | [diff] [blame] | 3560 | ice_napi_poll); |
Amritha Nambiar | 080b0c8 | 2024-02-13 11:48:50 -0800 | [diff] [blame] | 3561 | __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); |
Amritha Nambiar | 91fdbce | 2023-12-01 15:28:40 -0800 | [diff] [blame] | 3562 | } |
Anirudh Venkataramanan | df0f847 | 2018-09-19 17:23:09 -0700 | [diff] [blame] | 3563 | } |
| 3564 | |
| 3565 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3566 |  * ice_set_ops - set netdev and ethtool ops for the given netdev
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 3567 | * @vsi: the VSI associated with the new netdev |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3568 | */ |
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 3569 | static void ice_set_ops(struct ice_vsi *vsi) |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3570 | { |
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 3571 | struct net_device *netdev = vsi->netdev; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3572 | struct ice_pf *pf = ice_netdev_to_pf(netdev); |
| 3573 | |
| 3574 | if (ice_is_safe_mode(pf)) { |
| 3575 | netdev->netdev_ops = &ice_netdev_safe_mode_ops; |
| 3576 | ice_set_ethtool_safe_mode_ops(netdev); |
| 3577 | return; |
| 3578 | } |
| 3579 | |
| 3580 | netdev->netdev_ops = &ice_netdev_ops; |
Jakub Kicinski | b20e6c1 | 2020-09-25 17:56:46 -0700 | [diff] [blame] | 3581 | netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; |
Larysa Zaremba | 9031d5f | 2023-12-05 22:08:34 +0100 | [diff] [blame] | 3582 | netdev->xdp_metadata_ops = &ice_xdp_md_ops; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3583 | ice_set_ethtool_ops(netdev); |
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 3584 | |
| 3585 | if (vsi->type != ICE_VSI_PF) |
| 3586 | return; |
| 3587 | |
| 3588 | netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
| 3589 | NETDEV_XDP_ACT_XSK_ZEROCOPY | |
| 3590 | NETDEV_XDP_ACT_RX_SG; |
Maciej Fijalkowski | eeb2b53 | 2023-07-19 15:24:12 +0200 | [diff] [blame] | 3591 | netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3592 | } |
| 3593 | |
| 3594 | /** |
| 3595 | * ice_set_netdev_features - set features for the given netdev |
| 3596 | * @netdev: netdev instance |
| 3597 | */ |
| 3598 | static void ice_set_netdev_features(struct net_device *netdev) |
| 3599 | { |
| 3600 | struct ice_pf *pf = ice_netdev_to_pf(netdev); |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 3601 | bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3602 | netdev_features_t csumo_features; |
| 3603 | netdev_features_t vlano_features; |
| 3604 | netdev_features_t dflt_features; |
| 3605 | netdev_features_t tso_features; |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3606 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3607 | if (ice_is_safe_mode(pf)) { |
| 3608 | /* safe mode */ |
| 3609 | netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; |
| 3610 | netdev->hw_features = netdev->features; |
| 3611 | return; |
| 3612 | } |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3613 | |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3614 | dflt_features = NETIF_F_SG | |
| 3615 | NETIF_F_HIGHDMA | |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 3616 | NETIF_F_NTUPLE | |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3617 | NETIF_F_RXHASH; |
| 3618 | |
| 3619 | csumo_features = NETIF_F_RXCSUM | |
| 3620 | NETIF_F_IP_CSUM | |
Anirudh Venkataramanan | cf909e1 | 2018-12-19 10:03:32 -0800 | [diff] [blame] | 3621 | NETIF_F_SCTP_CRC | |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3622 | NETIF_F_IPV6_CSUM; |
| 3623 | |
| 3624 | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | |
| 3625 | NETIF_F_HW_VLAN_CTAG_TX | |
| 3626 | NETIF_F_HW_VLAN_CTAG_RX; |
| 3627 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 3628 | /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ |
| 3629 | if (is_dvm_ena) |
| 3630 | vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; |
| 3631 | |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 3632 | tso_features = NETIF_F_TSO | |
| 3633 | NETIF_F_TSO_ECN | |
| 3634 | NETIF_F_TSO6 | |
| 3635 | NETIF_F_GSO_GRE | |
| 3636 | NETIF_F_GSO_UDP_TUNNEL | |
| 3637 | NETIF_F_GSO_GRE_CSUM | |
| 3638 | NETIF_F_GSO_UDP_TUNNEL_CSUM | |
| 3639 | NETIF_F_GSO_PARTIAL | |
| 3640 | NETIF_F_GSO_IPXIP4 | |
| 3641 | NETIF_F_GSO_IPXIP6 | |
Brett Creeley | a54e3b8 | 2019-12-12 03:12:53 -0800 | [diff] [blame] | 3642 | NETIF_F_GSO_UDP_L4; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3643 | |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 3644 | netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | |
| 3645 | NETIF_F_GSO_GRE_CSUM; |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3646 | /* set features that user can change */ |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3647 | netdev->hw_features = dflt_features | csumo_features | |
| 3648 | vlano_features | tso_features; |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3649 | |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 3650 | /* add support for HW_CSUM on packets with MPLS header */ |
Joe Damato | 69e66c0 | 2022-03-17 21:12:12 -0700 | [diff] [blame] | 3651 | netdev->mpls_features = NETIF_F_HW_CSUM | |
| 3652 | NETIF_F_TSO | |
| 3653 | NETIF_F_TSO6; |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 3654 | |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 3655 | /* enable features */ |
| 3656 | netdev->features |= netdev->hw_features; |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 3657 | |
| 3658 | netdev->hw_features |= NETIF_F_HW_TC; |
Maciej Fijalkowski | 44ece4e | 2022-07-07 12:16:51 +0200 | [diff] [blame] | 3659 | netdev->hw_features |= NETIF_F_LOOPBACK; |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 3660 | |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3661 | /* encap and VLAN devices inherit default, csumo and tso features */ |
| 3662 | netdev->hw_enc_features |= dflt_features | csumo_features | |
| 3663 | tso_features; |
| 3664 | netdev->vlan_features |= dflt_features | csumo_features | |
| 3665 | tso_features; |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 3666 | |
| 3667 | /* advertise support but don't enable by default since only one type of |
| 3668 | * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one |
| 3669 | * type is turned on, the other has to be turned off. This is enforced by the |
| 3670 | * ice_fix_features() ndo callback. |
| 3671 | */ |
| 3672 | if (is_dvm_ena) |
| 3673 | netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | |
| 3674 | NETIF_F_HW_VLAN_STAG_TX; |
Jesse Brandeburg | dddd406 | 2022-07-27 09:24:05 +0200 | [diff] [blame] | 3675 | |
| 3676 | /* Leave CRC / FCS stripping enabled by default, but allow the value to |
| 3677 | * be changed at runtime |
| 3678 | */ |
| 3679 | netdev->hw_features |= NETIF_F_RXFCS; |
Pawel Chmielewski | fce92db | 2023-02-07 17:23:03 +0100 | [diff] [blame] | 3680 | |
| 3681 | netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 3682 | } |
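|  | /* Example (illustrative): on a running system the feature set assembled above |
|  |  * can be inspected with "ethtool -k <ifname>", e.g. "ethtool -k enp1s0f0", |
|  |  * where enp1s0f0 is only a placeholder interface name. In safe mode only the |
|  |  * basic SG/HIGHDMA features remain, matching the early return at the top of |
|  |  * this function. |
|  |  */ |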
| 3683 | |
| 3684 | /** |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3685 | * ice_fill_rss_lut - Fill the RSS lookup table with default values |
| 3686 | * @lut: Lookup table |
| 3687 | * @rss_table_size: Lookup table size |
| 3688 | * @rss_size: Range of queue number for hashing |
| 3689 | */ |
| 3690 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) |
| 3691 | { |
| 3692 | u16 i; |
| 3693 | |
| 3694 | for (i = 0; i < rss_table_size; i++) |
| 3695 | lut[i] = i % rss_size; |
| 3696 | } |
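|  | /* Example (illustrative values): with rss_table_size = 8 and rss_size = 3 the |
|  |  * loop above produces lut = { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. hash buckets are |
|  |  * spread round-robin across the first rss_size queues. |
|  |  */ |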
| 3697 | |
| 3698 | /** |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 3699 | * ice_pf_vsi_setup - Set up a PF VSI |
| 3700 | * @pf: board private structure |
| 3701 | * @pi: pointer to the port_info instance |
| 3702 | * |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 3703 | * Returns pointer to the successfully allocated VSI software struct |
| 3704 | * on success, otherwise returns NULL on failure. |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 3705 | */ |
| 3706 | static struct ice_vsi * |
| 3707 | ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
| 3708 | { |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3709 | struct ice_vsi_cfg_params params = {}; |
| 3710 | |
| 3711 | params.type = ICE_VSI_PF; |
Mateusz Polchlopek | deea427 | 2024-04-19 05:11:04 -0400 | [diff] [blame] | 3712 | params.port_info = pi; |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3713 | params.flags = ICE_VSI_FLAG_INIT; |
| 3714 | |
| 3715 | return ice_vsi_setup(pf, ¶ms); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 3716 | } |
| 3717 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 3718 | static struct ice_vsi * |
| 3719 | ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, |
| 3720 | struct ice_channel *ch) |
| 3721 | { |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3722 | struct ice_vsi_cfg_params params = {}; |
| 3723 | |
| 3724 | params.type = ICE_VSI_CHNL; |
Mateusz Polchlopek | deea427 | 2024-04-19 05:11:04 -0400 | [diff] [blame] | 3725 | params.port_info = pi; |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3726 | params.ch = ch; |
| 3727 | params.flags = ICE_VSI_FLAG_INIT; |
| 3728 | |
| 3729 | return ice_vsi_setup(pf, ¶ms); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 3730 | } |
| 3731 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 3732 | /** |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 3733 | * ice_ctrl_vsi_setup - Set up a control VSI |
| 3734 | * @pf: board private structure |
| 3735 | * @pi: pointer to the port_info instance |
| 3736 | * |
| 3737 | * Returns pointer to the successfully allocated VSI software struct |
| 3738 | * on success, otherwise returns NULL on failure. |
| 3739 | */ |
| 3740 | static struct ice_vsi * |
| 3741 | ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
| 3742 | { |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3743 | struct ice_vsi_cfg_params params = {}; |
| 3744 | |
| 3745 | params.type = ICE_VSI_CTRL; |
Mateusz Polchlopek | deea427 | 2024-04-19 05:11:04 -0400 | [diff] [blame] | 3746 | params.port_info = pi; |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3747 | params.flags = ICE_VSI_FLAG_INIT; |
| 3748 | |
| 3749 | return ice_vsi_setup(pf, ¶ms); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 3750 | } |
| 3751 | |
| 3752 | /** |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 3753 | * ice_lb_vsi_setup - Set up a loopback VSI |
| 3754 | * @pf: board private structure |
| 3755 | * @pi: pointer to the port_info instance |
| 3756 | * |
| 3757 | * Returns pointer to the successfully allocated VSI software struct |
| 3758 | * on success, otherwise returns NULL on failure. |
| 3759 | */ |
| 3760 | struct ice_vsi * |
| 3761 | ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) |
| 3762 | { |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3763 | struct ice_vsi_cfg_params params = {}; |
| 3764 | |
| 3765 | params.type = ICE_VSI_LB; |
Mateusz Polchlopek | deea427 | 2024-04-19 05:11:04 -0400 | [diff] [blame] | 3766 | params.port_info = pi; |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 3767 | params.flags = ICE_VSI_FLAG_INIT; |
| 3768 | |
| 3769 | return ice_vsi_setup(pf, ¶ms); |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 3770 | } |
| 3771 | |
| 3772 | /** |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3773 | * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3774 | * @netdev: network interface to be adjusted |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3775 | * @proto: VLAN TPID |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3776 | * @vid: VLAN ID to be added |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3777 | * |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3778 | * net_device_ops implementation for adding VLAN IDs |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3779 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 3780 | static int |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3781 | ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3782 | { |
| 3783 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3784 | struct ice_vsi_vlan_ops *vlan_ops; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3785 | struct ice_vsi *vsi = np->vsi; |
Brett Creeley | fb05ba1 | 2021-12-02 08:38:42 -0800 | [diff] [blame] | 3786 | struct ice_vlan vlan; |
Akeem G Abodunrin | 5eda8af | 2019-02-26 16:35:14 -0800 | [diff] [blame] | 3787 | int ret; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3788 | |
Brett Creeley | 42f3efe | 2020-01-22 07:21:24 -0800 | [diff] [blame] | 3789 | /* VLAN 0 is added by default during load/reset */ |
| 3790 | if (!vid) |
| 3791 | return 0; |
| 3792 | |
Ivan Vecera | 1273f89 | 2022-03-31 09:20:08 -0700 | [diff] [blame] | 3793 | while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) |
| 3794 | usleep_range(1000, 2000); |
| 3795 | |
| 3796 | /* Add multicast promisc rule for the VLAN ID to be added if |
| 3797 | * all-multicast is currently enabled. |
| 3798 | */ |
| 3799 | if (vsi->current_netdev_flags & IFF_ALLMULTI) { |
| 3800 | ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3801 | ICE_MCAST_VLAN_PROMISC_BITS, |
| 3802 | vid); |
| 3803 | if (ret) |
| 3804 | goto finish; |
| 3805 | } |
| 3806 | |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3807 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
Brett Creeley | 4f74dcc | 2018-08-09 06:29:56 -0700 | [diff] [blame] | 3808 | |
Brett Creeley | 42f3efe | 2020-01-22 07:21:24 -0800 | [diff] [blame] | 3809 | /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged |
| 3810 | * packets aren't pruned by the device's internal switch on Rx |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3811 | */ |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3812 | vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3813 | ret = vlan_ops->add_vlan(vsi, &vlan); |
Ivan Vecera | 1273f89 | 2022-03-31 09:20:08 -0700 | [diff] [blame] | 3814 | if (ret) |
| 3815 | goto finish; |
| 3816 | |
| 3817 | /* If all-multicast is currently enabled and this VLAN ID is the only one |
| 3818 | * besides VLAN-0, we have to update the look-up type of the multicast promisc |
| 3819 | * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. |
| 3820 | */ |
| 3821 | if ((vsi->current_netdev_flags & IFF_ALLMULTI) && |
| 3822 | ice_vsi_num_non_zero_vlans(vsi) == 1) { |
| 3823 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3824 | ICE_MCAST_PROMISC_BITS, 0); |
| 3825 | ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3826 | ICE_MCAST_VLAN_PROMISC_BITS, 0); |
| 3827 | } |
| 3828 | |
| 3829 | finish: |
| 3830 | clear_bit(ICE_CFG_BUSY, vsi->state); |
Akeem G Abodunrin | 5eda8af | 2019-02-26 16:35:14 -0800 | [diff] [blame] | 3831 | |
| 3832 | return ret; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3833 | } |
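|  | /* Example (illustrative): this callback is typically reached when a VLAN |
|  |  * sub-device is created on top of the PF netdev, e.g. |
|  |  *   ip link add link enp1s0f0 name enp1s0f0.100 type vlan id 100 |
|  |  * The 8021q core then invokes ndo_vlan_rx_add_vid with proto ETH_P_8021Q and |
|  |  * vid 100; the switch rule added above keeps VID 100 traffic from being |
|  |  * pruned on Rx. The interface name is only a placeholder. |
|  |  */ |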
| 3834 | |
| 3835 | /** |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3836 | * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3837 | * @netdev: network interface to be adjusted |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3838 | * @proto: VLAN TPID |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3839 | * @vid: VLAN ID to be removed |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3840 | * |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 3841 | * net_device_ops implementation for removing VLAN IDs |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3842 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 3843 | static int |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3844 | ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3845 | { |
| 3846 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3847 | struct ice_vsi_vlan_ops *vlan_ops; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3848 | struct ice_vsi *vsi = np->vsi; |
Brett Creeley | fb05ba1 | 2021-12-02 08:38:42 -0800 | [diff] [blame] | 3849 | struct ice_vlan vlan; |
Akeem G Abodunrin | 5eda8af | 2019-02-26 16:35:14 -0800 | [diff] [blame] | 3850 | int ret; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3851 | |
Brett Creeley | 42f3efe | 2020-01-22 07:21:24 -0800 | [diff] [blame] | 3852 | /* don't allow removal of VLAN 0 */ |
| 3853 | if (!vid) |
| 3854 | return 0; |
| 3855 | |
Ivan Vecera | 1273f89 | 2022-03-31 09:20:08 -0700 | [diff] [blame] | 3856 | while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) |
| 3857 | usleep_range(1000, 2000); |
| 3858 | |
Grzegorz Siwik | abddafd | 2022-08-12 15:25:49 +0200 | [diff] [blame] | 3859 | ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3860 | ICE_MCAST_VLAN_PROMISC_BITS, vid); |
| 3861 | if (ret) { |
| 3862 | netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n", |
| 3863 | vsi->vsi_num); |
| 3864 | vsi->current_netdev_flags |= IFF_ALLMULTI; |
| 3865 | } |
| 3866 | |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3867 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
| 3868 | |
Brett Creeley | bc42afa | 2021-12-02 08:38:41 -0800 | [diff] [blame] | 3869 | /* Make sure VLAN delete is successful before updating VLAN |
Brett Creeley | 4f74dcc | 2018-08-09 06:29:56 -0700 | [diff] [blame] | 3870 | * information |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3871 | */ |
Brett Creeley | 2bfefa2 | 2021-12-02 08:38:44 -0800 | [diff] [blame] | 3872 | vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 3873 | ret = vlan_ops->del_vlan(vsi, &vlan); |
Akeem G Abodunrin | 5eda8af | 2019-02-26 16:35:14 -0800 | [diff] [blame] | 3874 | if (ret) |
Ivan Vecera | 1273f89 | 2022-03-31 09:20:08 -0700 | [diff] [blame] | 3875 | goto finish; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3876 | |
Ivan Vecera | 1273f89 | 2022-03-31 09:20:08 -0700 | [diff] [blame] | 3877 | /* Remove multicast promisc rule for the removed VLAN ID if |
| 3878 | * all-multicast is enabled. |
| 3879 | */ |
| 3880 | if (vsi->current_netdev_flags & IFF_ALLMULTI) |
| 3881 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3882 | ICE_MCAST_VLAN_PROMISC_BITS, vid); |
| 3883 | |
| 3884 | if (!ice_vsi_has_non_zero_vlans(vsi)) { |
| 3885 | /* Update look-up type of multicast promisc rule for VLAN 0 |
| 3886 | * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when |
| 3887 | * all-multicast is enabled and VLAN 0 is the only VLAN rule. |
| 3888 | */ |
| 3889 | if (vsi->current_netdev_flags & IFF_ALLMULTI) { |
| 3890 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3891 | ICE_MCAST_VLAN_PROMISC_BITS, |
| 3892 | 0); |
| 3893 | ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, |
| 3894 | ICE_MCAST_PROMISC_BITS, 0); |
| 3895 | } |
| 3896 | } |
| 3897 | |
| 3898 | finish: |
| 3899 | clear_bit(ICE_CFG_BUSY, vsi->state); |
| 3900 | |
| 3901 | return ret; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3902 | } |
| 3903 | |
| 3904 | /** |
Michal Swiatkowski | 195bb48 | 2021-10-12 11:31:03 -0700 | [diff] [blame] | 3905 | * ice_rep_indr_tc_block_unbind - clean up indirect block private data |
| 3906 | * @cb_priv: indirection block private data |
| 3907 | */ |
| 3908 | static void ice_rep_indr_tc_block_unbind(void *cb_priv) |
| 3909 | { |
| 3910 | struct ice_indr_block_priv *indr_priv = cb_priv; |
| 3911 | |
| 3912 | list_del(&indr_priv->list); |
| 3913 | kfree(indr_priv); |
| 3914 | } |
| 3915 | |
| 3916 | /** |
| 3917 | * ice_tc_indir_block_unregister - Unregister TC indirect block notifications |
| 3918 | * @vsi: VSI struct which has the netdev |
| 3919 | */ |
| 3920 | static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) |
| 3921 | { |
| 3922 | struct ice_netdev_priv *np = netdev_priv(vsi->netdev); |
| 3923 | |
| 3924 | flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, |
| 3925 | ice_rep_indr_tc_block_unbind); |
| 3926 | } |
| 3927 | |
| 3928 | /** |
Michal Swiatkowski | 195bb48 | 2021-10-12 11:31:03 -0700 | [diff] [blame] | 3929 | * ice_tc_indir_block_register - Register TC indirect block notifications |
| 3930 | * @vsi: VSI struct which has the netdev |
| 3931 | * |
| 3932 | * Returns 0 on success, negative value on failure |
| 3933 | */ |
| 3934 | static int ice_tc_indir_block_register(struct ice_vsi *vsi) |
| 3935 | { |
| 3936 | struct ice_netdev_priv *np; |
| 3937 | |
| 3938 | if (!vsi || !vsi->netdev) |
| 3939 | return -EINVAL; |
| 3940 | |
| 3941 | np = netdev_priv(vsi->netdev); |
| 3942 | |
| 3943 | INIT_LIST_HEAD(&np->tc_indr_block_priv_list); |
| 3944 | return flow_indr_dev_register(ice_indr_setup_tc_cb, np); |
| 3945 | } |
| 3946 | |
| 3947 | /** |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3948 | * ice_get_avail_q_count - Get count of queues in use |
| 3949 | * @pf_qmap: bitmap to get queue use count from |
| 3950 | * @lock: pointer to a mutex that protects access to pf_qmap |
| 3951 | * @size: size of the bitmap |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3952 | */ |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3953 | static u16 |
| 3954 | ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3955 | { |
Karol Kolacinski | 88865fc | 2020-05-07 17:41:05 -0700 | [diff] [blame] | 3956 | unsigned long bit; |
| 3957 | u16 count = 0; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3958 | |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3959 | mutex_lock(lock); |
| 3960 | for_each_clear_bit(bit, pf_qmap, size) |
| 3961 | count++; |
| 3962 | mutex_unlock(lock); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3963 | |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3964 | return count; |
| 3965 | } |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 3966 | |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3967 | /** |
| 3968 | * ice_get_avail_txq_count - Get count of Tx queues in use |
| 3969 | * @pf: pointer to an ice_pf instance |
| 3970 | */ |
| 3971 | u16 ice_get_avail_txq_count(struct ice_pf *pf) |
| 3972 | { |
| 3973 | return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, |
| 3974 | pf->max_pf_txqs); |
| 3975 | } |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3976 | |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 3977 | /** |
| 3978 | * ice_get_avail_rxq_count - Get count of Rx queues in use |
| 3979 | * @pf: pointer to an ice_pf instance |
| 3980 | */ |
| 3981 | u16 ice_get_avail_rxq_count(struct ice_pf *pf) |
| 3982 | { |
| 3983 | return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, |
| 3984 | pf->max_pf_rxqs); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3985 | } |
| 3986 | |
| 3987 | /** |
| 3988 | * ice_deinit_pf - Unrolls initializations done by ice_init_pf |
| 3989 | * @pf: board private structure to initialize |
| 3990 | */ |
| 3991 | static void ice_deinit_pf(struct ice_pf *pf) |
| 3992 | { |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 3993 | ice_service_task_stop(pf); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 3994 | mutex_destroy(&pf->lag_mutex); |
Ivan Vecera | 486b9ee | 2022-04-23 12:20:21 +0200 | [diff] [blame] | 3995 | mutex_destroy(&pf->adev_mutex); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3996 | mutex_destroy(&pf->sw_mutex); |
Dave Ertman | b94b013 | 2019-11-06 02:05:29 -0800 | [diff] [blame] | 3997 | mutex_destroy(&pf->tc_mutex); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 3998 | mutex_destroy(&pf->avail_q_mutex); |
Jacob Keller | 3d5985a | 2022-02-16 13:37:38 -0800 | [diff] [blame] | 3999 | mutex_destroy(&pf->vfs.table_lock); |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4000 | |
| 4001 | if (pf->avail_txqs) { |
| 4002 | bitmap_free(pf->avail_txqs); |
| 4003 | pf->avail_txqs = NULL; |
| 4004 | } |
| 4005 | |
| 4006 | if (pf->avail_rxqs) { |
| 4007 | bitmap_free(pf->avail_rxqs); |
| 4008 | pf->avail_rxqs = NULL; |
| 4009 | } |
Jacob Keller | 06c16d8 | 2021-06-09 09:39:50 -0700 | [diff] [blame] | 4010 | |
| 4011 | if (pf->ptp.clock) |
| 4012 | ptp_clock_unregister(pf->ptp.clock); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4013 | } |
| 4014 | |
| 4015 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4016 | * ice_set_pf_caps - set PF's capability flags |
| 4017 | * @pf: pointer to the PF instance |
| 4018 | */ |
| 4019 | static void ice_set_pf_caps(struct ice_pf *pf) |
| 4020 | { |
| 4021 | struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; |
| 4022 | |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 4023 | clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
Dave Ertman | 88f62ae | 2022-02-11 10:26:03 -0800 | [diff] [blame] | 4024 | if (func_caps->common_cap.rdma) |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 4025 | set_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4026 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
| 4027 | if (func_caps->common_cap.dcb) |
| 4028 | set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4029 | clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
| 4030 | if (func_caps->common_cap.sr_iov_1_1) { |
| 4031 | set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
Jacob Keller | 000773c | 2022-02-16 13:37:36 -0800 | [diff] [blame] | 4032 | pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, |
Jacob Keller | dc36796 | 2022-02-22 16:26:53 -0800 | [diff] [blame] | 4033 | ICE_MAX_SRIOV_VFS); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4034 | } |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4035 | clear_bit(ICE_FLAG_RSS_ENA, pf->flags); |
| 4036 | if (func_caps->common_cap.rss_table_size) |
| 4037 | set_bit(ICE_FLAG_RSS_ENA, pf->flags); |
| 4038 | |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 4039 | clear_bit(ICE_FLAG_FD_ENA, pf->flags); |
| 4040 | if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { |
| 4041 | u16 unused; |
| 4042 | |
| 4043 | /* ctrl_vsi_idx will be set to a valid value when flow director |
| 4044 | * is set up by ice_init_fdir |
| 4045 | */ |
| 4046 | pf->ctrl_vsi_idx = ICE_NO_VSI; |
| 4047 | set_bit(ICE_FLAG_FD_ENA, pf->flags); |
| 4048 | /* force guaranteed filter pool for PF */ |
| 4049 | ice_alloc_fd_guar_item(&pf->hw, &unused, |
| 4050 | func_caps->fd_fltr_guar); |
| 4051 | /* force shared filter pool for PF */ |
| 4052 | ice_alloc_fd_shrd_item(&pf->hw, &unused, |
| 4053 | func_caps->fd_fltr_best_effort); |
| 4054 | } |
| 4055 | |
Jacob Keller | 06c16d8 | 2021-06-09 09:39:50 -0700 | [diff] [blame] | 4056 | clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); |
Paul Greenwalt | ba1124f | 2023-10-25 14:41:52 -0700 | [diff] [blame] | 4057 | if (func_caps->common_cap.ieee_1588 && |
| 4058 | !(pf->hw.mac_type == ICE_MAC_E830)) |
Jacob Keller | 06c16d8 | 2021-06-09 09:39:50 -0700 | [diff] [blame] | 4059 | set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); |
| 4060 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4061 | pf->max_pf_txqs = func_caps->common_cap.num_txq; |
| 4062 | pf->max_pf_rxqs = func_caps->common_cap.num_rxq; |
| 4063 | } |
| 4064 | |
| 4065 | /** |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4066 | * ice_init_pf - Initialize general software structures (struct ice_pf) |
| 4067 | * @pf: board private structure to initialize |
| 4068 | */ |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4069 | static int ice_init_pf(struct ice_pf *pf) |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4070 | { |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4071 | ice_set_pf_caps(pf); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4072 | |
| 4073 | mutex_init(&pf->sw_mutex); |
Dave Ertman | b94b013 | 2019-11-06 02:05:29 -0800 | [diff] [blame] | 4074 | mutex_init(&pf->tc_mutex); |
Ivan Vecera | 486b9ee | 2022-04-23 12:20:21 +0200 | [diff] [blame] | 4075 | mutex_init(&pf->adev_mutex); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 4076 | mutex_init(&pf->lag_mutex); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 4077 | |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 4078 | INIT_HLIST_HEAD(&pf->aq_wait_list); |
| 4079 | spin_lock_init(&pf->aq_wait_lock); |
| 4080 | init_waitqueue_head(&pf->aq_wait_queue); |
| 4081 | |
Jacob Keller | 1c08052 | 2021-05-06 08:39:59 -0700 | [diff] [blame] | 4082 | init_waitqueue_head(&pf->reset_wait_queue); |
| 4083 | |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4084 | /* setup service timer and periodic service task */ |
| 4085 | timer_setup(&pf->serv_tmr, ice_service_timer, 0); |
| 4086 | pf->serv_tmr_period = HZ; |
| 4087 | INIT_WORK(&pf->serv_task, ice_service_task); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 4088 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4089 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4090 | mutex_init(&pf->avail_q_mutex); |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4091 | pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); |
| 4092 | if (!pf->avail_txqs) |
| 4093 | return -ENOMEM; |
| 4094 | |
| 4095 | pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); |
| 4096 | if (!pf->avail_rxqs) { |
Michal Swiatkowski | 59ac325 | 2022-08-17 10:53:20 +0200 | [diff] [blame] | 4097 | bitmap_free(pf->avail_txqs); |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4098 | pf->avail_txqs = NULL; |
| 4099 | return -ENOMEM; |
| 4100 | } |
| 4101 | |
Jacob Keller | 3d5985a | 2022-02-16 13:37:38 -0800 | [diff] [blame] | 4102 | mutex_init(&pf->vfs.table_lock); |
| 4103 | hash_init(pf->vfs.table); |
Jacob Keller | dde7db6 | 2023-02-22 09:09:13 -0800 | [diff] [blame] | 4104 | ice_mbx_init_snapshot(&pf->hw); |
Jacob Keller | 3d5985a | 2022-02-16 13:37:38 -0800 | [diff] [blame] | 4105 | |
Anirudh Venkataramanan | 78b5713 | 2019-08-02 01:25:21 -0700 | [diff] [blame] | 4106 | return 0; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 4107 | } |
| 4108 | |
| 4109 | /** |
Anirudh Venkataramanan | 3176551 | 2021-02-26 13:19:30 -0800 | [diff] [blame] | 4110 | * ice_is_wol_supported - check if WoL is supported |
| 4111 | * @hw: pointer to hardware info |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 4112 | * |
| 4113 | * Check if WoL is supported based on the HW configuration. |
| 4114 | * Returns true if NVM supports and enables WoL for this port, false otherwise |
| 4115 | */ |
Anirudh Venkataramanan | 3176551 | 2021-02-26 13:19:30 -0800 | [diff] [blame] | 4116 | bool ice_is_wol_supported(struct ice_hw *hw) |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 4117 | { |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 4118 | u16 wol_ctrl; |
| 4119 | |
| 4120 | /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control |
| 4121 | * word) indicates WoL is not supported on the corresponding PF ID. |
| 4122 | */ |
| 4123 | if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) |
| 4124 | return false; |
| 4125 | |
Anirudh Venkataramanan | 3176551 | 2021-02-26 13:19:30 -0800 | [diff] [blame] | 4126 | return !(BIT(hw->port_info->lport) & wol_ctrl); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 4127 | } |
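|  | /* Example (illustrative value): if the WoL control word read above is 0x0005, |
|  |  * bits 0 and 2 are set, so WoL is reported as unsupported on logical ports 0 |
|  |  * and 2 and as supported on the remaining ports. |
|  |  */ |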
| 4128 | |
| 4129 | /** |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4130 | * ice_vsi_recfg_qs - Change the number of queues on a VSI |
| 4131 | * @vsi: VSI being changed |
| 4132 | * @new_rx: new number of Rx queues |
| 4133 | * @new_tx: new number of Tx queues |
Dave Ertman | a6a0974 | 2023-01-24 09:19:43 -0800 | [diff] [blame] | 4134 | * @locked: is adev device_lock held |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4135 | * |
| 4136 | * Only change the number of queues if new_tx or new_rx is non-zero. |
| 4137 | * |
| 4138 | * Returns 0 on success. |
| 4139 | */ |
Dave Ertman | a6a0974 | 2023-01-24 09:19:43 -0800 | [diff] [blame] | 4140 | int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4141 | { |
| 4142 | struct ice_pf *pf = vsi->back; |
Jan Sokolowski | f4b91c1 | 2024-06-21 10:54:19 -0700 | [diff] [blame] | 4143 | int i, err = 0, timeout = 50; |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4144 | |
| 4145 | if (!new_rx && !new_tx) |
| 4146 | return -EINVAL; |
| 4147 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 4148 | while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4149 | timeout--; |
| 4150 | if (!timeout) |
| 4151 | return -EBUSY; |
| 4152 | usleep_range(1000, 2000); |
| 4153 | } |
| 4154 | |
| 4155 | if (new_tx) |
Karol Kolacinski | 88865fc | 2020-05-07 17:41:05 -0700 | [diff] [blame] | 4156 | vsi->req_txq = (u16)new_tx; |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4157 | if (new_rx) |
Karol Kolacinski | 88865fc | 2020-05-07 17:41:05 -0700 | [diff] [blame] | 4158 | vsi->req_rxq = (u16)new_rx; |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4159 | |
| 4160 | /* set for the next time the netdev is started */ |
| 4161 | if (!netif_running(vsi->netdev)) { |
Eric Joyner | d47bf9a | 2024-06-17 14:46:25 +0200 | [diff] [blame] | 4162 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
| 4163 | if (err) |
| 4164 | goto rebuild_err; |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4165 | dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); |
| 4166 | goto done; |
| 4167 | } |
| 4168 | |
| 4169 | ice_vsi_close(vsi); |
Eric Joyner | d47bf9a | 2024-06-17 14:46:25 +0200 | [diff] [blame] | 4170 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
| 4171 | if (err) |
| 4172 | goto rebuild_err; |
Jan Sokolowski | f4b91c1 | 2024-06-21 10:54:19 -0700 | [diff] [blame] | 4173 | |
| 4174 | ice_for_each_traffic_class(i) { |
| 4175 | if (vsi->tc_cfg.ena_tc & BIT(i)) |
| 4176 | netdev_set_tc_queue(vsi->netdev, |
| 4177 | vsi->tc_cfg.tc_info[i].netdev_tc, |
| 4178 | vsi->tc_cfg.tc_info[i].qcount_tx, |
| 4179 | vsi->tc_cfg.tc_info[i].qoffset); |
| 4180 | } |
Dave Ertman | a6a0974 | 2023-01-24 09:19:43 -0800 | [diff] [blame] | 4181 | ice_pf_dcb_recfg(pf, locked); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4182 | ice_vsi_open(vsi); |
Eric Joyner | d47bf9a | 2024-06-17 14:46:25 +0200 | [diff] [blame] | 4183 | goto done; |
| 4184 | |
| 4185 | rebuild_err: |
| 4186 | dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", |
| 4187 | err); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4188 | done: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 4189 | clear_bit(ICE_CFG_BUSY, pf->state); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 4190 | return err; |
| 4191 | } |
| 4192 | |
| 4193 | /** |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4194 | * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode |
| 4195 | * @pf: PF to configure |
| 4196 | * |
| 4197 | * No VLAN offloads/filtering are advertised in safe mode so make sure the PF |
| 4198 | * VSI can still Tx/Rx VLAN tagged packets. |
| 4199 | */ |
| 4200 | static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) |
| 4201 | { |
| 4202 | struct ice_vsi *vsi = ice_get_main_vsi(pf); |
| 4203 | struct ice_vsi_ctx *ctxt; |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4204 | struct ice_hw *hw; |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 4205 | int status; |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4206 | |
| 4207 | if (!vsi) |
| 4208 | return; |
| 4209 | |
| 4210 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
| 4211 | if (!ctxt) |
| 4212 | return; |
| 4213 | |
| 4214 | hw = &pf->hw; |
| 4215 | ctxt->info = vsi->info; |
| 4216 | |
| 4217 | ctxt->info.valid_sections = |
| 4218 | cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | |
| 4219 | ICE_AQ_VSI_PROP_SECURITY_VALID | |
| 4220 | ICE_AQ_VSI_PROP_SW_VALID); |
| 4221 | |
| 4222 | /* disable VLAN anti-spoof */ |
| 4223 | ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << |
| 4224 | ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); |
| 4225 | |
| 4226 | /* disable VLAN pruning and keep all other settings */ |
| 4227 | ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; |
| 4228 | |
| 4229 | /* allow all VLANs on Tx and don't strip on Rx */ |
Brett Creeley | 7bd527a | 2021-12-02 08:38:45 -0800 | [diff] [blame] | 4230 | ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | |
| 4231 | ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4232 | |
| 4233 | status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
| 4234 | if (status) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 4235 | dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 4236 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4237 | } else { |
| 4238 | vsi->info.sec_flags = ctxt->info.sec_flags; |
| 4239 | vsi->info.sw_flags2 = ctxt->info.sw_flags2; |
Brett Creeley | 7bd527a | 2021-12-02 08:38:45 -0800 | [diff] [blame] | 4240 | vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; |
Brett Creeley | cd1f56f | 2020-07-13 13:53:14 -0700 | [diff] [blame] | 4241 | } |
| 4242 | |
| 4243 | kfree(ctxt); |
| 4244 | } |
| 4245 | |
| 4246 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4247 | * ice_log_pkg_init - log result of DDP package load |
| 4248 | * @hw: pointer to hardware info |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4249 | * @state: state of package load |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4250 | */ |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4251 | static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4252 | { |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4253 | struct ice_pf *pf = hw->back; |
| 4254 | struct device *dev; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4255 | |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4256 | dev = ice_pf_to_dev(pf); |
| 4257 | |
| 4258 | switch (state) { |
| 4259 | case ICE_DDP_PKG_SUCCESS: |
| 4260 | dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", |
| 4261 | hw->active_pkg_name, |
| 4262 | hw->active_pkg_ver.major, |
| 4263 | hw->active_pkg_ver.minor, |
| 4264 | hw->active_pkg_ver.update, |
| 4265 | hw->active_pkg_ver.draft); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4266 | break; |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4267 | case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: |
| 4268 | dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", |
| 4269 | hw->active_pkg_name, |
| 4270 | hw->active_pkg_ver.major, |
| 4271 | hw->active_pkg_ver.minor, |
| 4272 | hw->active_pkg_ver.update, |
| 4273 | hw->active_pkg_ver.draft); |
| 4274 | break; |
| 4275 | case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: |
| 4276 | dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", |
| 4277 | hw->active_pkg_name, |
| 4278 | hw->active_pkg_ver.major, |
| 4279 | hw->active_pkg_ver.minor, |
| 4280 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
| 4281 | break; |
| 4282 | case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: |
| 4283 | dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", |
| 4284 | hw->active_pkg_name, |
| 4285 | hw->active_pkg_ver.major, |
| 4286 | hw->active_pkg_ver.minor, |
| 4287 | hw->active_pkg_ver.update, |
| 4288 | hw->active_pkg_ver.draft, |
| 4289 | hw->pkg_name, |
| 4290 | hw->pkg_ver.major, |
| 4291 | hw->pkg_ver.minor, |
| 4292 | hw->pkg_ver.update, |
| 4293 | hw->pkg_ver.draft); |
| 4294 | break; |
| 4295 | case ICE_DDP_PKG_FW_MISMATCH: |
Victor Raj | b827291 | 2020-05-15 17:36:34 -0700 | [diff] [blame] | 4296 | dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); |
| 4297 | break; |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4298 | case ICE_DDP_PKG_INVALID_FILE: |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 4299 | dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4300 | break; |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4301 | case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: |
| 4302 | dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4303 | break; |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4304 | case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: |
| 4305 | dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", |
| 4306 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
| 4307 | break; |
| 4308 | case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: |
| 4309 | dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); |
| 4310 | break; |
| 4311 | case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: |
| 4312 | dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); |
| 4313 | break; |
| 4314 | case ICE_DDP_PKG_LOAD_ERROR: |
| 4315 | dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); |
Jesse Brandeburg | 0092db5 | 2021-12-21 15:05:38 -0800 | [diff] [blame] | 4316 | /* poll for reset to complete */ |
| 4317 | if (ice_check_reset(hw)) |
| 4318 | dev_err(dev, "Error resetting device. Please reload the driver\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4319 | break; |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4320 | case ICE_DDP_PKG_ERR: |
| 4321 | default: |
| 4322 | dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); |
Jesse Brandeburg | 0092db5 | 2021-12-21 15:05:38 -0800 | [diff] [blame] | 4323 | break; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4324 | } |
| 4325 | } |
| 4326 | |
| 4327 | /** |
| 4328 | * ice_load_pkg - load/reload the DDP Package file |
| 4329 | * @firmware: firmware structure when firmware requested or NULL for reload |
| 4330 | * @firmware: firmware structure when firmware is requested, or NULL for a reload |
| 4331 | * |
| 4332 | * Called on probe and post CORER/GLOBR rebuild to load DDP Package and |
| 4333 | * initialize HW tables. |
| 4334 | */ |
| 4335 | static void |
| 4336 | ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) |
| 4337 | { |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4338 | enum ice_ddp_state state = ICE_DDP_PKG_ERR; |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 4339 | struct device *dev = ice_pf_to_dev(pf); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4340 | struct ice_hw *hw = &pf->hw; |
| 4341 | |
| 4342 | /* Load DDP Package */ |
| 4343 | if (firmware && !hw->pkg_copy) { |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4344 | state = ice_copy_and_init_pkg(hw, firmware->data, |
| 4345 | firmware->size); |
| 4346 | ice_log_pkg_init(hw, state); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4347 | } else if (!firmware && hw->pkg_copy) { |
| 4348 | /* Reload package during rebuild after CORER/GLOBR reset */ |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4349 | state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); |
| 4350 | ice_log_pkg_init(hw, state); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4351 | } else { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 4352 | dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4353 | } |
| 4354 | |
Wojciech Drewek | 247dd97 | 2021-10-07 15:54:37 -0700 | [diff] [blame] | 4355 | if (!ice_is_init_pkg_successful(state)) { |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4356 | /* Safe Mode */ |
| 4357 | clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
| 4358 | return; |
| 4359 | } |
| 4360 | |
| 4361 | /* A successful package download is the precondition for advanced |
| 4362 | * features, hence setting the ICE_FLAG_ADV_FEATURES flag |
| 4363 | */ |
| 4364 | set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); |
| 4365 | } |
| 4366 | |
| 4367 | /** |
Brett Creeley | c585ea4 | 2018-10-26 10:40:58 -0700 | [diff] [blame] | 4368 | * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines |
| 4369 | * @pf: pointer to the PF structure |
| 4370 | * |
| 4371 | * There is no error returned here because the driver should be able to handle |
| 4372 | * 128 Byte cache lines, so we only print a warning in case issues are seen, |
| 4373 | * specifically with Tx. |
| 4374 | */ |
| 4375 | static void ice_verify_cacheline_size(struct ice_pf *pf) |
| 4376 | { |
| 4377 | if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 4378 | dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", |
Brett Creeley | c585ea4 | 2018-10-26 10:40:58 -0700 | [diff] [blame] | 4379 | ICE_CACHE_LINE_BYTES); |
| 4380 | } |
| 4381 | |
| 4382 | /** |
Paul M Stillwell Jr | e3710a0 | 2019-09-09 06:47:42 -0700 | [diff] [blame] | 4383 | * ice_send_version - update firmware with driver version |
| 4384 | * @pf: PF struct |
| 4385 | * |
Tony Nguyen | d54699e | 2021-10-07 15:58:01 -0700 | [diff] [blame] | 4386 | * Returns 0 on success, else error code |
Paul M Stillwell Jr | e3710a0 | 2019-09-09 06:47:42 -0700 | [diff] [blame] | 4387 | */ |
Tony Nguyen | 5e24d59 | 2021-10-07 15:56:57 -0700 | [diff] [blame] | 4388 | static int ice_send_version(struct ice_pf *pf) |
Paul M Stillwell Jr | e3710a0 | 2019-09-09 06:47:42 -0700 | [diff] [blame] | 4389 | { |
| 4390 | struct ice_driver_ver dv; |
| 4391 | |
Jeff Kirsher | 34a2a3b8 | 2020-05-29 00:18:33 -0700 | [diff] [blame] | 4392 | dv.major_ver = 0xff; |
| 4393 | dv.minor_ver = 0xff; |
| 4394 | dv.build_ver = 0xff; |
Paul M Stillwell Jr | e3710a0 | 2019-09-09 06:47:42 -0700 | [diff] [blame] | 4395 | dv.subbuild_ver = 0; |
Jeff Kirsher | 34a2a3b8 | 2020-05-29 00:18:33 -0700 | [diff] [blame] | 4396 | strscpy((char *)dv.driver_string, UTS_RELEASE, |
Paul M Stillwell Jr | e3710a0 | 2019-09-09 06:47:42 -0700 | [diff] [blame] | 4397 | sizeof(dv.driver_string)); |
| 4398 | return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); |
| 4399 | } |
| 4400 | |
| 4401 | /** |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 4402 | * ice_init_fdir - Initialize flow director VSI and configuration |
| 4403 | * @pf: pointer to the PF instance |
| 4404 | * |
| 4405 | * returns 0 on success, negative on error |
| 4406 | */ |
| 4407 | static int ice_init_fdir(struct ice_pf *pf) |
| 4408 | { |
| 4409 | struct device *dev = ice_pf_to_dev(pf); |
| 4410 | struct ice_vsi *ctrl_vsi; |
| 4411 | int err; |
| 4412 | |
| 4413 | /* Side Band Flow Director needs to have a control VSI. |
| 4414 | * Allocate it and store it in the PF. |
| 4415 | */ |
| 4416 | ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); |
| 4417 | if (!ctrl_vsi) { |
| 4418 | dev_dbg(dev, "could not create control VSI\n"); |
| 4419 | return -ENOMEM; |
| 4420 | } |
| 4421 | |
| 4422 | err = ice_vsi_open_ctrl(ctrl_vsi); |
| 4423 | if (err) { |
| 4424 | dev_dbg(dev, "could not open control VSI\n"); |
| 4425 | goto err_vsi_open; |
| 4426 | } |
| 4427 | |
| 4428 | mutex_init(&pf->hw.fdir_fltr_lock); |
| 4429 | |
| 4430 | err = ice_fdir_create_dflt_rules(pf); |
| 4431 | if (err) |
| 4432 | goto err_fdir_rule; |
| 4433 | |
| 4434 | return 0; |
| 4435 | |
| 4436 | err_fdir_rule: |
| 4437 | ice_fdir_release_flows(&pf->hw); |
| 4438 | ice_vsi_close(ctrl_vsi); |
| 4439 | err_vsi_open: |
| 4440 | ice_vsi_release(ctrl_vsi); |
| 4441 | if (pf->ctrl_vsi_idx != ICE_NO_VSI) { |
| 4442 | pf->vsi[pf->ctrl_vsi_idx] = NULL; |
| 4443 | pf->ctrl_vsi_idx = ICE_NO_VSI; |
| 4444 | } |
| 4445 | return err; |
| 4446 | } |
| 4447 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4448 | static void ice_deinit_fdir(struct ice_pf *pf) |
| 4449 | { |
| 4450 | struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); |
| 4451 | |
| 4452 | if (!vsi) |
| 4453 | return; |
| 4454 | |
| 4455 | ice_vsi_manage_fdir(vsi, false); |
| 4456 | ice_vsi_release(vsi); |
| 4457 | if (pf->ctrl_vsi_idx != ICE_NO_VSI) { |
| 4458 | pf->vsi[pf->ctrl_vsi_idx] = NULL; |
| 4459 | pf->ctrl_vsi_idx = ICE_NO_VSI; |
| 4460 | } |
| 4461 | |
| 4462 | mutex_destroy(&pf->hw.fdir_fltr_lock); |
| 4463 | } |
| 4464 | |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 4465 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4466 | * ice_get_opt_fw_name - return optional firmware file name or NULL |
| 4467 | * @pf: pointer to the PF instance |
| 4468 | */ |
| 4469 | static char *ice_get_opt_fw_name(struct ice_pf *pf) |
| 4470 | { |
| 4471 | /* Optional firmware name is the same as the default with an additional dash |
| 4472 | * followed by an EUI-64 identifier (PCIe Device Serial Number) |
| 4473 | */ |
| 4474 | struct pci_dev *pdev = pf->pdev; |
Jacob Keller | ceb2f00 | 2020-03-02 18:25:03 -0800 | [diff] [blame] | 4475 | char *opt_fw_filename; |
| 4476 | u64 dsn; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4477 | |
| 4478 | /* Determine the name of the optional file using the DSN (two |
| 4479 | * dwords following the start of the DSN Capability). |
| 4480 | */ |
Jacob Keller | ceb2f00 | 2020-03-02 18:25:03 -0800 | [diff] [blame] | 4481 | dsn = pci_get_dsn(pdev); |
| 4482 | if (!dsn) |
| 4483 | return NULL; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4484 | |
Jacob Keller | ceb2f00 | 2020-03-02 18:25:03 -0800 | [diff] [blame] | 4485 | opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); |
| 4486 | if (!opt_fw_filename) |
| 4487 | return NULL; |
| 4488 | |
Paul M Stillwell Jr | 1a9c561 | 2020-05-15 17:55:03 -0700 | [diff] [blame] | 4489 | snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", |
Jacob Keller | ceb2f00 | 2020-03-02 18:25:03 -0800 | [diff] [blame] | 4490 | ICE_DDP_PKG_PATH, dsn); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4491 | |
| 4492 | return opt_fw_filename; |
| 4493 | } |
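|  | /* Example (illustrative DSN): for a device whose PCIe Device Serial Number is |
|  |  * 0x0123456789abcdef, the name built above is ICE_DDP_PKG_PATH followed by |
|  |  * "ice-0123456789abcdef.pkg", which is looked up in the kernel firmware |
|  |  * search paths before falling back to the default package file. |
|  |  */ |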
| 4494 | |
| 4495 | /** |
| 4496 | * ice_request_fw - Device initialization routine |
| 4497 | * @pf: pointer to the PF instance |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4498 | * @firmware: double pointer to firmware struct |
| 4499 | * |
| 4500 | * Return: zero when successful, negative values otherwise. |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4501 | */ |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4502 | static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4503 | { |
| 4504 | char *opt_fw_filename = ice_get_opt_fw_name(pf); |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 4505 | struct device *dev = ice_pf_to_dev(pf); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4506 | int err = 0; |
| 4507 | |
| 4508 | /* optional device-specific DDP (if present) overrides the default DDP |
| 4509 | * package file. The kernel logs a debug message if the file doesn't exist, |
| 4510 | * and warning messages for other errors. |
| 4511 | */ |
| 4512 | if (opt_fw_filename) { |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4513 | err = firmware_request_nowarn(firmware, opt_fw_filename, dev); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4514 | kfree(opt_fw_filename); |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4515 | if (!err) |
| 4516 | return err; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4517 | } |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4518 | err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); |
| 4519 | if (err) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 4520 | dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4521 | |
| 4522 | return err; |
| 4523 | } |
| 4524 | |
| 4525 | /** |
| 4526 | * ice_init_tx_topology - performs Tx topology initialization |
| 4527 | * @hw: pointer to the hardware structure |
| 4528 | * @firmware: pointer to firmware structure |
| 4529 | * |
| 4530 | * Return: zero when init was successful, negative values otherwise. |
| 4531 | */ |
| 4532 | static int |
| 4533 | ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) |
| 4534 | { |
| 4535 | u8 num_tx_sched_layers = hw->num_tx_sched_layers; |
| 4536 | struct ice_pf *pf = hw->back; |
| 4537 | struct device *dev; |
| 4538 | u8 *buf_copy; |
| 4539 | int err; |
| 4540 | |
| 4541 | dev = ice_pf_to_dev(pf); |
| 4542 | /* ice_cfg_tx_topo buf argument is not a constant, |
| 4543 | * so we have to make a copy |
| 4544 | */ |
| 4545 | buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL); |
|  | if (!buf_copy) |
|  | return -ENOMEM; |
| 4546 | |
| 4547 | err = ice_cfg_tx_topo(hw, buf_copy, firmware->size); |
| 4548 | if (!err) { |
| 4549 | if (hw->num_tx_sched_layers > num_tx_sched_layers) |
| 4550 | dev_info(dev, "Tx scheduling layers switching feature disabled\n"); |
| 4551 | else |
| 4552 | dev_info(dev, "Tx scheduling layers switching feature enabled\n"); |
| 4553 | /* if there was a change in topology, ice_cfg_tx_topo triggered |
| 4554 | * a CORER and we need to re-init hw |
| 4555 | */ |
| 4556 | ice_deinit_hw(hw); |
| 4557 | err = ice_init_hw(hw); |
| 4558 | |
| 4559 | return err; |
| 4560 | } else if (err == -EIO) { |
| 4561 | dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4562 | } |
| 4563 | |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4564 | return 0; |
| 4565 | } |
| 4566 | |
| 4567 | /** |
| 4568 | * ice_init_ddp_config - DDP related configuration |
| 4569 | * @hw: pointer to the hardware structure |
| 4570 | * @pf: pointer to pf structure |
| 4571 | * |
| 4572 | * This function loads the DDP file from disk, then initializes the Tx |
| 4573 | * topology. At the end the DDP package is loaded onto the card. |
| 4574 | * |
| 4575 | * Return: zero when init was successful, negative values otherwise. |
| 4576 | */ |
| 4577 | static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) |
| 4578 | { |
| 4579 | struct device *dev = ice_pf_to_dev(pf); |
| 4580 | const struct firmware *firmware = NULL; |
| 4581 | int err; |
| 4582 | |
| 4583 | err = ice_request_fw(pf, &firmware); |
| 4584 | if (err) { |
| 4585 | dev_err(dev, "Fail during requesting FW: %d\n", err); |
| 4586 | return err; |
| 4587 | } |
| 4588 | |
| 4589 | err = ice_init_tx_topology(hw, firmware); |
| 4590 | if (err) { |
| 4591 | dev_err(dev, "Fail during initialization of Tx topology: %d\n", |
| 4592 | err); |
| 4593 | release_firmware(firmware); |
| 4594 | return err; |
| 4595 | } |
| 4596 | |
| 4597 | /* Download firmware to device */ |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4598 | ice_load_pkg(firmware, pf); |
| 4599 | release_firmware(firmware); |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4600 | |
| 4601 | return 0; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 4602 | } |
| 4603 | |
| 4604 | /** |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 4605 | * ice_print_wake_reason - show the wake up cause in the log |
| 4606 | * @pf: pointer to the PF struct |
| 4607 | */ |
| 4608 | static void ice_print_wake_reason(struct ice_pf *pf) |
| 4609 | { |
| 4610 | u32 wus = pf->wakeup_reason; |
| 4611 | const char *wake_str; |
| 4612 | |
| 4613 | /* if no wake event, nothing to print */ |
| 4614 | if (!wus) |
| 4615 | return; |
| 4616 | |
| 4617 | if (wus & PFPM_WUS_LNKC_M) |
| 4618 | wake_str = "Link\n"; |
| 4619 | else if (wus & PFPM_WUS_MAG_M) |
| 4620 | wake_str = "Magic Packet\n"; |
| 4621 | else if (wus & PFPM_WUS_MNG_M) |
| 4622 | wake_str = "Management\n"; |
| 4623 | else if (wus & PFPM_WUS_FW_RST_WK_M) |
| 4624 | wake_str = "Firmware Reset\n"; |
| 4625 | else |
| 4626 | wake_str = "Unknown\n"; |
| 4627 | |
| 4628 | dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); |
| 4629 | } |
| 4630 | |
| 4631 | /** |
Paul M Stillwell Jr | 96a9a93 | 2023-12-12 21:07:12 -0800 | [diff] [blame] | 4632 | * ice_pf_fwlog_update_module - update the log level of one FW logging module |
| 4633 | * @pf: pointer to the PF struct |
| 4634 | * @log_level: log_level to use for the @module |
| 4635 | * @module: module to update |
| 4636 | */ |
| 4637 | void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) |
| 4638 | { |
| 4639 | struct ice_hw *hw = &pf->hw; |
| 4640 | |
| 4641 | hw->fwlog_cfg.module_entries[module].log_level = log_level; |
| 4642 | } |
| 4643 | |
| 4644 | /** |
Paul M Stillwell Jr | 418e534 | 2023-01-23 16:57:14 -0800 | [diff] [blame] | 4645 | * ice_register_netdev - register netdev |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4646 | * @vsi: pointer to the VSI struct |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4647 | */ |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4648 | static int ice_register_netdev(struct ice_vsi *vsi) |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4649 | { |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4650 | int err; |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4651 | |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4652 | if (!vsi || !vsi->netdev) |
| 4653 | return -EIO; |
| 4654 | |
| 4655 | err = register_netdev(vsi->netdev); |
| 4656 | if (err) |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4657 | return err; |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4658 | |
Anirudh Venkataramanan | a476d72 | 2021-03-02 10:15:41 -0800 | [diff] [blame] | 4659 | set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4660 | netif_carrier_off(vsi->netdev); |
| 4661 | netif_tx_stop_all_queues(vsi->netdev); |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4662 | |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4663 | return 0; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4664 | } |
| 4665 | |
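/**
 * ice_unregister_netdev - unregister the netdev associated with a VSI
 * @vsi: pointer to the VSI struct
 *
 * Safe to call when the VSI has no netdev; clears the
 * ICE_VSI_NETDEV_REGISTERED state bit after unregistering.
 */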
| 4666 | static void ice_unregister_netdev(struct ice_vsi *vsi) |
| 4667 | { |
| 4668 | if (!vsi || !vsi->netdev) |
| 4669 | return; |
| 4670 | |
| 4671 | unregister_netdev(vsi->netdev); |
| 4672 | clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); |
| 4673 | } |
| 4674 | |
| 4675 | /** |
| 4676 | * ice_cfg_netdev - Allocate and configure a netdev for the VSI |
| 4677 | * @vsi: the VSI associated with the new netdev |
| 4678 | * |
| 4679 | * Returns 0 on success, negative value on failure |
| 4680 | */ |
| 4681 | static int ice_cfg_netdev(struct ice_vsi *vsi) |
| 4682 | { |
| 4683 | struct ice_netdev_priv *np; |
| 4684 | struct net_device *netdev; |
| 4685 | u8 mac_addr[ETH_ALEN]; |
| 4686 | |
| 4687 | netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, |
| 4688 | vsi->alloc_rxq); |
| 4689 | if (!netdev) |
| 4690 | return -ENOMEM; |
| 4691 | |
| 4692 | set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); |
| 4693 | vsi->netdev = netdev; |
| 4694 | np = netdev_priv(netdev); |
| 4695 | np->vsi = vsi; |
| 4696 | |
| 4697 | ice_set_netdev_features(netdev); |
Lorenzo Bianconi | b6a4103 | 2023-02-14 15:39:27 +0100 | [diff] [blame] | 4698 | ice_set_ops(vsi); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4699 | |
| 4700 | if (vsi->type == ICE_VSI_PF) { |
| 4701 | SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); |
| 4702 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
| 4703 | eth_hw_addr_set(netdev, mac_addr); |
| 4704 | } |
| 4705 | |
| 4706 | netdev->priv_flags |= IFF_UNICAST_FLT; |
| 4707 | |
| 4708 | /* Setup netdev TC information */ |
| 4709 | ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); |
| 4710 | |
| 4711 | netdev->max_mtu = ICE_MAX_MTU; |
| 4712 | |
| 4713 | return 0; |
| 4714 | } |
| 4715 | |
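/**
 * ice_decfg_netdev - free the netdev previously allocated by ice_cfg_netdev
 * @vsi: the VSI whose netdev is being released
 *
 * Clears the ICE_VSI_NETDEV_ALLOCD state bit, frees the netdev and resets
 * vsi->netdev to NULL.
 */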
| 4716 | static void ice_decfg_netdev(struct ice_vsi *vsi) |
| 4717 | { |
| 4718 | clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 4719 | free_netdev(vsi->netdev); |
| 4720 | vsi->netdev = NULL; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4721 | } |
| 4722 | |
Jan Sokolowski | 5708155 | 2023-07-13 15:21:24 +0200 | [diff] [blame] | 4723 | /** |
| 4724 | * ice_wait_for_fw - wait for full FW readiness |
| 4725 | * @hw: pointer to the hardware structure |
| 4726 | * @timeout: milliseconds that can elapse before timing out |
| 4727 | */ |
| 4728 | static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) |
| 4729 | { |
| 4730 | int fw_loading; |
| 4731 | u32 elapsed = 0; |
| 4732 | |
| 4733 | while (elapsed <= timeout) { |
| 4734 | fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; |
| 4735 | |
| 4736 | /* firmware is still loading, wait a bit longer */ |
| 4737 | if (fw_loading) { |
| 4738 | elapsed += 100; |
| 4739 | msleep(100); |
| 4740 | continue; |
| 4741 | } |
| 4742 | return 0; |
| 4743 | } |
| 4744 | |
| 4745 | return -ETIMEDOUT; |
| 4746 | } |
| 4747 | |
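/**
 * ice_init_dev - initialize HW and core software resources for a PF
 * @pf: pointer to the PF struct
 *
 * Initializes the HW structures (waiting for FW readiness on C827 devices),
 * feature support and the DDP configuration, sets up the PF, the UDP tunnel
 * offload tables, the interrupt scheme and the misc interrupt vector.
 * On failure, everything set up so far is unwound.
 *
 * Return: 0 on success, negative error code otherwise.
 */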
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 4748 | int ice_init_dev(struct ice_pf *pf) |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4749 | { |
| 4750 | struct device *dev = ice_pf_to_dev(pf); |
| 4751 | struct ice_hw *hw = &pf->hw; |
| 4752 | int err; |
| 4753 | |
| 4754 | err = ice_init_hw(hw); |
| 4755 | if (err) { |
| 4756 | dev_err(dev, "ice_init_hw failed: %d\n", err); |
| 4757 | return err; |
| 4758 | } |
| 4759 | |
Jan Sokolowski | 5708155 | 2023-07-13 15:21:24 +0200 | [diff] [blame] | 4760 | /* Some cards require longer initialization times |
| 4761 | * because they need to load FW from an external source. |
| 4762 | * This can take up to half a minute. |
| 4763 | */ |
| 4764 | if (ice_is_pf_c827(hw)) { |
| 4765 | err = ice_wait_for_fw(hw, 30000); |
| 4766 | if (err) { |
| 4767 | dev_err(dev, "ice_wait_for_fw timed out\n"); |
| 4768 | return err; |
| 4769 | } |
| 4770 | } |
| 4771 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4772 | ice_init_feature_support(pf); |
| 4773 | |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4774 | err = ice_init_ddp_config(hw, pf); |
| 4775 | if (err) |
| 4776 | return err; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4777 | |
Michal Wilczynski | cc5776f | 2024-04-19 04:08:52 -0400 | [diff] [blame] | 4778 | /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4779 | * set in pf->flags, which will cause ice_is_safe_mode to return |
| 4780 | * true |
| 4781 | */ |
| 4782 | if (ice_is_safe_mode(pf)) { |
| 4783 | /* we already got function/device capabilities but these don't |
| 4784 | * reflect what the driver needs to do in safe mode. Instead of |
| 4785 | * adding conditional logic everywhere to ignore these |
| 4786 | * device/function capabilities, override them. |
| 4787 | */ |
| 4788 | ice_set_safe_mode_caps(hw); |
| 4789 | } |
| 4790 | |
| 4791 | err = ice_init_pf(pf); |
| 4792 | if (err) { |
| 4793 | dev_err(dev, "ice_init_pf failed: %d\n", err); |
| 4794 | goto err_init_pf; |
| 4795 | } |
| 4796 | |
| 4797 | pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; |
| 4798 | pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; |
| 4799 | pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; |
| 4800 | pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; |
| 4801 | if (pf->hw.tnl.valid_count[TNL_VXLAN]) { |
| 4802 | pf->hw.udp_tunnel_nic.tables[0].n_entries = |
| 4803 | pf->hw.tnl.valid_count[TNL_VXLAN]; |
| 4804 | pf->hw.udp_tunnel_nic.tables[0].tunnel_types = |
| 4805 | UDP_TUNNEL_TYPE_VXLAN; |
| 4806 | } |
| 4807 | if (pf->hw.tnl.valid_count[TNL_GENEVE]) { |
| 4808 | pf->hw.udp_tunnel_nic.tables[1].n_entries = |
| 4809 | pf->hw.tnl.valid_count[TNL_GENEVE]; |
| 4810 | pf->hw.udp_tunnel_nic.tables[1].tunnel_types = |
| 4811 | UDP_TUNNEL_TYPE_GENEVE; |
| 4812 | } |
| 4813 | |
| 4814 | err = ice_init_interrupt_scheme(pf); |
| 4815 | if (err) { |
| 4816 | dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); |
| 4817 | err = -EIO; |
| 4818 | goto err_init_interrupt_scheme; |
| 4819 | } |
| 4820 | |
| 4821 | /* In case of MSIX we are going to set up the misc vector right here |
| 4822 | * to handle admin queue events etc. In case of legacy and MSI |
| 4823 | * the misc functionality and queue processing are combined in |
| 4824 | * the same vector and that gets set up at open. |
| 4825 | */ |
| 4826 | err = ice_req_irq_msix_misc(pf); |
| 4827 | if (err) { |
| 4828 | dev_err(dev, "setup of misc vector failed: %d\n", err); |
| 4829 | goto err_req_irq_msix_misc; |
| 4830 | } |
| 4831 | |
| 4832 | return 0; |
| 4833 | |
| 4834 | err_req_irq_msix_misc: |
| 4835 | ice_clear_interrupt_scheme(pf); |
| 4836 | err_init_interrupt_scheme: |
| 4837 | ice_deinit_pf(pf); |
| 4838 | err_init_pf: |
| 4839 | ice_deinit_hw(hw); |
| 4840 | return err; |
| 4841 | } |
| 4842 | |
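/**
 * ice_deinit_dev - release resources acquired by ice_init_dev
 * @pf: pointer to the PF struct
 *
 * Frees the misc interrupt vector, deinitializes the PF and HW structures,
 * performs a PF reset and finally clears the interrupt scheme.
 */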
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 4843 | void ice_deinit_dev(struct ice_pf *pf) |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4844 | { |
| 4845 | ice_free_irq_msix_misc(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4846 | ice_deinit_pf(pf); |
| 4847 | ice_deinit_hw(&pf->hw); |
Jakub Buchocki | 24b454b | 2023-06-12 10:14:21 -0700 | [diff] [blame] | 4848 | |
| 4849 | /* Service task is already stopped, so call reset directly. */ |
| 4850 | ice_reset(&pf->hw, ICE_RESET_PFR); |
| 4851 | pci_wait_for_pending_transaction(pf->pdev); |
| 4852 | ice_clear_interrupt_scheme(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4853 | } |
| 4854 | |
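/**
 * ice_init_features - initialize DDP-driven and optional PF features
 * @pf: pointer to the PF struct
 *
 * Skipped entirely in safe mode. Initializes PTP, GNSS, DPLL, flow
 * director, DCB, LAG and hwmon support where the corresponding capability
 * is present; most failures here are non-fatal to driver load.
 */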
| 4855 | static void ice_init_features(struct ice_pf *pf) |
| 4856 | { |
| 4857 | struct device *dev = ice_pf_to_dev(pf); |
| 4858 | |
| 4859 | if (ice_is_safe_mode(pf)) |
| 4860 | return; |
| 4861 | |
| 4862 | /* initialize DDP driven features */ |
| 4863 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
| 4864 | ice_ptp_init(pf); |
| 4865 | |
| 4866 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) |
| 4867 | ice_gnss_init(pf); |
| 4868 | |
Arkadiusz Kubalewski | d7999f5 | 2023-09-13 21:49:41 +0100 | [diff] [blame] | 4869 | if (ice_is_feature_supported(pf, ICE_F_CGU) || |
| 4870 | ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) |
| 4871 | ice_dpll_init(pf); |
| 4872 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4873 | /* Note: Flow director init failure is non-fatal to load */ |
| 4874 | if (ice_init_fdir(pf)) |
| 4875 | dev_err(dev, "could not initialize flow director\n"); |
| 4876 | |
| 4877 | /* Note: DCB init failure is non-fatal to load */ |
| 4878 | if (ice_init_pf_dcb(pf, false)) { |
| 4879 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
| 4880 | clear_bit(ICE_FLAG_DCB_ENA, pf->flags); |
| 4881 | } else { |
| 4882 | ice_cfg_lldp_mib_change(&pf->hw, true); |
| 4883 | } |
| 4884 | |
| 4885 | if (ice_init_lag(pf)) |
| 4886 | dev_warn(dev, "Failed to init link aggregation support\n"); |
Konrad Knitter | 4da71a7 | 2023-12-01 10:08:39 -0800 | [diff] [blame] | 4887 | |
| 4888 | ice_hwmon_init(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4889 | } |
| 4890 | |
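/**
 * ice_deinit_features - tear down features set up by ice_init_features
 * @pf: pointer to the PF struct
 */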
| 4891 | static void ice_deinit_features(struct ice_pf *pf) |
| 4892 | { |
Mateusz Pacuszka | 42066c4d | 2023-10-11 16:33:34 -0700 | [diff] [blame] | 4893 | if (ice_is_safe_mode(pf)) |
| 4894 | return; |
| 4895 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4896 | ice_deinit_lag(pf); |
| 4897 | if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) |
| 4898 | ice_cfg_lldp_mib_change(&pf->hw, false); |
| 4899 | ice_deinit_fdir(pf); |
| 4900 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) |
| 4901 | ice_gnss_exit(pf); |
| 4902 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
| 4903 | ice_ptp_release(pf); |
Arkadiusz Kubalewski | d7999f5 | 2023-09-13 21:49:41 +0100 | [diff] [blame] | 4904 | if (test_bit(ICE_FLAG_DPLL, pf->flags)) |
| 4905 | ice_dpll_deinit(pf); |
Michal Swiatkowski | af41b18 | 2023-10-24 13:09:20 +0200 | [diff] [blame] | 4906 | if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) |
| 4907 | xa_destroy(&pf->eswitch.reprs); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 4908 | } |
| 4909 | |
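/**
 * ice_init_wakeup - capture and clear the wake-up status at probe time
 * @pf: pointer to the PF struct
 *
 * Saves PFPM_WUS for later reporting, logs the wake reason, clears all
 * wake status bits and leaves Wake on LAN disabled until the user enables it.
 */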
| 4910 | static void ice_init_wakeup(struct ice_pf *pf) |
| 4911 | { |
| 4912 | /* Save wakeup reason register for later use */ |
| 4913 | pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); |
| 4914 | |
| 4915 | /* check for a power management event */ |
| 4916 | ice_print_wake_reason(pf); |
| 4917 | |
| 4918 | /* clear wake status, all bits */ |
| 4919 | wr32(&pf->hw, PFPM_WUS, U32_MAX); |
| 4920 | |
| 4921 | /* Disable WoL at init, wait for user to enable */ |
| 4922 | device_set_wakeup_enable(ice_pf_to_dev(pf), false); |
| 4923 | } |
| 4924 | |
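/**
 * ice_init_link - set up link events and initial PHY configuration
 * @pf: pointer to the PF struct
 *
 * Registers for link events and, when media is available, initializes the
 * user PHY configuration; several of the steps are non-fatal and are only
 * logged on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */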
| 4925 | static int ice_init_link(struct ice_pf *pf) |
| 4926 | { |
| 4927 | struct device *dev = ice_pf_to_dev(pf); |
| 4928 | int err; |
| 4929 | |
| 4930 | err = ice_init_link_events(pf->hw.port_info); |
| 4931 | if (err) { |
| 4932 | dev_err(dev, "ice_init_link_events failed: %d\n", err); |
| 4933 | return err; |
| 4934 | } |
| 4935 | |
| 4936 | /* not a fatal error if this fails */ |
| 4937 | err = ice_init_nvm_phy_type(pf->hw.port_info); |
| 4938 | if (err) |
| 4939 | dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); |
| 4940 | |
| 4941 | /* not a fatal error if this fails */ |
| 4942 | err = ice_update_link_info(pf->hw.port_info); |
| 4943 | if (err) |
| 4944 | dev_err(dev, "ice_update_link_info failed: %d\n", err); |
| 4945 | |
| 4946 | ice_init_link_dflt_override(pf->hw.port_info); |
| 4947 | |
| 4948 | ice_check_link_cfg_err(pf, |
| 4949 | pf->hw.port_info->phy.link_info.link_cfg_err); |
| 4950 | |
| 4951 | /* if media available, initialize PHY settings */ |
| 4952 | if (pf->hw.port_info->phy.link_info.link_info & |
| 4953 | ICE_AQ_MEDIA_AVAILABLE) { |
| 4954 | /* not a fatal error if this fails */ |
| 4955 | err = ice_init_phy_user_cfg(pf->hw.port_info); |
| 4956 | if (err) |
| 4957 | dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); |
| 4958 | |
| 4959 | if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { |
| 4960 | struct ice_vsi *vsi = ice_get_main_vsi(pf); |
| 4961 | |
| 4962 | if (vsi) |
| 4963 | ice_configure_phy(vsi); |
| 4964 | } |
| 4965 | } else { |
| 4966 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
| 4967 | } |
| 4968 | |
| 4969 | return err; |
| 4970 | } |
| 4971 | |
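/**
 * ice_init_pf_sw - set up the PF switch structure and main VSI
 * @pf: pointer to the PF struct
 *
 * Allocates the first_sw structure for the switch element created by FW on
 * boot, configures the port parameters and creates the main PF VSI.
 *
 * Return: 0 on success, negative error code otherwise.
 */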
| 4972 | static int ice_init_pf_sw(struct ice_pf *pf) |
| 4973 | { |
| 4974 | bool dvm = ice_is_dvm_ena(&pf->hw); |
| 4975 | struct ice_vsi *vsi; |
| 4976 | int err; |
| 4977 | |
| 4978 | /* create switch struct for the switch element created by FW on boot */ |
| 4979 | pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); |
| 4980 | if (!pf->first_sw) |
| 4981 | return -ENOMEM; |
| 4982 | |
| 4983 | if (pf->hw.evb_veb) |
| 4984 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; |
| 4985 | else |
| 4986 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; |
| 4987 | |
| 4988 | pf->first_sw->pf = pf; |
| 4989 | |
| 4990 | /* record the sw_id available for later use */ |
| 4991 | pf->first_sw->sw_id = pf->hw.port_info->sw_id; |
| 4992 | |
| 4993 | err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); |
| 4994 | if (err) |
| 4995 | goto err_aq_set_port_params; |
| 4996 | |
| 4997 | vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); |
| 4998 | if (!vsi) { |
| 4999 | err = -ENOMEM; |
| 5000 | goto err_pf_vsi_setup; |
| 5001 | } |
| 5002 | |
| 5003 | return 0; |
| 5004 | |
| 5005 | err_pf_vsi_setup: |
| 5006 | err_aq_set_port_params: |
| 5007 | kfree(pf->first_sw); |
| 5008 | return err; |
| 5009 | } |
| 5010 | |
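/**
 * ice_deinit_pf_sw - release the main VSI and the PF switch structure
 * @pf: pointer to the PF struct
 */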
| 5011 | static void ice_deinit_pf_sw(struct ice_pf *pf) |
| 5012 | { |
| 5013 | struct ice_vsi *vsi = ice_get_main_vsi(pf); |
| 5014 | |
| 5015 | if (!vsi) |
| 5016 | return; |
| 5017 | |
| 5018 | ice_vsi_release(vsi); |
| 5019 | kfree(pf->first_sw); |
| 5020 | } |
| 5021 | |
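/**
 * ice_alloc_vsis - allocate the VSI and VSI stats arrays for a PF
 * @pf: pointer to the PF struct
 *
 * The VSI count comes from the guaranteed VSI device capability and is
 * capped by the UDP tunnel sharing limit.
 *
 * Return: 0 on success, negative error code otherwise.
 */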
| 5022 | static int ice_alloc_vsis(struct ice_pf *pf) |
| 5023 | { |
| 5024 | struct device *dev = ice_pf_to_dev(pf); |
| 5025 | |
| 5026 | pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; |
| 5027 | if (!pf->num_alloc_vsi) |
| 5028 | return -EIO; |
| 5029 | |
| 5030 | if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { |
| 5031 | dev_warn(dev, |
| 5032 | "limiting the VSI count due to UDP tunnel limitation %d > %d\n", |
| 5033 | pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); |
| 5034 | pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; |
| 5035 | } |
| 5036 | |
| 5037 | pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), |
| 5038 | GFP_KERNEL); |
| 5039 | if (!pf->vsi) |
| 5040 | return -ENOMEM; |
| 5041 | |
| 5042 | pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, |
| 5043 | sizeof(*pf->vsi_stats), GFP_KERNEL); |
| 5044 | if (!pf->vsi_stats) { |
| 5045 | devm_kfree(dev, pf->vsi); |
| 5046 | return -ENOMEM; |
| 5047 | } |
| 5048 | |
| 5049 | return 0; |
| 5050 | } |
| 5051 | |
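/**
 * ice_dealloc_vsis - free the VSI and VSI stats arrays of a PF
 * @pf: pointer to the PF struct
 */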
| 5052 | static void ice_dealloc_vsis(struct ice_pf *pf) |
| 5053 | { |
| 5054 | devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); |
| 5055 | pf->vsi_stats = NULL; |
| 5056 | |
| 5057 | pf->num_alloc_vsi = 0; |
| 5058 | devm_kfree(ice_pf_to_dev(pf), pf->vsi); |
| 5059 | pf->vsi = NULL; |
| 5060 | } |
| 5061 | |
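/**
 * ice_init_devlink - register devlink params, regions and the devlink instance
 * @pf: pointer to the PF struct
 *
 * Return: 0 on success, negative error code otherwise.
 */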
| 5062 | static int ice_init_devlink(struct ice_pf *pf) |
| 5063 | { |
| 5064 | int err; |
| 5065 | |
| 5066 | err = ice_devlink_register_params(pf); |
| 5067 | if (err) |
| 5068 | return err; |
| 5069 | |
| 5070 | ice_devlink_init_regions(pf); |
| 5071 | ice_devlink_register(pf); |
| 5072 | |
| 5073 | return 0; |
| 5074 | } |
| 5075 | |
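/**
 * ice_deinit_devlink - unwind everything done by ice_init_devlink
 * @pf: pointer to the PF struct
 */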
| 5076 | static void ice_deinit_devlink(struct ice_pf *pf) |
| 5077 | { |
| 5078 | ice_devlink_unregister(pf); |
| 5079 | ice_devlink_destroy_regions(pf); |
| 5080 | ice_devlink_unregister_params(pf); |
| 5081 | } |
| 5082 | |
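/**
 * ice_init - perform the one-time PF initialization at probe
 * @pf: pointer to the PF struct
 *
 * Initializes the device, allocates VSIs, sets up the PF switch, wake-up
 * handling and link, sends the driver version to FW and starts the
 * service timer.
 *
 * Return: 0 on success, negative error code otherwise.
 */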
| 5083 | static int ice_init(struct ice_pf *pf) |
| 5084 | { |
| 5085 | int err; |
| 5086 | |
| 5087 | err = ice_init_dev(pf); |
| 5088 | if (err) |
| 5089 | return err; |
| 5090 | |
| 5091 | err = ice_alloc_vsis(pf); |
| 5092 | if (err) |
| 5093 | goto err_alloc_vsis; |
| 5094 | |
| 5095 | err = ice_init_pf_sw(pf); |
| 5096 | if (err) |
| 5097 | goto err_init_pf_sw; |
| 5098 | |
| 5099 | ice_init_wakeup(pf); |
| 5100 | |
| 5101 | err = ice_init_link(pf); |
| 5102 | if (err) |
| 5103 | goto err_init_link; |
| 5104 | |
| 5105 | err = ice_send_version(pf); |
| 5106 | if (err) |
| 5107 | goto err_init_link; |
| 5108 | |
| 5109 | ice_verify_cacheline_size(pf); |
| 5110 | |
| 5111 | if (ice_is_safe_mode(pf)) |
| 5112 | ice_set_safe_mode_vlan_cfg(pf); |
| 5113 | else |
| 5114 | /* print PCI link speed and width */ |
| 5115 | pcie_print_link_status(pf->pdev); |
| 5116 | |
| 5117 | /* ready to go, so clear down state bit */ |
| 5118 | clear_bit(ICE_DOWN, pf->state); |
| 5119 | clear_bit(ICE_SERVICE_DIS, pf->state); |
| 5120 | |
| 5121 | /* since everything is good, start the service timer */ |
| 5122 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
| 5123 | |
| 5124 | return 0; |
| 5125 | |
| 5126 | err_init_link: |
| 5127 | ice_deinit_pf_sw(pf); |
| 5128 | err_init_pf_sw: |
| 5129 | ice_dealloc_vsis(pf); |
| 5130 | err_alloc_vsis: |
| 5131 | ice_deinit_dev(pf); |
| 5132 | return err; |
| 5133 | } |
| 5134 | |
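/**
 * ice_deinit - unwind ice_init
 * @pf: pointer to the PF struct
 */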
| 5135 | static void ice_deinit(struct ice_pf *pf) |
| 5136 | { |
| 5137 | set_bit(ICE_SERVICE_DIS, pf->state); |
| 5138 | set_bit(ICE_DOWN, pf->state); |
| 5139 | |
| 5140 | ice_deinit_pf_sw(pf); |
| 5141 | ice_dealloc_vsis(pf); |
| 5142 | ice_deinit_dev(pf); |
| 5143 | } |
| 5144 | |
| 5145 | /** |
| 5146 | * ice_load - load the PF by initializing HW and starting the main VSI |
| 5147 | * @pf: pointer to the pf instance |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5148 | * |
| 5149 | * This function has to be called under devl_lock. |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5150 | */ |
| 5151 | int ice_load(struct ice_pf *pf) |
| 5152 | { |
| 5153 | struct ice_vsi *vsi; |
| 5154 | int err; |
| 5155 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5156 | devl_assert_locked(priv_to_devlink(pf)); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5157 | |
| 5158 | vsi = ice_get_main_vsi(pf); |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 5159 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5160 | /* init channel list */ |
| 5161 | INIT_LIST_HEAD(&vsi->ch_list); |
Jacob Keller | 5e509ab | 2023-01-18 17:16:43 -0800 | [diff] [blame] | 5162 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5163 | err = ice_cfg_netdev(vsi); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5164 | if (err) |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5165 | return err; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5166 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5167 | /* Setup DCB netlink interface */ |
| 5168 | ice_dcbnl_setup(vsi); |
| 5169 | |
| 5170 | err = ice_init_mac_fltr(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5171 | if (err) |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5172 | goto err_init_mac_fltr; |
| 5173 | |
| 5174 | err = ice_devlink_create_pf_port(pf); |
| 5175 | if (err) |
| 5176 | goto err_devlink_create_pf_port; |
| 5177 | |
| 5178 | SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); |
| 5179 | |
| 5180 | err = ice_register_netdev(vsi); |
| 5181 | if (err) |
| 5182 | goto err_register_netdev; |
| 5183 | |
| 5184 | err = ice_tc_indir_block_register(vsi); |
| 5185 | if (err) |
| 5186 | goto err_tc_indir_block_register; |
| 5187 | |
| 5188 | ice_napi_add(vsi); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5189 | |
| 5190 | err = ice_init_rdma(pf); |
| 5191 | if (err) |
| 5192 | goto err_init_rdma; |
| 5193 | |
| 5194 | ice_init_features(pf); |
| 5195 | ice_service_task_restart(pf); |
| 5196 | |
| 5197 | clear_bit(ICE_DOWN, pf->state); |
| 5198 | |
| 5199 | return 0; |
| 5200 | |
| 5201 | err_init_rdma: |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5202 | ice_tc_indir_block_unregister(vsi); |
| 5203 | err_tc_indir_block_register: |
| 5204 | ice_unregister_netdev(vsi); |
| 5205 | err_register_netdev: |
| 5206 | ice_devlink_destroy_pf_port(pf); |
| 5207 | err_devlink_create_pf_port: |
| 5208 | err_init_mac_fltr: |
| 5209 | ice_decfg_netdev(vsi); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5210 | return err; |
| 5211 | } |
| 5212 | |
| 5213 | /** |
| 5214 | * ice_unload - unload the PF by stopping the main VSI and deinitializing HW |
| 5215 | * @pf: pointer to the pf instance |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5216 | * |
| 5217 | * This function has to be called under devl_lock. |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5218 | */ |
| 5219 | void ice_unload(struct ice_pf *pf) |
| 5220 | { |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5221 | struct ice_vsi *vsi = ice_get_main_vsi(pf); |
| 5222 | |
| 5223 | devl_assert_locked(priv_to_devlink(pf)); |
| 5224 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5225 | ice_deinit_features(pf); |
| 5226 | ice_deinit_rdma(pf); |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5227 | ice_tc_indir_block_unregister(vsi); |
| 5228 | ice_unregister_netdev(vsi); |
| 5229 | ice_devlink_destroy_pf_port(pf); |
| 5230 | ice_decfg_netdev(vsi); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5231 | } |
| 5232 | |
Anirudh Venkataramanan | 1e23f07 | 2021-03-02 10:12:03 -0800 | [diff] [blame] | 5233 | /** |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5234 | * ice_probe - Device initialization routine |
| 5235 | * @pdev: PCI device information struct |
| 5236 | * @ent: entry in ice_pci_tbl |
| 5237 | * |
| 5238 | * Returns 0 on success, negative on failure |
| 5239 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 5240 | static int |
| 5241 | ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5242 | { |
Bruce Allan | 77ed84f | 2019-02-08 12:50:50 -0800 | [diff] [blame] | 5243 | struct device *dev = &pdev->dev; |
Michal Schmidt | 0e2bddf | 2024-03-26 00:20:37 +0100 | [diff] [blame] | 5244 | struct ice_adapter *adapter; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5245 | struct ice_pf *pf; |
| 5246 | struct ice_hw *hw; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5247 | int err; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5248 | |
Anirudh Venkataramanan | 50ac747 | 2021-07-28 12:39:10 -0700 | [diff] [blame] | 5249 | if (pdev->is_virtfn) { |
| 5250 | dev_err(dev, "can't probe a virtual function\n"); |
| 5251 | return -EINVAL; |
| 5252 | } |
| 5253 | |
Jesse Brandeburg | 0288c3e | 2023-10-11 16:33:33 -0700 | [diff] [blame] | 5254 | /* When running under a kdump kernel, initiate a reset before enabling the |
| 5255 | * device in order to clear out any pending DMA transactions. These |
| 5256 | * transactions can cause some systems to machine check when doing |
| 5257 | * the pcim_enable_device() below. |
| 5258 | */ |
| 5259 | if (is_kdump_kernel()) { |
| 5260 | pci_save_state(pdev); |
| 5261 | pci_clear_master(pdev); |
| 5262 | err = pcie_flr(pdev); |
| 5263 | if (err) |
| 5264 | return err; |
| 5265 | pci_restore_state(pdev); |
| 5266 | } |
| 5267 | |
Tony Nguyen | 4ee656b | 2020-02-06 01:20:13 -0800 | [diff] [blame] | 5268 | /* this driver uses devres, see |
| 5269 | * Documentation/driver-api/driver-model/devres.rst |
| 5270 | */ |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5271 | err = pcim_enable_device(pdev); |
| 5272 | if (err) |
| 5273 | return err; |
| 5274 | |
Jesse Brandeburg | 80ad6dd | 2021-03-31 14:17:01 -0700 | [diff] [blame] | 5275 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5276 | if (err) { |
Bruce Allan | 77ed84f | 2019-02-08 12:50:50 -0800 | [diff] [blame] | 5277 | dev_err(dev, "BAR0 I/O map error %d\n", err); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5278 | return err; |
| 5279 | } |
| 5280 | |
Jacob Keller | 1adf7ea | 2020-03-11 18:58:15 -0700 | [diff] [blame] | 5281 | pf = ice_allocate_pf(dev); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5282 | if (!pf) |
| 5283 | return -ENOMEM; |
| 5284 | |
Dave Ertman | 73e30a6 | 2021-10-04 05:15:25 -0700 | [diff] [blame] | 5285 | /* initialize Auxiliary index to invalid value */ |
| 5286 | pf->aux_idx = -1; |
| 5287 | |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 5288 | /* set up for high or low DMA */ |
Bruce Allan | 77ed84f | 2019-02-08 12:50:50 -0800 | [diff] [blame] | 5289 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5290 | if (err) { |
Bruce Allan | 77ed84f | 2019-02-08 12:50:50 -0800 | [diff] [blame] | 5291 | dev_err(dev, "DMA configuration failed: 0x%x\n", err); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5292 | return err; |
| 5293 | } |
| 5294 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5295 | pci_set_master(pdev); |
| 5296 | |
Michal Schmidt | 0e2bddf | 2024-03-26 00:20:37 +0100 | [diff] [blame] | 5297 | adapter = ice_adapter_get(pdev); |
| 5298 | if (IS_ERR(adapter)) |
| 5299 | return PTR_ERR(adapter); |
| 5300 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5301 | pf->pdev = pdev; |
Michal Schmidt | 0e2bddf | 2024-03-26 00:20:37 +0100 | [diff] [blame] | 5302 | pf->adapter = adapter; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5303 | pci_set_drvdata(pdev, pf); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5304 | set_bit(ICE_DOWN, pf->state); |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 5305 | /* Disable service task until DOWN bit is cleared */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5306 | set_bit(ICE_SERVICE_DIS, pf->state); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5307 | |
| 5308 | hw = &pf->hw; |
| 5309 | hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; |
Michal Swiatkowski | 4e56802 | 2019-10-09 07:09:46 -0700 | [diff] [blame] | 5310 | pci_save_state(pdev); |
| 5311 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5312 | hw->back = pf; |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5313 | hw->port_info = NULL; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5314 | hw->vendor_id = pdev->vendor; |
| 5315 | hw->device_id = pdev->device; |
| 5316 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); |
| 5317 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
| 5318 | hw->subsystem_device_id = pdev->subsystem_device; |
| 5319 | hw->bus.device = PCI_SLOT(pdev->devfn); |
| 5320 | hw->bus.func = PCI_FUNC(pdev->devfn); |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 5321 | ice_set_ctrlq_len(hw); |
| 5322 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5323 | pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); |
| 5324 | |
Anirudh Venkataramanan | 7ec59ee | 2018-03-20 07:58:06 -0700 | [diff] [blame] | 5325 | #ifndef CONFIG_DYNAMIC_DEBUG |
| 5326 | if (debug < -1) |
| 5327 | hw->debug_mask = debug; |
| 5328 | #endif |
| 5329 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5330 | err = ice_init(pf); |
Anirudh Venkataramanan | 08771bc | 2021-02-26 13:19:22 -0800 | [diff] [blame] | 5331 | if (err) |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5332 | goto err_init; |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 5333 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5334 | devl_lock(priv_to_devlink(pf)); |
| 5335 | err = ice_load(pf); |
Anirudh Venkataramanan | 08771bc | 2021-02-26 13:19:22 -0800 | [diff] [blame] | 5336 | if (err) |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5337 | goto err_load; |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 5338 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5339 | err = ice_init_devlink(pf); |
| 5340 | if (err) |
| 5341 | goto err_init_devlink; |
Michal Swiatkowski | 118c6bd | 2024-03-25 22:34:33 +0100 | [diff] [blame] | 5342 | devl_unlock(priv_to_devlink(pf)); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5343 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5344 | return 0; |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 5345 | |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5346 | err_init_devlink: |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5347 | ice_unload(pf); |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5348 | err_load: |
Michal Swiatkowski | 118c6bd | 2024-03-25 22:34:33 +0100 | [diff] [blame] | 5349 | devl_unlock(priv_to_devlink(pf)); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5350 | ice_deinit(pf); |
| 5351 | err_init: |
Michal Schmidt | 0e2bddf | 2024-03-26 00:20:37 +0100 | [diff] [blame] | 5352 | ice_adapter_put(pdev); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5353 | pci_disable_device(pdev); |
Anirudh Venkataramanan | f31e4b6 | 2018-03-20 07:58:07 -0700 | [diff] [blame] | 5354 | return err; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5355 | } |
| 5356 | |
| 5357 | /** |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5358 | * ice_set_wake - enable or disable Wake on LAN |
| 5359 | * @pf: pointer to the PF struct |
| 5360 | * |
| 5361 | * Simple helper for WoL control |
| 5362 | */ |
| 5363 | static void ice_set_wake(struct ice_pf *pf) |
| 5364 | { |
| 5365 | struct ice_hw *hw = &pf->hw; |
| 5366 | bool wol = pf->wol_ena; |
| 5367 | |
| 5368 | /* clear wake state, otherwise new wake events won't fire */ |
| 5369 | wr32(hw, PFPM_WUS, U32_MAX); |
| 5370 | |
| 5371 | /* enable / disable APM wake up, no RMW needed */ |
| 5372 | wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); |
| 5373 | |
| 5374 | /* enable or disable magic packet filter based on WoL setting */ |
| 5375 | wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); |
| 5376 | } |
| 5377 | |
| 5378 | /** |
Tony Nguyen | ef86048 | 2021-03-02 10:15:45 -0800 | [diff] [blame] | 5379 | * ice_setup_mc_magic_wake - set up device to wake on multicast magic packet |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5380 | * @pf: pointer to the PF struct |
| 5381 | * |
| 5382 | * Issue firmware command to enable multicast magic wake, making |
| 5383 | * sure that any locally administered address (LAA) is used for |
| 5384 | * wake, and that PF reset doesn't undo the LAA. |
| 5385 | */ |
| 5386 | static void ice_setup_mc_magic_wake(struct ice_pf *pf) |
| 5387 | { |
| 5388 | struct device *dev = ice_pf_to_dev(pf); |
| 5389 | struct ice_hw *hw = &pf->hw; |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5390 | u8 mac_addr[ETH_ALEN]; |
| 5391 | struct ice_vsi *vsi; |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 5392 | int status; |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5393 | u8 flags; |
| 5394 | |
| 5395 | if (!pf->wol_ena) |
| 5396 | return; |
| 5397 | |
| 5398 | vsi = ice_get_main_vsi(pf); |
| 5399 | if (!vsi) |
| 5400 | return; |
| 5401 | |
| 5402 | /* Get current MAC address in case it's an LAA */ |
| 5403 | if (vsi->netdev) |
| 5404 | ether_addr_copy(mac_addr, vsi->netdev->dev_addr); |
| 5405 | else |
| 5406 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
| 5407 | |
| 5408 | flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | |
| 5409 | ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | |
| 5410 | ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; |
| 5411 | |
| 5412 | status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); |
| 5413 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 5414 | dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 5415 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5416 | } |
| 5417 | |
| 5418 | /** |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5419 | * ice_remove - Device removal routine |
| 5420 | * @pdev: PCI device information struct |
| 5421 | */ |
| 5422 | static void ice_remove(struct pci_dev *pdev) |
| 5423 | { |
| 5424 | struct ice_pf *pf = pci_get_drvdata(pdev); |
Dave Ertman | 81b2358 | 2018-09-19 17:43:07 -0700 | [diff] [blame] | 5425 | int i; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5426 | |
Anirudh Venkataramanan | afd9d4a | 2018-10-26 10:40:51 -0700 | [diff] [blame] | 5427 | for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { |
| 5428 | if (!ice_is_reset_in_progress(pf->state)) |
| 5429 | break; |
| 5430 | msleep(100); |
| 5431 | } |
| 5432 | |
Brett Creeley | f844d52 | 2020-02-27 10:14:55 -0800 | [diff] [blame] | 5433 | if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5434 | set_bit(ICE_VF_RESETS_DISABLED, pf->state); |
Brett Creeley | f844d52 | 2020-02-27 10:14:55 -0800 | [diff] [blame] | 5435 | ice_free_vfs(pf); |
| 5436 | } |
| 5437 | |
Konrad Knitter | 4da71a7 | 2023-12-01 10:08:39 -0800 | [diff] [blame] | 5438 | ice_hwmon_exit(pf); |
| 5439 | |
Akeem G Abodunrin | 8d81fa5 | 2018-08-09 06:29:57 -0700 | [diff] [blame] | 5440 | ice_service_task_stop(pf); |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 5441 | ice_aq_cancel_waiting_tasks(pf); |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 5442 | set_bit(ICE_DOWN, pf->state); |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 5443 | |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 5444 | if (!ice_is_safe_mode(pf)) |
| 5445 | ice_remove_arfs(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5446 | |
Michal Swiatkowski | 118c6bd | 2024-03-25 22:34:33 +0100 | [diff] [blame] | 5447 | devl_lock(priv_to_devlink(pf)); |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5448 | ice_deinit_devlink(pf); |
| 5449 | |
Wojciech Drewek | 41cc4e5 | 2024-02-05 14:03:56 +0100 | [diff] [blame] | 5450 | ice_unload(pf); |
| 5451 | devl_unlock(priv_to_devlink(pf)); |
| 5452 | |
| 5453 | ice_deinit(pf); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 5454 | ice_vsi_release_all(pf); |
Michal Swiatkowski | 5b246e5 | 2022-12-21 12:38:18 +0100 | [diff] [blame] | 5455 | |
| 5456 | ice_setup_mc_magic_wake(pf); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5457 | ice_set_wake(pf); |
Jacob Keller | 1adf7ea | 2020-03-11 18:58:15 -0700 | [diff] [blame] | 5458 | |
Michal Schmidt | 0e2bddf | 2024-03-26 00:20:37 +0100 | [diff] [blame] | 5459 | ice_adapter_put(pdev); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5460 | pci_disable_device(pdev); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5461 | } |
| 5462 | |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5463 | /** |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5464 | * ice_shutdown - PCI callback for shutting down device |
| 5465 | * @pdev: PCI device information struct |
| 5466 | */ |
| 5467 | static void ice_shutdown(struct pci_dev *pdev) |
| 5468 | { |
| 5469 | struct ice_pf *pf = pci_get_drvdata(pdev); |
| 5470 | |
| 5471 | ice_remove(pdev); |
| 5472 | |
| 5473 | if (system_state == SYSTEM_POWER_OFF) { |
| 5474 | pci_wake_from_d3(pdev, pf->wol_ena); |
| 5475 | pci_set_power_state(pdev, PCI_D3hot); |
| 5476 | } |
| 5477 | } |
| 5478 | |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5479 | /** |
| 5480 | * ice_prepare_for_shutdown - prep for PCI shutdown |
| 5481 | * @pf: board private structure |
| 5482 | * |
| 5483 | * Inform or close all dependent features in prep for PCI device shutdown |
| 5484 | */ |
| 5485 | static void ice_prepare_for_shutdown(struct ice_pf *pf) |
| 5486 | { |
| 5487 | struct ice_hw *hw = &pf->hw; |
| 5488 | u32 v; |
| 5489 | |
| 5490 | /* Notify VFs of impending reset */ |
| 5491 | if (ice_check_sq_alive(hw, &hw->mailboxq)) |
| 5492 | ice_vc_notify_reset(pf); |
| 5493 | |
| 5494 | dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); |
| 5495 | |
| 5496 | /* disable the VSIs and their queues that are not already DOWN */ |
| 5497 | ice_pf_dis_all_vsi(pf, false); |
| 5498 | |
| 5499 | ice_for_each_vsi(pf, v) |
| 5500 | if (pf->vsi[v]) |
| 5501 | pf->vsi[v]->vsi_num = 0; |
| 5502 | |
Piotr Gardocki | fdd288e | 2024-06-14 12:38:11 +0200 | [diff] [blame] | 5503 | ice_shutdown_all_ctrlq(hw, true); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5504 | } |
| 5505 | |
| 5506 | /** |
| 5507 | * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme |
| 5508 | * @pf: board private structure to reinitialize |
| 5509 | * |
| 5510 | * This routine reinitializes the interrupt scheme that was cleared during |
| 5511 | * the power management suspend callback. |
| 5512 | * |
| 5513 | * This should be called during resume routine to re-allocate the q_vectors |
| 5514 | * and reacquire interrupts. |
| 5515 | */ |
| 5516 | static int ice_reinit_interrupt_scheme(struct ice_pf *pf) |
| 5517 | { |
| 5518 | struct device *dev = ice_pf_to_dev(pf); |
| 5519 | int ret, v; |
| 5520 | |
| 5521 | /* Since we clear MSIX flag during suspend, we need to |
| 5522 | * set it back during resume... |
| 5523 | */ |
| 5524 | |
| 5525 | ret = ice_init_interrupt_scheme(pf); |
| 5526 | if (ret) { |
| 5527 | dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); |
| 5528 | return ret; |
| 5529 | } |
| 5530 | |
| 5531 | /* Remap vectors and rings, after successful re-init interrupts */ |
| 5532 | ice_for_each_vsi(pf, v) { |
| 5533 | if (!pf->vsi[v]) |
| 5534 | continue; |
| 5535 | |
| 5536 | ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); |
| 5537 | if (ret) |
| 5538 | goto err_reinit; |
| 5539 | ice_vsi_map_rings_to_vectors(pf->vsi[v]); |
Amritha Nambiar | 080b0c8 | 2024-02-13 11:48:50 -0800 | [diff] [blame] | 5540 | ice_vsi_set_napi_queues(pf->vsi[v]); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5541 | } |
| 5542 | |
| 5543 | ret = ice_req_irq_msix_misc(pf); |
| 5544 | if (ret) { |
| 5545 | dev_err(dev, "Setting up misc vector failed after device suspend %d\n", |
| 5546 | ret); |
| 5547 | goto err_reinit; |
| 5548 | } |
| 5549 | |
| 5550 | return 0; |
| 5551 | |
| 5552 | err_reinit: |
| 5553 | while (v--) |
| 5554 | if (pf->vsi[v]) |
| 5555 | ice_vsi_free_q_vectors(pf->vsi[v]); |
| 5556 | |
| 5557 | return ret; |
| 5558 | } |
| 5559 | |
| 5560 | /** |
| 5561 | * ice_suspend - PCI PM suspend callback |
| 5562 | * @dev: generic device information structure |
| 5563 | * |
| 5564 | * Power Management callback to quiesce the device and prepare |
| 5565 | * for D3 transition. |
| 5566 | */ |
Jesse Brandeburg | 75a3f93 | 2024-03-05 18:50:22 -0800 | [diff] [blame] | 5567 | static int ice_suspend(struct device *dev) |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5568 | { |
| 5569 | struct pci_dev *pdev = to_pci_dev(dev); |
| 5570 | struct ice_pf *pf; |
| 5571 | int disabled, v; |
| 5572 | |
| 5573 | pf = pci_get_drvdata(pdev); |
| 5574 | |
| 5575 | if (!ice_pf_state_is_nominal(pf)) { |
| 5576 | dev_err(dev, "Device is not ready, no need to suspend it\n"); |
| 5577 | return -EBUSY; |
| 5578 | } |
| 5579 | |
| 5580 | /* Stop watchdog tasks until resume completion. |
| 5581 | * Even though it is most likely that the service task is |
| 5582 | * disabled if the device is suspended or down, the service task's |
| 5583 | * state is controlled by a different state bit, and we should |
| 5584 | * store and honor whatever state that bit is in at this point. |
| 5585 | */ |
| 5586 | disabled = ice_service_task_stop(pf); |
| 5587 | |
En-Wei Wu | bc69ad7 | 2024-05-30 22:21:31 +0800 | [diff] [blame] | 5588 | ice_deinit_rdma(pf); |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 5589 | |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5590 | /* Already suspended? Then there is nothing to do */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5591 | if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5592 | if (!disabled) |
| 5593 | ice_service_task_restart(pf); |
| 5594 | return 0; |
| 5595 | } |
| 5596 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5597 | if (test_bit(ICE_DOWN, pf->state) || |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5598 | ice_is_reset_in_progress(pf->state)) { |
| 5599 | dev_err(dev, "can't suspend device in reset or already down\n"); |
| 5600 | if (!disabled) |
| 5601 | ice_service_task_restart(pf); |
| 5602 | return 0; |
| 5603 | } |
| 5604 | |
| 5605 | ice_setup_mc_magic_wake(pf); |
| 5606 | |
| 5607 | ice_prepare_for_shutdown(pf); |
| 5608 | |
| 5609 | ice_set_wake(pf); |
| 5610 | |
| 5611 | /* Free vectors, clear the interrupt scheme and release IRQs |
| 5612 | * for proper hibernation, especially with large number of CPUs. |
| 5613 | * Otherwise hibernation might fail when mapping all the vectors back |
| 5614 | * to CPU0. |
| 5615 | */ |
| 5616 | ice_free_irq_msix_misc(pf); |
| 5617 | ice_for_each_vsi(pf, v) { |
| 5618 | if (!pf->vsi[v]) |
| 5619 | continue; |
| 5620 | ice_vsi_free_q_vectors(pf->vsi[v]); |
| 5621 | } |
| 5622 | ice_clear_interrupt_scheme(pf); |
| 5623 | |
Anirudh Venkataramanan | 466e439 | 2020-09-02 08:53:45 -0700 | [diff] [blame] | 5624 | pci_save_state(pdev); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5625 | pci_wake_from_d3(pdev, pf->wol_ena); |
| 5626 | pci_set_power_state(pdev, PCI_D3hot); |
| 5627 | return 0; |
| 5628 | } |
| 5629 | |
| 5630 | /** |
| 5631 | * ice_resume - PM callback for waking up from D3 |
| 5632 | * @dev: generic device information structure |
| 5633 | */ |
Jesse Brandeburg | 75a3f93 | 2024-03-05 18:50:22 -0800 | [diff] [blame] | 5634 | static int ice_resume(struct device *dev) |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5635 | { |
| 5636 | struct pci_dev *pdev = to_pci_dev(dev); |
| 5637 | enum ice_reset_req reset_type; |
| 5638 | struct ice_pf *pf; |
| 5639 | struct ice_hw *hw; |
| 5640 | int ret; |
| 5641 | |
| 5642 | pci_set_power_state(pdev, PCI_D0); |
| 5643 | pci_restore_state(pdev); |
| 5644 | pci_save_state(pdev); |
| 5645 | |
| 5646 | if (!pci_device_is_present(pdev)) |
| 5647 | return -ENODEV; |
| 5648 | |
| 5649 | ret = pci_enable_device_mem(pdev); |
| 5650 | if (ret) { |
| 5651 | dev_err(dev, "Cannot enable device after suspend\n"); |
| 5652 | return ret; |
| 5653 | } |
| 5654 | |
| 5655 | pf = pci_get_drvdata(pdev); |
| 5656 | hw = &pf->hw; |
| 5657 | |
| 5658 | pf->wakeup_reason = rd32(hw, PFPM_WUS); |
| 5659 | ice_print_wake_reason(pf); |
| 5660 | |
| 5661 | /* We cleared the interrupt scheme when we suspended, so we need to |
| 5662 | * restore it now to resume device functionality. |
| 5663 | */ |
| 5664 | ret = ice_reinit_interrupt_scheme(pf); |
| 5665 | if (ret) |
| 5666 | dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); |
| 5667 | |
En-Wei Wu | bc69ad7 | 2024-05-30 22:21:31 +0800 | [diff] [blame] | 5668 | ret = ice_init_rdma(pf); |
| 5669 | if (ret) |
| 5670 | dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", |
| 5671 | ret); |
| 5672 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5673 | clear_bit(ICE_DOWN, pf->state); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5674 | /* Now perform PF reset and rebuild */ |
| 5675 | reset_type = ICE_RESET_PFR; |
| 5676 | /* re-enable service task for reset, but allow reset to schedule it */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5677 | clear_bit(ICE_SERVICE_DIS, pf->state); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5678 | |
| 5679 | if (ice_schedule_reset(pf, reset_type)) |
| 5680 | dev_err(dev, "Reset during resume failed.\n"); |
| 5681 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5682 | clear_bit(ICE_SUSPENDED, pf->state); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5683 | ice_service_task_restart(pf); |
| 5684 | |
| 5685 | /* Restart the service task */ |
| 5686 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
| 5687 | |
| 5688 | return 0; |
| 5689 | } |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5690 | |
| 5691 | /** |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5692 | * ice_pci_err_detected - warning that PCI error has been detected |
| 5693 | * @pdev: PCI device information struct |
| 5694 | * @err: the type of PCI error |
| 5695 | * |
| 5696 | * Called to warn that something happened on the PCI bus and the error handling |
| 5697 | * is in progress. Allows the driver to gracefully prepare/handle PCI errors. |
| 5698 | */ |
| 5699 | static pci_ers_result_t |
Luc Van Oostenryck | 16d79cd | 2020-07-02 18:26:49 +0200 | [diff] [blame] | 5700 | ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5701 | { |
| 5702 | struct ice_pf *pf = pci_get_drvdata(pdev); |
| 5703 | |
| 5704 | if (!pf) { |
| 5705 | dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", |
| 5706 | __func__, err); |
| 5707 | return PCI_ERS_RESULT_DISCONNECT; |
| 5708 | } |
| 5709 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5710 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5711 | ice_service_task_stop(pf); |
| 5712 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5713 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
| 5714 | set_bit(ICE_PFR_REQ, pf->state); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 5715 | ice_prepare_for_reset(pf, ICE_RESET_PFR); |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5716 | } |
| 5717 | } |
| 5718 | |
| 5719 | return PCI_ERS_RESULT_NEED_RESET; |
| 5720 | } |
| 5721 | |
| 5722 | /** |
| 5723 | * ice_pci_err_slot_reset - a PCI slot reset has just happened |
| 5724 | * @pdev: PCI device information struct |
| 5725 | * |
| 5726 | * Called to determine if the driver can recover from the PCI slot reset by |
| 5727 | * using a register read to determine if the device is recoverable. |
| 5728 | */ |
| 5729 | static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) |
| 5730 | { |
| 5731 | struct ice_pf *pf = pci_get_drvdata(pdev); |
| 5732 | pci_ers_result_t result; |
| 5733 | int err; |
| 5734 | u32 reg; |
| 5735 | |
| 5736 | err = pci_enable_device_mem(pdev); |
| 5737 | if (err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 5738 | dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5739 | err); |
| 5740 | result = PCI_ERS_RESULT_DISCONNECT; |
| 5741 | } else { |
| 5742 | pci_set_master(pdev); |
| 5743 | pci_restore_state(pdev); |
| 5744 | pci_save_state(pdev); |
| 5745 | pci_wake_from_d3(pdev, false); |
| 5746 | |
| 5747 | /* Check for life */ |
| 5748 | reg = rd32(&pf->hw, GLGEN_RTRIG); |
| 5749 | if (!reg) |
| 5750 | result = PCI_ERS_RESULT_RECOVERED; |
| 5751 | else |
| 5752 | result = PCI_ERS_RESULT_DISCONNECT; |
| 5753 | } |
| 5754 | |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5755 | return result; |
| 5756 | } |
| 5757 | |
| 5758 | /** |
| 5759 | * ice_pci_err_resume - restart operations after PCI error recovery |
| 5760 | * @pdev: PCI device information struct |
| 5761 | * |
| 5762 | * Called to allow the driver to bring things back up after PCI error and/or |
| 5763 | * reset recovery have finished |
| 5764 | */ |
| 5765 | static void ice_pci_err_resume(struct pci_dev *pdev) |
| 5766 | { |
| 5767 | struct ice_pf *pf = pci_get_drvdata(pdev); |
| 5768 | |
| 5769 | if (!pf) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 5770 | dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", |
| 5771 | __func__); |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5772 | return; |
| 5773 | } |
| 5774 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5775 | if (test_bit(ICE_SUSPENDED, pf->state)) { |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5776 | dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", |
| 5777 | __func__); |
| 5778 | return; |
| 5779 | } |
| 5780 | |
Przemek Kitszel | 31642d2 | 2023-10-19 10:32:19 -0700 | [diff] [blame] | 5781 | ice_restore_all_vfs_msi_state(pf); |
Nick Nunley | a54a0b2 | 2020-07-13 13:53:07 -0700 | [diff] [blame] | 5782 | |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5783 | ice_do_reset(pf, ICE_RESET_PFR); |
| 5784 | ice_service_task_restart(pf); |
| 5785 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); |
| 5786 | } |
| 5787 | |
| 5788 | /** |
| 5789 | * ice_pci_err_reset_prepare - prepare device driver for PCI reset |
| 5790 | * @pdev: PCI device information struct |
| 5791 | */ |
| 5792 | static void ice_pci_err_reset_prepare(struct pci_dev *pdev) |
| 5793 | { |
| 5794 | struct ice_pf *pf = pci_get_drvdata(pdev); |
| 5795 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5796 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5797 | ice_service_task_stop(pf); |
| 5798 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5799 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
| 5800 | set_bit(ICE_PFR_REQ, pf->state); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 5801 | ice_prepare_for_reset(pf, ICE_RESET_PFR); |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5802 | } |
| 5803 | } |
| 5804 | } |
| 5805 | |
| 5806 | /** |
| 5807 | * ice_pci_err_reset_done - PCI reset done, device driver reset can begin |
| 5808 | * @pdev: PCI device information struct |
| 5809 | */ |
| 5810 | static void ice_pci_err_reset_done(struct pci_dev *pdev) |
| 5811 | { |
| 5812 | ice_pci_err_resume(pdev); |
| 5813 | } |
| 5814 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5815 | /* ice_pci_tbl - PCI Device ID Table |
| 5816 | * |
| 5817 | * Wildcard entries (PCI_ANY_ID) should come last |
| 5818 | * Last entry must be all 0s |
| 5819 | * |
| 5820 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, |
| 5821 | * Class, Class Mask, private data (not used) } |
| 5822 | */ |
| 5823 | static const struct pci_device_id ice_pci_tbl[] = { |
Pawel Chmielewski | f8ab08c | 2023-10-25 14:41:56 -0700 | [diff] [blame] | 5824 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, |
| 5825 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, |
| 5826 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, |
| 5827 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, |
| 5828 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, |
| 5829 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, |
| 5830 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, |
| 5831 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, |
| 5832 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, |
| 5833 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, |
| 5834 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, |
| 5835 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, |
| 5836 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, |
| 5837 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, |
| 5838 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, |
| 5839 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, |
| 5840 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, |
| 5841 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, |
| 5842 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, |
| 5843 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, |
| 5844 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, |
| 5845 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, |
| 5846 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, |
| 5847 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, |
| 5848 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, |
| 5849 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, |
Grzegorz Nitka | f64e189 | 2023-12-06 20:29:17 +0100 | [diff] [blame] | 5850 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), }, |
| 5851 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, |
| 5852 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, |
| 5853 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, |
Paul Greenwalt | a8e682f | 2024-03-28 21:07:08 -0400 | [diff] [blame] | 5854 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, |
| 5855 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, |
| 5856 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, |
| 5857 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, |
Paul Greenwalt | 4fd1040 | 2024-03-28 21:07:07 -0400 | [diff] [blame] | 5858 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, |
| 5859 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, |
| 5860 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, |
| 5861 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, |
| 5862 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, |
| 5863 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5864 | /* required last entry */ |
Pawel Chmielewski | f8ab08c | 2023-10-25 14:41:56 -0700 | [diff] [blame] | 5865 | {} |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5866 | }; |
| 5867 | MODULE_DEVICE_TABLE(pci, ice_pci_tbl); |
| 5868 | |
Jesse Brandeburg | 75a3f93 | 2024-03-05 18:50:22 -0800 | [diff] [blame] | 5869 | static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5870 | |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5871 | static const struct pci_error_handlers ice_pci_err_handler = { |
| 5872 | .error_detected = ice_pci_err_detected, |
| 5873 | .slot_reset = ice_pci_err_slot_reset, |
| 5874 | .reset_prepare = ice_pci_err_reset_prepare, |
| 5875 | .reset_done = ice_pci_err_reset_done, |
| 5876 | .resume = ice_pci_err_resume |
| 5877 | }; |
| 5878 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5879 | static struct pci_driver ice_driver = { |
| 5880 | .name = KBUILD_MODNAME, |
| 5881 | .id_table = ice_pci_tbl, |
| 5882 | .probe = ice_probe, |
| 5883 | .remove = ice_remove, |
Jesse Brandeburg | 75a3f93 | 2024-03-05 18:50:22 -0800 | [diff] [blame] | 5884 | .driver.pm = pm_sleep_ptr(&ice_pm_ops), |
Akeem G Abodunrin | 769c500 | 2020-07-09 09:16:03 -0700 | [diff] [blame] | 5885 | .shutdown = ice_shutdown, |
Anirudh Venkataramanan | ddf30f7 | 2018-09-19 17:42:55 -0700 | [diff] [blame] | 5886 | .sriov_configure = ice_sriov_configure, |
Michal Swiatkowski | 05c16687 | 2023-10-19 10:32:22 -0700 | [diff] [blame] | 5887 | .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, |
| 5888 | .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, |
Brett Creeley | 5995b6d | 2019-02-13 10:51:15 -0800 | [diff] [blame] | 5889 | .err_handler = &ice_pci_err_handler |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5890 | }; |
| 5891 | |
| 5892 | /** |
| 5893 | * ice_module_init - Driver registration routine |
| 5894 | * |
| 5895 | * ice_module_init is the first routine called when the driver is |
| 5896 | * loaded. It sets up the driver workqueues and debugfs and then registers |
| | * with the PCI subsystem. |
| 5897 | */ |
| 5898 | static int __init ice_module_init(void) |
| 5899 | { |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5900 | int status = -ENOMEM; |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5901 | |
Jeff Kirsher | 34a2a3b8 | 2020-05-29 00:18:33 -0700 | [diff] [blame] | 5902 | pr_info("%s\n", ice_driver_string); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5903 | pr_info("%s\n", ice_copyright); |
| 5904 | |
Pawel Chmielewski | 982b019 | 2023-10-15 19:43:04 -0400 | [diff] [blame] | 5905 | ice_adv_lnk_speed_maps_init(); |
| 5906 | |
Anirudh Venkataramanan | 4d159f7 | 2023-01-30 14:06:40 -0800 | [diff] [blame] | 5907 | ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 5908 | if (!ice_wq) { |
| 5909 | pr_err("Failed to create workqueue\n"); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5910 | return status; |
| 5911 | } |
| 5912 | |
| 5913 | ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); |
| 5914 | if (!ice_lag_wq) { |
| 5915 | pr_err("Failed to create LAG workqueue\n"); |
| 5916 | goto err_dest_wq; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 5917 | } |
| 5918 | |
Paul M Stillwell Jr | 96a9a93 | 2023-12-12 21:07:12 -0800 | [diff] [blame] | 5919 | ice_debugfs_init(); |
| 5920 | |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5921 | status = pci_register_driver(&ice_driver); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 5922 | if (status) { |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 5923 | pr_err("failed to register PCI driver, err %d\n", status); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5924 | goto err_dest_lag_wq; |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 5925 | } |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5926 | |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5927 | return 0; |
| 5928 | |
| 5929 | err_dest_lag_wq: |
| 5930 | destroy_workqueue(ice_lag_wq); |
Paul M Stillwell Jr | 96a9a93 | 2023-12-12 21:07:12 -0800 | [diff] [blame] | 5931 | ice_debugfs_exit(); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5932 | err_dest_wq: |
| 5933 | destroy_workqueue(ice_wq); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5934 | return status; |
| 5935 | } |
| 5936 | module_init(ice_module_init); |
| 5937 | |
| 5938 | /** |
| 5939 | * ice_module_exit - Driver exit cleanup routine |
| 5940 | * |
| 5941 | * ice_module_exit is called just before the driver is removed |
| 5942 | * from memory. |
| 5943 | */ |
| 5944 | static void __exit ice_module_exit(void) |
| 5945 | { |
| 5946 | pci_unregister_driver(&ice_driver); |
Wojciech Drewek | 500d0df | 2024-02-05 14:03:57 +0100 | [diff] [blame] | 5947 | ice_debugfs_exit(); |
Anirudh Venkataramanan | 940b61a | 2018-03-20 07:58:10 -0700 | [diff] [blame] | 5948 | destroy_workqueue(ice_wq); |
Dave Ertman | bb52f42 | 2023-06-20 15:18:46 -0700 | [diff] [blame] | 5949 | destroy_workqueue(ice_lag_wq); |
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 5950 | pr_info("module unloaded\n"); |
| 5951 | } |
| 5952 | module_exit(ice_module_exit); |
Anirudh Venkataramanan | 3a858ba | 2018-03-20 07:58:11 -0700 | [diff] [blame] | 5953 | |
| 5954 | /** |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 5955 | * ice_set_mac_address - NDO callback to set MAC address |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 5956 | * @netdev: network interface device structure |
| 5957 | * @pi: pointer to an address structure |
| 5958 | * |
| 5959 | * Returns 0 on success, negative on failure |
| 5960 | */ |
| 5961 | static int ice_set_mac_address(struct net_device *netdev, void *pi) |
| 5962 | { |
| 5963 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 5964 | struct ice_vsi *vsi = np->vsi; |
| 5965 | struct ice_pf *pf = vsi->back; |
| 5966 | struct ice_hw *hw = &pf->hw; |
| 5967 | struct sockaddr *addr = pi; |
Brett Creeley | b357d971 | 2021-08-24 12:27:53 -0700 | [diff] [blame] | 5968 | u8 old_mac[ETH_ALEN]; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 5969 | u8 flags = 0; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 5970 | u8 *mac; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 5971 | int err; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 5972 | |
| 5973 | mac = (u8 *)addr->sa_data; |
| 5974 | |
| 5975 | if (!is_valid_ether_addr(mac)) |
| 5976 | return -EADDRNOTAVAIL; |
| 5977 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 5978 | if (test_bit(ICE_DOWN, pf->state) || |
Dave Ertman | 5df7e45 | 2018-09-19 17:23:11 -0700 | [diff] [blame] | 5979 | ice_is_reset_in_progress(pf->state)) { |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 5980 | netdev_err(netdev, "can't set mac %pM. device not ready\n", |
| 5981 | mac); |
| 5982 | return -EBUSY; |
| 5983 | } |
| 5984 | |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 5985 | if (ice_chnl_dmac_fltr_cnt(pf)) { |
| 5986 | netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", |
| 5987 | mac); |
| 5988 | return -EAGAIN; |
| 5989 | } |
| 5990 | |
Brett Creeley | 3ba7f53 | 2021-08-06 09:51:27 -0700 | [diff] [blame] | 5991 | netif_addr_lock_bh(netdev); |
Brett Creeley | b357d971 | 2021-08-24 12:27:53 -0700 | [diff] [blame] | 5992 | ether_addr_copy(old_mac, netdev->dev_addr); |
| 5993 | /* change the netdev's MAC address */ |
Jakub Kicinski | a05e4c0 | 2021-10-04 09:05:21 -0700 | [diff] [blame] | 5994 | eth_hw_addr_set(netdev, mac); |
Brett Creeley | b357d971 | 2021-08-24 12:27:53 -0700 | [diff] [blame] | 5995 | netif_addr_unlock_bh(netdev); |
| 5996 | |
Lihong Yang | 757976a | 2020-05-07 17:41:09 -0700 | [diff] [blame] | 5997 | /* Clean up old MAC filter. Not an error if old filter doesn't exist */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 5998 | err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); |
| 5999 | if (err && err != -ENOENT) { |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6000 | err = -EADDRNOTAVAIL; |
Akeem G Abodunrin | bbb968e | 2019-07-25 02:53:51 -0700 | [diff] [blame] | 6001 | goto err_update_filters; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6002 | } |
| 6003 | |
Nick Nunley | 13ed5e8 | 2020-11-20 16:38:33 -0800 | [diff] [blame] | 6004 | /* Add filter for new MAC. If filter exists, return success */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 6005 | err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); |
Ivan Vecera | 2c0069f | 2022-03-31 09:20:07 -0700 | [diff] [blame] | 6006 | if (err == -EEXIST) { |
Nick Nunley | 13ed5e8 | 2020-11-20 16:38:33 -0800 | [diff] [blame] | 6007 | /* Although this MAC filter is already present in hardware it's |
| 6008 | * possible in some cases (e.g. bonding) that dev_addr was |
| 6009 | * modified outside of the driver and needs to be restored back |
| 6010 | * to this value. |
| 6011 | */ |
Lihong Yang | 757976a | 2020-05-07 17:41:09 -0700 | [diff] [blame] | 6012 | netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); |
Ivan Vecera | 2c0069f | 2022-03-31 09:20:07 -0700 | [diff] [blame] | 6013 | |
| 6014 | return 0; |
| 6015 | } else if (err) { |
Brett Creeley | 3ba7f53 | 2021-08-06 09:51:27 -0700 | [diff] [blame] | 6016 | /* error if the new filter addition failed */ |
Lihong Yang | 757976a | 2020-05-07 17:41:09 -0700 | [diff] [blame] | 6017 | err = -EADDRNOTAVAIL; |
Ivan Vecera | 2c0069f | 2022-03-31 09:20:07 -0700 | [diff] [blame] | 6018 | } |
Lihong Yang | 757976a | 2020-05-07 17:41:09 -0700 | [diff] [blame] | 6019 | |
Akeem G Abodunrin | bbb968e | 2019-07-25 02:53:51 -0700 | [diff] [blame] | 6020 | err_update_filters: |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6021 | if (err) { |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 6022 | netdev_err(netdev, "can't set MAC %pM. filter update failed\n", |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6023 | mac); |
Brett Creeley | b357d971 | 2021-08-24 12:27:53 -0700 | [diff] [blame] | 6024 | netif_addr_lock_bh(netdev); |
Jakub Kicinski | f3956eb | 2021-10-01 14:32:23 -0700 | [diff] [blame] | 6025 | eth_hw_addr_set(netdev, old_mac); |
Brett Creeley | 3ba7f53 | 2021-08-06 09:51:27 -0700 | [diff] [blame] | 6026 | netif_addr_unlock_bh(netdev); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6027 | return err; |
| 6028 | } |
| 6029 | |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 6030 | netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6031 | netdev->dev_addr); |
| 6032 | |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 6033 | /* write new MAC address to the firmware */ |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6034 | flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 6035 | err = ice_aq_manage_mac_write(hw, mac, flags, NULL); |
| 6036 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 6037 | netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 6038 | mac, err); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6039 | } |
| 6040 | return 0; |
| 6041 | } |
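| | /* For reference, this ndo_set_mac_address callback is normally reached via |
| |  * the standard netlink path, e.g. (interface name and address are |
| |  * placeholders): |
| |  *   ip link set dev eth0 address 00:11:22:33:44:55 |
| |  * The core passes the new address down as a struct sockaddr, which is the |
| |  * @pi argument above. |
| |  */ |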
| 6042 | |
| 6043 | /** |
| 6044 | * ice_set_rx_mode - NDO callback to set the netdev filters |
| 6045 | * @netdev: network interface device structure |
| 6046 | */ |
| 6047 | static void ice_set_rx_mode(struct net_device *netdev) |
| 6048 | { |
| 6049 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 6050 | struct ice_vsi *vsi = np->vsi; |
| 6051 | |
Wojciech Drewek | 2571a3f | 2023-07-12 13:03:27 +0200 | [diff] [blame] | 6052 | if (!vsi || ice_is_switchdev_running(vsi->back)) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6053 | return; |
| 6054 | |
| 6055 | /* Set the flags to synchronize filters |
| 6056 | * ndo_set_rx_mode may be triggered even without a change in netdev |
| 6057 | * flags |
| 6058 | */ |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 6059 | set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); |
| 6060 | set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6061 | set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); |
| 6062 | |
| 6063 | /* schedule our worker thread which will take care of |
| 6064 | * applying the new filter changes |
| 6065 | */ |
| 6066 | ice_service_task_schedule(vsi->back); |
| 6067 | } |
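| | /* ndo_set_rx_mode runs with the netdev address lock held and BHs disabled, |
| |  * so sleeping operations such as admin queue commands cannot be issued |
| |  * here; the actual filter sync is deferred to the service task that the |
| |  * function schedules. |
| |  */ |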
| 6068 | |
| 6069 | /** |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6070 | * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate |
| 6071 | * @netdev: network interface device structure |
| 6072 | * @queue_index: Queue ID |
| 6073 | * @maxrate: maximum bandwidth in Mbps |
| 6074 | */ |
| 6075 | static int |
| 6076 | ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) |
| 6077 | { |
| 6078 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 6079 | struct ice_vsi *vsi = np->vsi; |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6080 | u16 q_handle; |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 6081 | int status; |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6082 | u8 tc; |
| 6083 | |
| 6084 | /* Validate maxrate requested is within permitted range */ |
| 6085 | if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 6086 | netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6087 | maxrate, queue_index); |
| 6088 | return -EINVAL; |
| 6089 | } |
| 6090 | |
| 6091 | q_handle = vsi->tx_rings[queue_index]->q_handle; |
| 6092 | tc = ice_dcb_get_tc(vsi, queue_index); |
| 6093 | |
Sridhar Samudrala | 479cdfe | 2023-06-09 17:40:24 -0700 | [diff] [blame] | 6094 | vsi = ice_locate_vsi_using_queue(vsi, queue_index); |
| 6095 | if (!vsi) { |
| 6096 | netdev_err(netdev, "Invalid VSI for given queue %d\n", |
| 6097 | queue_index); |
| 6098 | return -EINVAL; |
| 6099 | } |
| 6100 | |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6101 | /* Set BW back to default, when user set maxrate to 0 */ |
| 6102 | if (!maxrate) |
| 6103 | status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, |
| 6104 | q_handle, ICE_MAX_BW); |
| 6105 | else |
| 6106 | status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, |
| 6107 | q_handle, ICE_MAX_BW, maxrate * 1000); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 6108 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 6109 | netdev_err(netdev, "Unable to set Tx max rate, error %d\n", |
| 6110 | status); |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6111 | |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 6112 | return status; |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 6113 | } |
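| | /* This is the ndo_set_tx_maxrate hook; user space typically drives it via |
| |  * the per-queue sysfs attribute, e.g. (device, queue and rate are |
| |  * placeholders): |
| |  *   echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate |
| |  * where the value is interpreted in Mbps, matching @maxrate above. |
| |  */ |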
| 6114 | |
| 6115 | /** |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6116 | * ice_fdb_add - add an entry to the hardware database |
| 6117 | * @ndm: the input from the stack |
| 6118 | * @tb: pointer to array of nladdr (unused) |
| 6119 | * @dev: the net device pointer |
| 6120 | * @addr: the MAC address entry being added |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 6121 | * @vid: VLAN ID |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6122 | * @flags: instructions from stack about fdb operation |
Bruce Allan | 99be37e | 2019-02-08 12:50:28 -0800 | [diff] [blame] | 6123 | * @extack: netlink extended ack |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6124 | */ |
Bruce Allan | 99be37e | 2019-02-08 12:50:28 -0800 | [diff] [blame] | 6125 | static int |
| 6126 | ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], |
| 6127 | struct net_device *dev, const unsigned char *addr, u16 vid, |
| 6128 | u16 flags, struct netlink_ext_ack __always_unused *extack) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6129 | { |
| 6130 | int err; |
| 6131 | |
| 6132 | if (vid) { |
| 6133 | netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); |
| 6134 | return -EINVAL; |
| 6135 | } |
| 6136 | if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { |
| 6137 | netdev_err(dev, "FDB only supports static addresses\n"); |
| 6138 | return -EINVAL; |
| 6139 | } |
| 6140 | |
| 6141 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) |
| 6142 | err = dev_uc_add_excl(dev, addr); |
| 6143 | else if (is_multicast_ether_addr(addr)) |
| 6144 | err = dev_mc_add_excl(dev, addr); |
| 6145 | else |
| 6146 | err = -EINVAL; |
| 6147 | |
| 6148 | /* Only return duplicate errors if NLM_F_EXCL is set */ |
| 6149 | if (err == -EEXIST && !(flags & NLM_F_EXCL)) |
| 6150 | err = 0; |
| 6151 | |
| 6152 | return err; |
| 6153 | } |
| 6154 | |
| 6155 | /** |
| 6156 | * ice_fdb_del - delete an entry from the hardware database |
| 6157 | * @ndm: the input from the stack |
| 6158 | * @tb: pointer to array of nladdr (unused) |
| 6159 | * @dev: the net device pointer |
| 6160 | * @addr: the MAC address entry being removed |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 6161 | * @vid: VLAN ID |
Alaa Mohamed | ca4567f | 2022-05-05 17:09:57 +0200 | [diff] [blame] | 6162 | * @extack: netlink extended ack |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6163 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 6164 | static int |
| 6165 | ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], |
| 6166 | struct net_device *dev, const unsigned char *addr, |
Alaa Mohamed | ca4567f | 2022-05-05 17:09:57 +0200 | [diff] [blame] | 6167 | __always_unused u16 vid, struct netlink_ext_ack *extack) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6168 | { |
| 6169 | int err; |
| 6170 | |
| 6171 | if (ndm->ndm_state & NUD_PERMANENT) { |
| 6172 | netdev_err(dev, "FDB only supports static addresses\n"); |
| 6173 | return -EINVAL; |
| 6174 | } |
| 6175 | |
| 6176 | if (is_unicast_ether_addr(addr)) |
| 6177 | err = dev_uc_del(dev, addr); |
| 6178 | else if (is_multicast_ether_addr(addr)) |
| 6179 | err = dev_mc_del(dev, addr); |
| 6180 | else |
| 6181 | err = -EINVAL; |
| 6182 | |
| 6183 | return err; |
| 6184 | } |
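| | /* The two FDB callbacks above back the iproute2 "bridge fdb" commands |
| |  * targeting the device itself, e.g. (device and address are placeholders, |
| |  * and the exact state keywords accepted depend on the checks above): |
| |  *   bridge fdb add 01:00:5e:00:00:01 dev eth0 self |
| |  *   bridge fdb del 01:00:5e:00:00:01 dev eth0 self |
| |  * The "self" keyword addresses the device's own table rather than an |
| |  * attached bridge master. |
| |  */ |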
| 6185 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6186 | #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ |
| 6187 | NETIF_F_HW_VLAN_CTAG_TX | \ |
| 6188 | NETIF_F_HW_VLAN_STAG_RX | \ |
| 6189 | NETIF_F_HW_VLAN_STAG_TX) |
| 6190 | |
Anatolii Gerasymenko | affa102 | 2022-07-27 09:24:06 +0200 | [diff] [blame] | 6191 | #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ |
| 6192 | NETIF_F_HW_VLAN_STAG_RX) |
| 6193 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6194 | #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ |
| 6195 | NETIF_F_HW_VLAN_STAG_FILTER) |
| 6196 | |
| 6197 | /** |
| 6198 | * ice_fix_features - fix the netdev features flags based on device limitations |
| 6199 | * @netdev: ptr to the netdev that flags are being fixed on |
| 6200 | * @features: features that need to be checked and possibly fixed |
| 6201 | * |
| 6202 | * Make sure any fixups are made to features in this callback. This enables the |
| 6203 | * driver to not have to check unsupported configurations throughout the driver |
| 6204 | * because that's the responsibility of this callback. |
| 6205 | * |
| 6206 | * Single VLAN Mode (SVM) Supported Features: |
| 6207 | * NETIF_F_HW_VLAN_CTAG_FILTER |
| 6208 | * NETIF_F_HW_VLAN_CTAG_RX |
| 6209 | * NETIF_F_HW_VLAN_CTAG_TX |
| 6210 | * |
| 6211 | * Double VLAN Mode (DVM) Supported Features: |
| 6212 | * NETIF_F_HW_VLAN_CTAG_FILTER |
| 6213 | * NETIF_F_HW_VLAN_CTAG_RX |
| 6214 | * NETIF_F_HW_VLAN_CTAG_TX |
| 6215 | * |
| 6216 | * NETIF_F_HW_VLAN_STAG_FILTER |
| 6217 | * NETIF_F_HW_VLAN_STAG_RX |
| 6218 | * NETIF_F_HW_VLAN_STAG_TX |
| 6219 | * |
| 6220 | * Features that need fixing: |
| 6221 | * Cannot simultaneously enable CTAG and STAG stripping and/or insertion. |
| 6222 | * These are mutually exclusive as the VSI context cannot support multiple |
| 6223 | * VLAN ethertypes simultaneously for stripping and/or insertion. If this |
| 6224 | * is not done, then default to clearing the requested STAG offload |
| 6225 | * settings. |
| 6226 | * |
| 6227 | * All supported filtering has to be enabled or disabled together. For |
| 6228 | * example, in DVM, CTAG and STAG filtering have to be enabled and disabled |
| 6229 | * together. If this is not done, then default to VLAN filtering disabled. |
| 6230 | * These are mutually exclusive as there is currently no way to |
| 6231 | * enable/disable VLAN filtering based on VLAN ethertype when using VLAN |
| 6232 | * prune rules. |
| 6233 | */ |
| 6234 | static netdev_features_t |
| 6235 | ice_fix_features(struct net_device *netdev, netdev_features_t features) |
| 6236 | { |
| 6237 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Roman Storozhenko | 9542ef4 | 2022-06-07 08:54:57 +0200 | [diff] [blame] | 6238 | netdev_features_t req_vlan_fltr, cur_vlan_fltr; |
| 6239 | bool cur_ctag, cur_stag, req_ctag, req_stag; |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6240 | |
Roman Storozhenko | 9542ef4 | 2022-06-07 08:54:57 +0200 | [diff] [blame] | 6241 | cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; |
| 6242 | cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; |
| 6243 | cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6244 | |
Roman Storozhenko | 9542ef4 | 2022-06-07 08:54:57 +0200 | [diff] [blame] | 6245 | req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; |
| 6246 | req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; |
| 6247 | req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6248 | |
Roman Storozhenko | 9542ef4 | 2022-06-07 08:54:57 +0200 | [diff] [blame] | 6249 | if (req_vlan_fltr != cur_vlan_fltr) { |
| 6250 | if (ice_is_dvm_ena(&np->vsi->back->hw)) { |
| 6251 | if (req_ctag && req_stag) { |
| 6252 | features |= NETIF_VLAN_FILTERING_FEATURES; |
| 6253 | } else if (!req_ctag && !req_stag) { |
| 6254 | features &= ~NETIF_VLAN_FILTERING_FEATURES; |
| 6255 | } else if ((!cur_ctag && req_ctag && !cur_stag) || |
| 6256 | (!cur_stag && req_stag && !cur_ctag)) { |
| 6257 | features |= NETIF_VLAN_FILTERING_FEATURES; |
| 6258 | netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); |
| 6259 | } else if ((cur_ctag && !req_ctag && cur_stag) || |
| 6260 | (cur_stag && !req_stag && cur_ctag)) { |
| 6261 | features &= ~NETIF_VLAN_FILTERING_FEATURES; |
| 6262 | netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); |
| 6263 | } |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6264 | } else { |
Roman Storozhenko | 9542ef4 | 2022-06-07 08:54:57 +0200 | [diff] [blame] | 6265 | if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) |
| 6266 | netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); |
| 6267 | |
| 6268 | if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) |
| 6269 | features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6270 | } |
| 6271 | } |
| 6272 | |
| 6273 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && |
| 6274 | (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { |
| 6275 | netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); |
| 6276 | features &= ~(NETIF_F_HW_VLAN_STAG_RX | |
| 6277 | NETIF_F_HW_VLAN_STAG_TX); |
| 6278 | } |
| 6279 | |
Anatolii Gerasymenko | affa102 | 2022-07-27 09:24:06 +0200 | [diff] [blame] | 6280 | if (!(netdev->features & NETIF_F_RXFCS) && |
| 6281 | (features & NETIF_F_RXFCS) && |
| 6282 | (features & NETIF_VLAN_STRIPPING_FEATURES) && |
| 6283 | !ice_vsi_has_non_zero_vlans(np->vsi)) { |
| 6284 | netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); |
| 6285 | features &= ~NETIF_VLAN_STRIPPING_FEATURES; |
| 6286 | } |
| 6287 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6288 | return features; |
| 6289 | } |
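| | /* A sketch of the fixup above (the feature strings are the assumed ethtool |
| |  * names, and the device name is a placeholder): in DVM, requesting only one |
| |  * of the two filtering bits, e.g. |
| |  *   ethtool -K eth0 rx-vlan-stag-filter on |
| |  * comes back from ndo_fix_features with rx-vlan-filter forced on as well, |
| |  * and the warning above is logged. |
| |  */ |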
| 6290 | |
| 6291 | /** |
Larysa Zaremba | 714ed94 | 2023-12-05 22:08:39 +0100 | [diff] [blame] | 6292 | * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto |
| 6293 | * @vsi: PF's VSI |
| 6294 | * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order |
| 6295 | * |
| 6296 | * Store current stripped VLAN proto in ring packet context, |
| 6297 | * so it can be accessed more efficiently by packet processing code. |
| 6298 | */ |
| 6299 | static void |
| 6300 | ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) |
| 6301 | { |
| 6302 | u16 i; |
| 6303 | |
| 6304 | ice_for_each_alloc_rxq(vsi, i) |
| 6305 | vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; |
| 6306 | } |
| 6307 | |
| 6308 | /** |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6309 | * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI |
| 6310 | * @vsi: PF's VSI |
| 6311 | * @features: features used to determine VLAN offload settings |
| 6312 | * |
| 6313 | * First, determine the vlan_ethertype based on the VLAN offload bits in |
| 6314 | * features. Then determine if stripping and insertion should be enabled or |
| 6315 | * disabled. Finally enable or disable VLAN stripping and insertion. |
| 6316 | */ |
| 6317 | static int |
| 6318 | ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) |
| 6319 | { |
| 6320 | bool enable_stripping = true, enable_insertion = true; |
| 6321 | struct ice_vsi_vlan_ops *vlan_ops; |
| 6322 | int strip_err = 0, insert_err = 0; |
| 6323 | u16 vlan_ethertype = 0; |
| 6324 | |
| 6325 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
| 6326 | |
| 6327 | if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) |
| 6328 | vlan_ethertype = ETH_P_8021AD; |
| 6329 | else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) |
| 6330 | vlan_ethertype = ETH_P_8021Q; |
| 6331 | |
| 6332 | if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) |
| 6333 | enable_stripping = false; |
| 6334 | if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) |
| 6335 | enable_insertion = false; |
| 6336 | |
| 6337 | if (enable_stripping) |
| 6338 | strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); |
| 6339 | else |
| 6340 | strip_err = vlan_ops->dis_stripping(vsi); |
| 6341 | |
| 6342 | if (enable_insertion) |
| 6343 | insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); |
| 6344 | else |
| 6345 | insert_err = vlan_ops->dis_insertion(vsi); |
| 6346 | |
| 6347 | if (strip_err || insert_err) |
| 6348 | return -EIO; |
| 6349 | |
Larysa Zaremba | 714ed94 | 2023-12-05 22:08:39 +0100 | [diff] [blame] | 6350 | ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? |
| 6351 | htons(vlan_ethertype) : 0); |
| 6352 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6353 | return 0; |
| 6354 | } |
| 6355 | |
| 6356 | /** |
| 6357 | * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI |
| 6358 | * @vsi: PF's VSI |
| 6359 | * @features: features used to determine VLAN filtering settings |
| 6360 | * |
| 6361 | * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the |
| 6362 | * features. |
| 6363 | */ |
| 6364 | static int |
| 6365 | ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) |
| 6366 | { |
| 6367 | struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
| 6368 | int err = 0; |
| 6369 | |
| 6370 | /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking |
| 6371 | * if either bit is set |
| 6372 | */ |
| 6373 | if (features & |
| 6374 | (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) |
| 6375 | err = vlan_ops->ena_rx_filtering(vsi); |
| 6376 | else |
| 6377 | err = vlan_ops->dis_rx_filtering(vsi); |
| 6378 | |
| 6379 | return err; |
| 6380 | } |
| 6381 | |
| 6382 | /** |
| 6383 | * ice_set_vlan_features - set VLAN settings based on suggested feature set |
| 6384 | * @netdev: ptr to the netdev being adjusted |
| 6385 | * @features: the feature set that the stack is suggesting |
| 6386 | * |
| 6387 | * Only update VLAN settings if the requested_vlan_features are different than |
| 6388 | * the current_vlan_features. |
| 6389 | */ |
| 6390 | static int |
| 6391 | ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) |
| 6392 | { |
| 6393 | netdev_features_t current_vlan_features, requested_vlan_features; |
| 6394 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 6395 | struct ice_vsi *vsi = np->vsi; |
| 6396 | int err; |
| 6397 | |
| 6398 | current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; |
| 6399 | requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; |
| 6400 | if (current_vlan_features ^ requested_vlan_features) { |
Anatolii Gerasymenko | affa102 | 2022-07-27 09:24:06 +0200 | [diff] [blame] | 6401 | if ((features & NETIF_F_RXFCS) && |
| 6402 | (features & NETIF_VLAN_STRIPPING_FEATURES)) { |
| 6403 | dev_err(ice_pf_to_dev(vsi->back), |
| 6404 | "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); |
| 6405 | return -EIO; |
| 6406 | } |
| 6407 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6408 | err = ice_set_vlan_offload_features(vsi, features); |
| 6409 | if (err) |
| 6410 | return err; |
| 6411 | } |
| 6412 | |
| 6413 | current_vlan_features = netdev->features & |
| 6414 | NETIF_VLAN_FILTERING_FEATURES; |
| 6415 | requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; |
| 6416 | if (current_vlan_features ^ requested_vlan_features) { |
| 6417 | err = ice_set_vlan_filtering_features(vsi, features); |
| 6418 | if (err) |
| 6419 | return err; |
| 6420 | } |
| 6421 | |
| 6422 | return 0; |
| 6423 | } |
| 6424 | |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 6425 | /** |
Maciej Fijalkowski | 44ece4e | 2022-07-07 12:16:51 +0200 | [diff] [blame] | 6426 | * ice_set_loopback - turn on/off loopback mode on underlying PF |
| 6427 | * @vsi: ptr to VSI |
| 6428 | * @ena: flag to indicate the on/off setting |
| 6429 | */ |
| 6430 | static int ice_set_loopback(struct ice_vsi *vsi, bool ena) |
| 6431 | { |
| 6432 | bool if_running = netif_running(vsi->netdev); |
| 6433 | int ret; |
| 6434 | |
| 6435 | if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { |
| 6436 | ret = ice_down(vsi); |
| 6437 | if (ret) { |
| 6438 | netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); |
| 6439 | return ret; |
| 6440 | } |
| 6441 | } |
| 6442 | ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); |
| 6443 | if (ret) |
| 6444 | netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); |
| 6445 | if (if_running) |
| 6446 | ret = ice_up(vsi); |
| 6447 | |
| 6448 | return ret; |
| 6449 | } |
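| | /* NETIF_F_LOOPBACK is normally toggled from ethtool, e.g. (device name is a |
| |  * placeholder and the feature string is assumed to be "loopback"): |
| |  *   ethtool -K eth0 loopback on |
| |  * which lands here through ice_set_features() below; note the VSI is |
| |  * briefly brought down and back up if it was running. |
| |  */ |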
| 6450 | |
| 6451 | /** |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6452 | * ice_set_features - set the netdev feature flags |
| 6453 | * @netdev: ptr to the netdev being adjusted |
| 6454 | * @features: the feature set that the stack is suggesting |
| 6455 | */ |
Bruce Allan | c8b7abd | 2019-02-26 16:35:11 -0800 | [diff] [blame] | 6456 | static int |
| 6457 | ice_set_features(struct net_device *netdev, netdev_features_t features) |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6458 | { |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6459 | netdev_features_t changed = netdev->features ^ features; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6460 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 6461 | struct ice_vsi *vsi = np->vsi; |
Henry Tieman | 5f8cc35 | 2019-11-06 02:05:30 -0800 | [diff] [blame] | 6462 | struct ice_pf *pf = vsi->back; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6463 | int ret = 0; |
| 6464 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 6465 | /* Don't set any netdev advanced features with device in Safe Mode */ |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6466 | if (ice_is_safe_mode(pf)) { |
| 6467 | dev_err(ice_pf_to_dev(pf), |
| 6468 | "Device is in Safe Mode - not enabling advanced netdev features\n"); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 6469 | return ret; |
| 6470 | } |
| 6471 | |
Henry Tieman | 5f8cc35 | 2019-11-06 02:05:30 -0800 | [diff] [blame] | 6472 | /* Do not change setting during reset */ |
| 6473 | if (ice_is_reset_in_progress(pf->state)) { |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6474 | dev_err(ice_pf_to_dev(pf), |
| 6475 | "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); |
Henry Tieman | 5f8cc35 | 2019-11-06 02:05:30 -0800 | [diff] [blame] | 6476 | return -EBUSY; |
| 6477 | } |
| 6478 | |
Tony Nguyen | 8f529ff | 2019-04-16 10:21:23 -0700 | [diff] [blame] | 6479 | /* Multiple features can be changed in one call so keep features in |
| 6480 | * separate if/else statements to guarantee each feature is checked |
| 6481 | */ |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6482 | if (changed & NETIF_F_RXHASH) |
| 6483 | ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); |
Md Fahad Iqbal Polash | 492af0a | 2018-09-19 17:23:17 -0700 | [diff] [blame] | 6484 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6485 | ret = ice_set_vlan_features(netdev, features); |
| 6486 | if (ret) |
| 6487 | return ret; |
Tony Nguyen | 3171948 | 2019-04-16 10:30:39 -0700 | [diff] [blame] | 6488 | |
Jesse Brandeburg | dddd406 | 2022-07-27 09:24:05 +0200 | [diff] [blame] | 6489 | /* Turn on receive of FCS aka CRC, and after setting this |
| 6490 | * flag the packet data will have the 4 byte CRC appended |
| 6491 | */ |
| 6492 | if (changed & NETIF_F_RXFCS) { |
Anatolii Gerasymenko | affa102 | 2022-07-27 09:24:06 +0200 | [diff] [blame] | 6493 | if ((features & NETIF_F_RXFCS) && |
| 6494 | (features & NETIF_VLAN_STRIPPING_FEATURES)) { |
| 6495 | dev_err(ice_pf_to_dev(vsi->back), |
| 6496 | "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); |
| 6497 | return -EIO; |
| 6498 | } |
| 6499 | |
Jesse Brandeburg | dddd406 | 2022-07-27 09:24:05 +0200 | [diff] [blame] | 6500 | ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); |
| 6501 | ret = ice_down_up(vsi); |
| 6502 | if (ret) |
| 6503 | return ret; |
| 6504 | } |
| 6505 | |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6506 | if (changed & NETIF_F_NTUPLE) { |
| 6507 | bool ena = !!(features & NETIF_F_NTUPLE); |
| 6508 | |
| 6509 | ice_vsi_manage_fdir(vsi, ena); |
| 6510 | ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 6511 | } |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 6512 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 6513 | /* don't turn off hw_tc_offload when ADQ is already enabled */ |
| 6514 | if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { |
| 6515 | dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); |
| 6516 | return -EACCES; |
| 6517 | } |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 6518 | |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6519 | if (changed & NETIF_F_HW_TC) { |
| 6520 | bool ena = !!(features & NETIF_F_HW_TC); |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 6521 | |
Maciej Fijalkowski | c67672f | 2022-07-07 12:16:50 +0200 | [diff] [blame] | 6522 | ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : |
| 6523 | clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); |
| 6524 | } |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6525 | |
Maciej Fijalkowski | 44ece4e | 2022-07-07 12:16:51 +0200 | [diff] [blame] | 6526 | if (changed & NETIF_F_LOOPBACK) |
| 6527 | ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); |
| 6528 | |
| 6529 | return ret; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6530 | } |
| 6531 | |
| 6532 | /** |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 6533 | * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6534 | * @vsi: VSI to setup VLAN properties for |
| 6535 | */ |
| 6536 | static int ice_vsi_vlan_setup(struct ice_vsi *vsi) |
| 6537 | { |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6538 | int err; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6539 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6540 | err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); |
| 6541 | if (err) |
| 6542 | return err; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6543 | |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 6544 | err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); |
| 6545 | if (err) |
| 6546 | return err; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6547 | |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 6548 | return ice_vsi_add_vlan_zero(vsi); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 6549 | } |
| 6550 | |
| 6551 | /** |
Michal Swiatkowski | 0db66d2 | 2022-12-21 12:38:15 +0100 | [diff] [blame] | 6552 | * ice_vsi_cfg_lan - Setup the VSI LAN-related config |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6553 | * @vsi: the VSI being configured |
| 6554 | * |
| 6555 | * Return 0 on success and negative value on error |
| 6556 | */ |
Michal Swiatkowski | 0db66d2 | 2022-12-21 12:38:15 +0100 | [diff] [blame] | 6557 | int ice_vsi_cfg_lan(struct ice_vsi *vsi) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6558 | { |
| 6559 | int err; |
| 6560 | |
Jesse Brandeburg | 6a8d013 | 2022-12-13 16:01:31 -0800 | [diff] [blame] | 6561 | if (vsi->netdev && vsi->type == ICE_VSI_PF) { |
Anirudh Venkataramanan | c7f2c42 | 2018-08-09 06:29:00 -0700 | [diff] [blame] | 6562 | ice_set_rx_mode(vsi->netdev); |
Anirudh Venkataramanan | 9ecd25c | 2018-10-26 10:40:54 -0700 | [diff] [blame] | 6563 | |
Jesse Brandeburg | 6a8d013 | 2022-12-13 16:01:31 -0800 | [diff] [blame] | 6564 | err = ice_vsi_vlan_setup(vsi); |
| 6565 | if (err) |
| 6566 | return err; |
Anirudh Venkataramanan | c7f2c42 | 2018-08-09 06:29:00 -0700 | [diff] [blame] | 6567 | } |
Anirudh Venkataramanan | a629cf0 | 2019-02-28 15:24:27 -0800 | [diff] [blame] | 6568 | ice_vsi_cfg_dcb_rings(vsi); |
Anirudh Venkataramanan | 03f7a98 | 2018-12-19 10:03:27 -0800 | [diff] [blame] | 6569 | |
| 6570 | err = ice_vsi_cfg_lan_txqs(vsi); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 6571 | if (!err && ice_is_xdp_ena_vsi(vsi)) |
| 6572 | err = ice_vsi_cfg_xdp_txqs(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6573 | if (!err) |
| 6574 | err = ice_vsi_cfg_rxqs(vsi); |
| 6575 | |
| 6576 | return err; |
| 6577 | } |
| 6578 | |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6579 | /* THEORY OF MODERATION: |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6580 | * The ice driver hardware works differently than the hardware that DIMLIB was |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6581 | * originally made for. ice hardware doesn't have packet count limits that |
| 6582 | * can trigger an interrupt, but it *does* have interrupt rate limit support, |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6583 | * which is hard-coded to a limit of 250,000 ints/second. |
| 6584 | * If not using dynamic moderation, the INTRL value can be modified |
| 6585 | * by ethtool rx-usecs-high. |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6586 | */ |
| 6587 | struct ice_dim { |
| 6588 | /* the throttle rate for interrupts, basically the worst-case delay before |
| 6589 | * an initial interrupt fires; the value is stored in microseconds. |
| 6590 | */ |
| 6591 | u16 itr; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6592 | }; |
| 6593 | |
| 6594 | /* Make a different profile for Rx that doesn't allow quite so aggressive |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6595 | * moderation at the high end (it maxes out at 126us or about 8k interrupts a |
| 6596 | * second). |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6597 | */ |
| 6598 | static const struct ice_dim rx_profile[] = { |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6599 | {2}, /* 500,000 ints/s, capped at 250K by INTRL */ |
| 6600 | {8}, /* 125,000 ints/s */ |
| 6601 | {16}, /* 62,500 ints/s */ |
| 6602 | {62}, /* 16,129 ints/s */ |
| 6603 | {126} /* 7,936 ints/s */ |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6604 | }; |
| 6605 | |
| 6606 | /* The transmit profile, which has the same sorts of values |
| 6607 | * as the previous struct |
| 6608 | */ |
| 6609 | static const struct ice_dim tx_profile[] = { |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6610 | {2}, /* 500,000 ints/s, capped at 250K by INTRL */ |
| 6611 | {8}, /* 125,000 ints/s */ |
| 6612 | {40}, /* 25,000 ints/s */ |
| 6613 | {128}, /* 7,812 ints/s */ |
| 6614 | {256} /* 3,906 ints/s */ |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6615 | }; |
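| | /* For both tables the interrupt rate is roughly 1,000,000 / itr_usecs: an |
| |  * ITR of 8 us corresponds to about 125,000 ints/s, and the 2 us entries are |
| |  * further limited by the 250K INTRL cap described above. |
| |  */ |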
| 6616 | |
| 6617 | static void ice_tx_dim_work(struct work_struct *work) |
| 6618 | { |
| 6619 | struct ice_ring_container *rc; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6620 | struct dim *dim; |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6621 | u16 itr; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6622 | |
| 6623 | dim = container_of(work, struct dim, work); |
Wu Yunchuan | c59cc26 | 2023-07-17 11:11:54 +0800 | [diff] [blame] | 6624 | rc = dim->priv; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6625 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6626 | WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6627 | |
| 6628 | /* look up the values in our local table */ |
| 6629 | itr = tx_profile[dim->profile_ix].itr; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6630 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6631 | ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6632 | ice_write_itr(rc, itr); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6633 | |
| 6634 | dim->state = DIM_START_MEASURE; |
| 6635 | } |
| 6636 | |
| 6637 | static void ice_rx_dim_work(struct work_struct *work) |
| 6638 | { |
| 6639 | struct ice_ring_container *rc; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6640 | struct dim *dim; |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6641 | u16 itr; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6642 | |
| 6643 | dim = container_of(work, struct dim, work); |
Wu Yunchuan | c59cc26 | 2023-07-17 11:11:54 +0800 | [diff] [blame] | 6644 | rc = dim->priv; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6645 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6646 | WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6647 | |
| 6648 | /* look up the values in our local table */ |
| 6649 | itr = rx_profile[dim->profile_ix].itr; |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6650 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6651 | ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6652 | ice_write_itr(rc, itr); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6653 | |
| 6654 | dim->state = DIM_START_MEASURE; |
| 6655 | } |
| 6656 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6657 | #define ICE_DIM_DEFAULT_PROFILE_IX 1 |
| 6658 | |
| 6659 | /** |
| 6660 | * ice_init_moderation - set up interrupt moderation |
| 6661 | * @q_vector: the vector containing rings to be configured |
| 6662 | * |
| 6663 | * Set up interrupt moderation registers, with the intent to do the right thing |
| 6664 | * when called from reset or from probe, and whether or not dynamic moderation |
| 6665 | * is enabled or not. Take special care to write all the registers in both |
| 6666 | * dynamic moderation mode or not in order to make sure hardware is in a known |
| 6667 | * state. |
| 6668 | */ |
| 6669 | static void ice_init_moderation(struct ice_q_vector *q_vector) |
| 6670 | { |
| 6671 | struct ice_ring_container *rc; |
| 6672 | bool tx_dynamic, rx_dynamic; |
| 6673 | |
| 6674 | rc = &q_vector->tx; |
| 6675 | INIT_WORK(&rc->dim.work, ice_tx_dim_work); |
| 6676 | rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
| 6677 | rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; |
| 6678 | rc->dim.priv = rc; |
| 6679 | tx_dynamic = ITR_IS_DYNAMIC(rc); |
| 6680 | |
| 6681 | /* set the initial TX ITR to match the above */ |
| 6682 | ice_write_itr(rc, tx_dynamic ? |
| 6683 | tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); |
| 6684 | |
| 6685 | rc = &q_vector->rx; |
| 6686 | INIT_WORK(&rc->dim.work, ice_rx_dim_work); |
| 6687 | rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
| 6688 | rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; |
| 6689 | rc->dim.priv = rc; |
| 6690 | rx_dynamic = ITR_IS_DYNAMIC(rc); |
| 6691 | |
| 6692 | /* set the initial RX ITR to match the above */ |
| 6693 | ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : |
| 6694 | rc->itr_setting); |
| 6695 | |
| 6696 | ice_set_q_vector_intrl(q_vector); |
| 6697 | } |
| 6698 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6699 | /** |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 6700 | * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI |
| 6701 | * @vsi: the VSI being configured |
| 6702 | */ |
| 6703 | static void ice_napi_enable_all(struct ice_vsi *vsi) |
| 6704 | { |
| 6705 | int q_idx; |
| 6706 | |
| 6707 | if (!vsi->netdev) |
| 6708 | return; |
| 6709 | |
Anirudh Venkataramanan | b4603db | 2019-04-16 10:21:28 -0700 | [diff] [blame] | 6710 | ice_for_each_q_vector(vsi, q_idx) { |
Young Xiao | eec9037 | 2018-11-29 01:54:10 +0000 | [diff] [blame] | 6711 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
| 6712 | |
Jesse Brandeburg | d8eb7ad | 2021-09-20 12:30:12 -0700 | [diff] [blame] | 6713 | ice_init_moderation(q_vector); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 6714 | |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 6715 | if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) |
Young Xiao | eec9037 | 2018-11-29 01:54:10 +0000 | [diff] [blame] | 6716 | napi_enable(&q_vector->napi); |
| 6717 | } |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 6718 | } |
| 6719 | |
| 6720 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6721 | * ice_up_complete - Finish the last steps of bringing up a connection |
| 6722 | * @vsi: The VSI being configured |
| 6723 | * |
| 6724 | * Return 0 on success and negative value on error |
| 6725 | */ |
| 6726 | static int ice_up_complete(struct ice_vsi *vsi) |
| 6727 | { |
| 6728 | struct ice_pf *pf = vsi->back; |
| 6729 | int err; |
| 6730 | |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 6731 | ice_vsi_cfg_msix(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6732 | |
| 6733 | /* Enable only Rx rings, Tx rings were enabled by the FW when the |
| 6734 | * Tx queue group list was configured and the context bits were |
| 6735 | * programmed using ice_vsi_cfg_txqs |
| 6736 | */ |
Brett Creeley | 13a6233 | 2020-01-22 07:21:29 -0800 | [diff] [blame] | 6737 | err = ice_vsi_start_all_rx_rings(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6738 | if (err) |
| 6739 | return err; |
| 6740 | |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 6741 | clear_bit(ICE_VSI_DOWN, vsi->state); |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 6742 | ice_napi_enable_all(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6743 | ice_vsi_ena_irq(vsi); |
| 6744 | |
| 6745 | if (vsi->port_info && |
| 6746 | (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && |
Jesse Brandeburg | 6a8d013 | 2022-12-13 16:01:31 -0800 | [diff] [blame] | 6747 | vsi->netdev && vsi->type == ICE_VSI_PF) { |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6748 | ice_print_link_msg(vsi, true); |
| 6749 | netif_tx_start_all_queues(vsi->netdev); |
| 6750 | netif_carrier_on(vsi->netdev); |
Jacob Keller | 6b1ff5d | 2022-12-05 11:52:43 -0800 | [diff] [blame] | 6751 | ice_ptp_link_change(pf, pf->hw.pf_id, true); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6752 | } |
| 6753 | |
Paul Greenwalt | 31b6298f | 2022-04-28 14:11:42 -0700 | [diff] [blame] | 6754 | /* Perform an initial read of the statistics registers now to |
| 6755 | * set the baseline so counters are ready when interface is up |
| 6756 | */ |
| 6757 | ice_update_eth_stats(vsi); |
Jesse Brandeburg | 6a8d013 | 2022-12-13 16:01:31 -0800 | [diff] [blame] | 6758 | |
| 6759 | if (vsi->type == ICE_VSI_PF) |
| 6760 | ice_service_task_schedule(pf); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6761 | |
Bruce Allan | 1b5c19c | 2019-02-26 16:35:07 -0800 | [diff] [blame] | 6762 | return 0; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 6763 | } |
| 6764 | |
| 6765 | /** |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6766 | * ice_up - Bring the connection back up after being down |
| 6767 | * @vsi: VSI being configured |
| 6768 | */ |
| 6769 | int ice_up(struct ice_vsi *vsi) |
| 6770 | { |
| 6771 | int err; |
| 6772 | |
Michal Swiatkowski | 0db66d2 | 2022-12-21 12:38:15 +0100 | [diff] [blame] | 6773 | err = ice_vsi_cfg_lan(vsi); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6774 | if (!err) |
| 6775 | err = ice_up_complete(vsi); |
| 6776 | |
| 6777 | return err; |
| 6778 | } |
| 6779 | |
| 6780 | /** |
| 6781 | * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 6782 | * @syncp: pointer to u64_stats_sync |
| 6783 | * @stats: stats that pkts and bytes count will be taken from |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6784 | * @pkts: packets stats counter |
| 6785 | * @bytes: bytes stats counter |
| 6786 | * |
| 6787 | * This function fetches stats from the ring considering the atomic operations |
| 6788 | * that needs to be performed to read u64 values in 32 bit machine. |
| 6789 | */ |
Marcin Szycik | c8ff29b | 2022-01-27 16:04:26 +0100 | [diff] [blame] | 6790 | void |
| 6791 | ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, |
| 6792 | struct ice_q_stats stats, u64 *pkts, u64 *bytes) |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6793 | { |
| 6794 | unsigned int start; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6795 | |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6796 | do { |
Thomas Gleixner | 068c38a | 2022-10-26 15:22:14 +0200 | [diff] [blame] | 6797 | start = u64_stats_fetch_begin(syncp); |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 6798 | *pkts = stats.pkts; |
| 6799 | *bytes = stats.bytes; |
Thomas Gleixner | 068c38a | 2022-10-26 15:22:14 +0200 | [diff] [blame] | 6800 | } while (u64_stats_fetch_retry(syncp, start)); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6801 | } |
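| | /* On 64-bit builds the u64_stats fetch/retry pair compiles down to plain |
| |  * reads; the retry loop only does real work on 32-bit kernels, where the |
| |  * writer side wraps stats updates in a seqcount. |
| |  */ |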
| 6802 | |
| 6803 | /** |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6804 | * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters |
| 6805 | * @vsi: the VSI to be updated |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6806 | * @vsi_stats: the stats struct to be updated |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6807 | * @rings: rings to work on |
| 6808 | * @count: number of rings |
| 6809 | */ |
| 6810 | static void |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6811 | ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, |
| 6812 | struct rtnl_link_stats64 *vsi_stats, |
| 6813 | struct ice_tx_ring **rings, u16 count) |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6814 | { |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6815 | u16 i; |
| 6816 | |
| 6817 | for (i = 0; i < count; i++) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 6818 | struct ice_tx_ring *ring; |
| 6819 | u64 pkts = 0, bytes = 0; |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6820 | |
| 6821 | ring = READ_ONCE(rings[i]); |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6822 | if (!ring || !ring->ring_stats) |
Maciej Fijalkowski | f1535469 | 2022-03-07 18:47:39 +0100 | [diff] [blame] | 6823 | continue; |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6824 | ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, |
| 6825 | ring->ring_stats->stats, &pkts, |
| 6826 | &bytes); |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6827 | vsi_stats->tx_packets += pkts; |
| 6828 | vsi_stats->tx_bytes += bytes; |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6829 | vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; |
| 6830 | vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; |
| 6831 | vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6832 | } |
| 6833 | } |
| 6834 | |
| 6835 | /** |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6836 | * ice_update_vsi_ring_stats - Update VSI stats counters |
| 6837 | * @vsi: the VSI to be updated |
| 6838 | */ |
| 6839 | static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) |
| 6840 | { |
Benjamin Mikailenko | 2fd5e43 | 2022-11-18 16:20:01 -0500 | [diff] [blame] | 6841 | struct rtnl_link_stats64 *net_stats, *stats_prev; |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6842 | struct rtnl_link_stats64 *vsi_stats; |
Przemek Kitszel | 257310e | 2024-02-27 15:31:06 +0100 | [diff] [blame] | 6843 | struct ice_pf *pf = vsi->back; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6844 | u64 pkts, bytes; |
| 6845 | int i; |
| 6846 | |
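| | /* accumulate into a scratch buffer first; the VSI-wide netdev counters |
| | * below are only advanced by complete deltas computed from this snapshot |
| | */ |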
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6847 | vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); |
| 6848 | if (!vsi_stats) |
| 6849 | return; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6850 | |
| 6851 | /* reset non-netdev (extended) stats */ |
| 6852 | vsi->tx_restart = 0; |
| 6853 | vsi->tx_busy = 0; |
| 6854 | vsi->tx_linearize = 0; |
| 6855 | vsi->rx_buf_failed = 0; |
| 6856 | vsi->rx_page_failed = 0; |
| 6857 | |
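| | /* walk the ring pointers under RCU; they may be replaced concurrently, |
| | * e.g. while the queue configuration is being changed |
| | */ |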
| 6858 | rcu_read_lock(); |
| 6859 | |
| 6860 | /* update Tx rings counters */ |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6861 | ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, |
| 6862 | vsi->num_txq); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6863 | |
| 6864 | /* update Rx rings counters */ |
| 6865 | ice_for_each_rxq(vsi, i) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 6866 | struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6867 | struct ice_ring_stats *ring_stats; |
Paul M Stillwell Jr | b6b0501 | 2021-05-06 08:40:07 -0700 | [diff] [blame] | 6868 | |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6869 | ring_stats = ring->ring_stats; |
| 6870 | ice_fetch_u64_stats_per_ring(&ring_stats->syncp, |
| 6871 | ring_stats->stats, &pkts, |
| 6872 | &bytes); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6873 | vsi_stats->rx_packets += pkts; |
| 6874 | vsi_stats->rx_bytes += bytes; |
Benjamin Mikailenko | 288ecf4 | 2022-11-18 16:20:02 -0500 | [diff] [blame] | 6875 | vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; |
| 6876 | vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6877 | } |
| 6878 | |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6879 | /* update XDP Tx rings counters */ |
| 6880 | if (ice_is_xdp_ena_vsi(vsi)) |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6881 | ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, |
Marta Plantykow | 49d358e | 2020-05-15 17:42:16 -0700 | [diff] [blame] | 6882 | vsi->num_xdp_txq); |
| 6883 | |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6884 | rcu_read_unlock(); |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6885 | |
Benjamin Mikailenko | 2fd5e43 | 2022-11-18 16:20:01 -0500 | [diff] [blame] | 6886 | net_stats = &vsi->net_stats; |
| 6887 | stats_prev = &vsi->net_stats_prev; |
| 6888 | |
Przemek Kitszel | 257310e | 2024-02-27 15:31:06 +0100 | [diff] [blame] | 6889 | /* Update netdev counters, but keep in mind that values could start at a |
| 6890 | * random value after a PF reset. Since we increase the reported stat by |
| 6891 | * the Cur - Prev difference, we need to be sure that Prev is valid. If |
| 6892 | * it's not, skip this round. |
| 6893 | */ |
| 6894 | if (likely(pf->stat_prev_loaded)) { |
| 6895 | net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; |
| 6896 | net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; |
| 6897 | net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; |
| 6898 | net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; |
Benjamin Mikailenko | 2fd5e43 | 2022-11-18 16:20:01 -0500 | [diff] [blame] | 6899 | } |
| 6900 | |
Benjamin Mikailenko | 2fd5e43 | 2022-11-18 16:20:01 -0500 | [diff] [blame] | 6901 | stats_prev->tx_packets = vsi_stats->tx_packets; |
| 6902 | stats_prev->tx_bytes = vsi_stats->tx_bytes; |
| 6903 | stats_prev->rx_packets = vsi_stats->rx_packets; |
| 6904 | stats_prev->rx_bytes = vsi_stats->rx_bytes; |
Jesse Brandeburg | 1a0f25a | 2021-11-12 17:06:02 -0800 | [diff] [blame] | 6905 | |
| 6906 | kfree(vsi_stats); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6907 | } |
| 6908 | |
| 6909 | /** |
| 6910 | * ice_update_vsi_stats - Update VSI stats counters |
| 6911 | * @vsi: the VSI to be updated |
| 6912 | */ |
Bruce Allan | 5a4a867 | 2019-07-25 02:53:50 -0700 | [diff] [blame] | 6913 | void ice_update_vsi_stats(struct ice_vsi *vsi) |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6914 | { |
| 6915 | struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; |
| 6916 | struct ice_eth_stats *cur_es = &vsi->eth_stats; |
| 6917 | struct ice_pf *pf = vsi->back; |
| 6918 | |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 6919 | if (test_bit(ICE_VSI_DOWN, vsi->state) || |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 6920 | test_bit(ICE_CFG_BUSY, pf->state)) |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6921 | return; |
| 6922 | |
| 6923 | /* get stats as recorded by Tx/Rx rings */ |
| 6924 | ice_update_vsi_ring_stats(vsi); |
| 6925 | |
| 6926 | /* get VSI stats as recorded by the hardware */ |
| 6927 | ice_update_eth_stats(vsi); |
| 6928 | |
| 6929 | cur_ns->tx_errors = cur_es->tx_errors; |
Anirudh Venkataramanan | 51fe27e1 | 2021-03-25 15:35:16 -0700 | [diff] [blame] | 6930 | cur_ns->rx_dropped = cur_es->rx_discards; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6931 | cur_ns->tx_dropped = cur_es->tx_discards; |
| 6932 | cur_ns->multicast = cur_es->rx_multicast; |
| 6933 | |
| 6934 | /* update some more netdev stats if this is the main VSI */ |
| 6935 | if (vsi->type == ICE_VSI_PF) { |
| 6936 | cur_ns->rx_crc_errors = pf->stats.crc_errors; |
| 6937 | cur_ns->rx_errors = pf->stats.crc_errors + |
Brett Creeley | 4f1fe43 | 2020-05-15 17:36:44 -0700 | [diff] [blame] | 6938 | pf->stats.illegal_bytes + |
Brett Creeley | 4f1fe43 | 2020-05-15 17:36:44 -0700 | [diff] [blame] | 6939 | pf->stats.rx_undersize + |
| 6940 | pf->hw_csum_rx_error + |
| 6941 | pf->stats.rx_jabber + |
| 6942 | pf->stats.rx_fragments + |
| 6943 | pf->stats.rx_oversize; |
Brett Creeley | 56923ab | 2019-06-26 02:20:22 -0700 | [diff] [blame] | 6944 | /* record drops from the port level */ |
| 6945 | cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6946 | } |
| 6947 | } |
| 6948 | |
| 6949 | /** |
| 6950 | * ice_update_pf_stats - Update PF port stats counters |
| 6951 | * @pf: PF whose stats need to be updated |
| 6952 | */ |
Bruce Allan | 5a4a867 | 2019-07-25 02:53:50 -0700 | [diff] [blame] | 6953 | void ice_update_pf_stats(struct ice_pf *pf) |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6954 | { |
| 6955 | struct ice_hw_port_stats *prev_ps, *cur_ps; |
| 6956 | struct ice_hw *hw = &pf->hw; |
Henry Tieman | 4ab9564 | 2020-05-11 18:01:41 -0700 | [diff] [blame] | 6957 | u16 fd_ctr_base; |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6958 | u8 port; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6959 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6960 | port = hw->port_info->lport; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6961 | prev_ps = &pf->stats_prev; |
| 6962 | cur_ps = &pf->stats; |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6963 | |
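| | /* a reset may clear the HW counters; drop the previous snapshot so the |
| | * ice_stat_update*() helpers re-baseline instead of computing deltas |
| | * against stale values |
| | */ |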
Benjamin Mikailenko | 2fd5e43 | 2022-11-18 16:20:01 -0500 | [diff] [blame] | 6964 | if (ice_is_reset_in_progress(pf->state)) |
| 6965 | pf->stat_prev_loaded = false; |
| 6966 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6967 | ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6968 | &prev_ps->eth.rx_bytes, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6969 | &cur_ps->eth.rx_bytes); |
| 6970 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6971 | ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6972 | &prev_ps->eth.rx_unicast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6973 | &cur_ps->eth.rx_unicast); |
| 6974 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6975 | ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6976 | &prev_ps->eth.rx_multicast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6977 | &cur_ps->eth.rx_multicast); |
| 6978 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6979 | ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6980 | &prev_ps->eth.rx_broadcast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6981 | &cur_ps->eth.rx_broadcast); |
| 6982 | |
Brett Creeley | 56923ab | 2019-06-26 02:20:22 -0700 | [diff] [blame] | 6983 | ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, |
| 6984 | &prev_ps->eth.rx_discards, |
| 6985 | &cur_ps->eth.rx_discards); |
| 6986 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6987 | ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6988 | &prev_ps->eth.tx_bytes, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6989 | &cur_ps->eth.tx_bytes); |
| 6990 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6991 | ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6992 | &prev_ps->eth.tx_unicast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6993 | &cur_ps->eth.tx_unicast); |
| 6994 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6995 | ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 6996 | &prev_ps->eth.tx_multicast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 6997 | &cur_ps->eth.tx_multicast); |
| 6998 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 6999 | ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7000 | &prev_ps->eth.tx_broadcast, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7001 | &cur_ps->eth.tx_broadcast); |
| 7002 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7003 | ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7004 | &prev_ps->tx_dropped_link_down, |
| 7005 | &cur_ps->tx_dropped_link_down); |
| 7006 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7007 | ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7008 | &prev_ps->rx_size_64, &cur_ps->rx_size_64); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7009 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7010 | ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7011 | &prev_ps->rx_size_127, &cur_ps->rx_size_127); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7012 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7013 | ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7014 | &prev_ps->rx_size_255, &cur_ps->rx_size_255); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7015 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7016 | ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7017 | &prev_ps->rx_size_511, &cur_ps->rx_size_511); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7018 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7019 | ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7020 | &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); |
| 7021 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7022 | ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7023 | &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); |
| 7024 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7025 | ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7026 | &prev_ps->rx_size_big, &cur_ps->rx_size_big); |
| 7027 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7028 | ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7029 | &prev_ps->tx_size_64, &cur_ps->tx_size_64); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7030 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7031 | ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7032 | &prev_ps->tx_size_127, &cur_ps->tx_size_127); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7033 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7034 | ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7035 | &prev_ps->tx_size_255, &cur_ps->tx_size_255); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7036 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7037 | ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, |
Jacob Keller | 36517fd | 2019-06-26 02:20:13 -0700 | [diff] [blame] | 7038 | &prev_ps->tx_size_511, &cur_ps->tx_size_511); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7039 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7040 | ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7041 | &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); |
| 7042 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7043 | ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7044 | &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); |
| 7045 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7046 | ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7047 | &prev_ps->tx_size_big, &cur_ps->tx_size_big); |
| 7048 | |
Henry Tieman | 4ab9564 | 2020-05-11 18:01:41 -0700 | [diff] [blame] | 7049 | fd_ctr_base = hw->fd_ctr_base; |
| 7050 | |
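| | /* the Flow Director sideband match counter lives in a per-PF counter |
| | * block; ICE_FD_SB_STAT_IDX() converts the PF's counter base into the |
| | * GLSTAT_FD_CNT0L register index |
| | */ |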
| 7051 | ice_stat_update40(hw, |
| 7052 | GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), |
| 7053 | pf->stat_prev_loaded, &prev_ps->fd_sb_match, |
| 7054 | &cur_ps->fd_sb_match); |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7055 | ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7056 | &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); |
| 7057 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7058 | ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7059 | &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); |
| 7060 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7061 | ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7062 | &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); |
| 7063 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7064 | ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7065 | &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); |
| 7066 | |
Anirudh Venkataramanan | 4b0fdce | 2019-02-28 15:24:29 -0800 | [diff] [blame] | 7067 | ice_update_dcb_stats(pf); |
| 7068 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7069 | ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7070 | &prev_ps->crc_errors, &cur_ps->crc_errors); |
| 7071 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7072 | ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7073 | &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); |
| 7074 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7075 | ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7076 | &prev_ps->mac_local_faults, |
| 7077 | &cur_ps->mac_local_faults); |
| 7078 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7079 | ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7080 | &prev_ps->mac_remote_faults, |
| 7081 | &cur_ps->mac_remote_faults); |
| 7082 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7083 | ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7084 | &prev_ps->rx_undersize, &cur_ps->rx_undersize); |
| 7085 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7086 | ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7087 | &prev_ps->rx_fragments, &cur_ps->rx_fragments); |
| 7088 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7089 | ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7090 | &prev_ps->rx_oversize, &cur_ps->rx_oversize); |
| 7091 | |
Usha Ketineni | 9e7a5d1 | 2019-07-25 02:53:53 -0700 | [diff] [blame] | 7092 | ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7093 | &prev_ps->rx_jabber, &cur_ps->rx_jabber); |
| 7094 | |
Henry Tieman | 4ab9564 | 2020-05-11 18:01:41 -0700 | [diff] [blame] | 7095 | cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; |
| 7096 | |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7097 | pf->stat_prev_loaded = true; |
| 7098 | } |
| 7099 | |
| 7100 | /** |
| 7101 | * ice_get_stats64 - get statistics for network device structure |
| 7102 | * @netdev: network interface device structure |
| 7103 | * @stats: main device statistics structure |
| 7104 | */ |
| 7105 | static |
| 7106 | void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) |
| 7107 | { |
| 7108 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 7109 | struct rtnl_link_stats64 *vsi_stats; |
| 7110 | struct ice_vsi *vsi = np->vsi; |
| 7111 | |
| 7112 | vsi_stats = &vsi->net_stats; |
| 7113 | |
Dave Ertman | 3d57fd10 | 2019-08-08 07:39:28 -0700 | [diff] [blame] | 7114 | if (!vsi->num_txq || !vsi->num_rxq) |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7115 | return; |
Dave Ertman | 3d57fd10 | 2019-08-08 07:39:28 -0700 | [diff] [blame] | 7116 | |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7117 | /* netdev packet/byte stats come from ring counters. These are obtained |
| 7118 | * by summing up ring counters (done by ice_update_vsi_ring_stats). |
Dave Ertman | 3d57fd10 | 2019-08-08 07:39:28 -0700 | [diff] [blame] | 7119 | * Only call the update routine and read the registers if the VSI is |
| 7120 | * not down. |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7121 | */ |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 7122 | if (!test_bit(ICE_VSI_DOWN, vsi->state)) |
Dave Ertman | 3d57fd10 | 2019-08-08 07:39:28 -0700 | [diff] [blame] | 7123 | ice_update_vsi_ring_stats(vsi); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7124 | stats->tx_packets = vsi_stats->tx_packets; |
| 7125 | stats->tx_bytes = vsi_stats->tx_bytes; |
| 7126 | stats->rx_packets = vsi_stats->rx_packets; |
| 7127 | stats->rx_bytes = vsi_stats->rx_bytes; |
| 7128 | |
| 7129 | /* The rest of the stats could be read from the hardware, but instead we |
| 7130 | * just return the values that the watchdog task has already obtained |
| 7131 | * from the hardware. |
| 7132 | */ |
| 7133 | stats->multicast = vsi_stats->multicast; |
| 7134 | stats->tx_errors = vsi_stats->tx_errors; |
| 7135 | stats->tx_dropped = vsi_stats->tx_dropped; |
| 7136 | stats->rx_errors = vsi_stats->rx_errors; |
| 7137 | stats->rx_dropped = vsi_stats->rx_dropped; |
| 7138 | stats->rx_crc_errors = vsi_stats->rx_crc_errors; |
| 7139 | stats->rx_length_errors = vsi_stats->rx_length_errors; |
| 7140 | } |
| 7141 | |
| 7142 | /** |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 7143 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI |
| 7144 | * @vsi: VSI having NAPI disabled |
| 7145 | */ |
| 7146 | static void ice_napi_disable_all(struct ice_vsi *vsi) |
| 7147 | { |
| 7148 | int q_idx; |
| 7149 | |
| 7150 | if (!vsi->netdev) |
| 7151 | return; |
| 7152 | |
Brett Creeley | 0c2561c | 2019-02-28 15:25:53 -0800 | [diff] [blame] | 7153 | ice_for_each_q_vector(vsi, q_idx) { |
Young Xiao | eec9037 | 2018-11-29 01:54:10 +0000 | [diff] [blame] | 7154 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
| 7155 | |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 7156 | if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) |
Young Xiao | eec9037 | 2018-11-29 01:54:10 +0000 | [diff] [blame] | 7157 | napi_disable(&q_vector->napi); |
Jacob Keller | cdf1f1f | 2021-03-31 14:16:57 -0700 | [diff] [blame] | 7158 | |
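| | /* make sure any queued dynamic interrupt moderation (DIM) work has |
| | * finished before the vector is torn down |
| | */ |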
| 7159 | cancel_work_sync(&q_vector->tx.dim.work); |
| 7160 | cancel_work_sync(&q_vector->rx.dim.work); |
Young Xiao | eec9037 | 2018-11-29 01:54:10 +0000 | [diff] [blame] | 7161 | } |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 7162 | } |
| 7163 | |
| 7164 | /** |
Maciej Fijalkowski | d5926e0 | 2024-02-23 17:06:27 +0100 | [diff] [blame] | 7165 | * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI |
| 7166 | * @vsi: the VSI being un-configured |
| 7167 | */ |
| 7168 | static void ice_vsi_dis_irq(struct ice_vsi *vsi) |
| 7169 | { |
| 7170 | struct ice_pf *pf = vsi->back; |
| 7171 | struct ice_hw *hw = &pf->hw; |
| 7172 | u32 val; |
| 7173 | int i; |
| 7174 | |
| 7175 | /* disable interrupt causation from each Rx queue; Tx queues are |
| 7176 | * handled in ice_vsi_stop_tx_ring() |
| 7177 | */ |
| 7178 | if (vsi->rx_rings) { |
| 7179 | ice_for_each_rxq(vsi, i) { |
| 7180 | if (vsi->rx_rings[i]) { |
| 7181 | u16 reg; |
| 7182 | |
| 7183 | reg = vsi->rx_rings[i]->reg_idx; |
| 7184 | val = rd32(hw, QINT_RQCTL(reg)); |
| 7185 | val &= ~QINT_RQCTL_CAUSE_ENA_M; |
| 7186 | wr32(hw, QINT_RQCTL(reg), val); |
| 7187 | } |
| 7188 | } |
| 7189 | } |
| 7190 | |
| 7191 | /* disable each interrupt */ |
| 7192 | ice_for_each_q_vector(vsi, i) { |
| 7193 | if (!vsi->q_vectors[i]) |
| 7194 | continue; |
| 7195 | wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); |
| 7196 | } |
| 7197 | |
| 7198 | ice_flush(hw); |
| 7199 | |
| 7200 | /* don't call synchronize_irq() for VFs from the host */ |
| 7201 | if (vsi->type == ICE_VSI_VF) |
| 7202 | return; |
| 7203 | |
| 7204 | ice_for_each_q_vector(vsi, i) |
| 7205 | synchronize_irq(vsi->q_vectors[i]->irq.virq); |
| 7206 | } |
| 7207 | |
| 7208 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7209 | * ice_down - Shutdown the connection |
| 7210 | * @vsi: The VSI being stopped |
Jesse Brandeburg | 21c6e36 | 2021-10-25 17:08:25 -0700 | [diff] [blame] | 7211 | * |
| 7212 | * The caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7213 | */ |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 7214 | int ice_down(struct ice_vsi *vsi) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7215 | { |
Mateusz Palczewski | 8ac7132 | 2022-08-26 10:31:23 +0200 | [diff] [blame] | 7216 | int i, tx_err, rx_err, vlan_err = 0; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7217 | |
Jesse Brandeburg | 21c6e36 | 2021-10-25 17:08:25 -0700 | [diff] [blame] | 7218 | WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); |
| 7219 | |
Michal Swiatkowski | 33bf1e86 | 2024-03-01 12:54:11 +0100 | [diff] [blame] | 7220 | if (vsi->netdev) { |
Brett Creeley | c31af68 | 2021-12-02 08:38:46 -0800 | [diff] [blame] | 7221 | vlan_err = ice_vsi_del_vlan_zero(vsi); |
Jacob Keller | 6b1ff5d | 2022-12-05 11:52:43 -0800 | [diff] [blame] | 7222 | ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7223 | netif_carrier_off(vsi->netdev); |
| 7224 | netif_tx_disable(vsi->netdev); |
| 7225 | } |
| 7226 | |
| 7227 | ice_vsi_dis_irq(vsi); |
Anirudh Venkataramanan | 03f7a98 | 2018-12-19 10:03:27 -0800 | [diff] [blame] | 7228 | |
| 7229 | tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7230 | if (tx_err) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7231 | netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7232 | vsi->vsi_num, tx_err); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 7233 | if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { |
| 7234 | tx_err = ice_vsi_stop_xdp_tx_rings(vsi); |
| 7235 | if (tx_err) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7236 | netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 7237 | vsi->vsi_num, tx_err); |
| 7238 | } |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7239 | |
Brett Creeley | 13a6233 | 2020-01-22 07:21:29 -0800 | [diff] [blame] | 7240 | rx_err = ice_vsi_stop_all_rx_rings(vsi); |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7241 | if (rx_err) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7242 | netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7243 | vsi->vsi_num, rx_err); |
| 7244 | |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 7245 | ice_napi_disable_all(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7246 | |
| 7247 | ice_for_each_txq(vsi, i) |
| 7248 | ice_clean_tx_ring(vsi->tx_rings[i]); |
| 7249 | |
Kamil Maziarz | 78c50d6 | 2023-06-06 12:33:58 +0200 | [diff] [blame] | 7250 | if (ice_is_xdp_ena_vsi(vsi)) |
| 7251 | ice_for_each_xdp_txq(vsi, i) |
| 7252 | ice_clean_tx_ring(vsi->xdp_rings[i]); |
| 7253 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7254 | ice_for_each_rxq(vsi, i) |
| 7255 | ice_clean_rx_ring(vsi->rx_rings[i]); |
| 7256 | |
Mateusz Palczewski | 8ac7132 | 2022-08-26 10:31:23 +0200 | [diff] [blame] | 7257 | if (tx_err || rx_err || vlan_err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7258 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7259 | vsi->vsi_num, vsi->vsw->sw_id); |
Anirudh Venkataramanan | 72adf24 | 2018-09-19 17:23:05 -0700 | [diff] [blame] | 7260 | return -EIO; |
| 7261 | } |
| 7262 | |
| 7263 | return 0; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7264 | } |
| 7265 | |
| 7266 | /** |
Jesse Brandeburg | dddd406 | 2022-07-27 09:24:05 +0200 | [diff] [blame] | 7267 | * ice_down_up - shutdown the VSI connection and bring it up |
| 7268 | * @vsi: the VSI to be reconnected |
| 7269 | */ |
| 7270 | int ice_down_up(struct ice_vsi *vsi) |
| 7271 | { |
| 7272 | int ret; |
| 7273 | |
| 7274 | /* if DOWN already set, nothing to do */ |
| 7275 | if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) |
| 7276 | return 0; |
| 7277 | |
| 7278 | ret = ice_down(vsi); |
| 7279 | if (ret) |
| 7280 | return ret; |
| 7281 | |
| 7282 | ret = ice_up(vsi); |
| 7283 | if (ret) { |
| 7284 | netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); |
| 7285 | return ret; |
| 7286 | } |
| 7287 | |
| 7288 | return 0; |
| 7289 | } |
| 7290 | |
| 7291 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7292 | * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources |
| 7293 | * @vsi: VSI having resources allocated |
| 7294 | * |
| 7295 | * Return 0 on success, negative on failure |
| 7296 | */ |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 7297 | int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7298 | { |
Jesse Brandeburg | dab0588 | 2018-08-09 06:29:01 -0700 | [diff] [blame] | 7299 | int i, err = 0; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7300 | |
| 7301 | if (!vsi->num_txq) { |
Anirudh Venkataramanan | 9a94684 | 2020-02-06 01:20:09 -0800 | [diff] [blame] | 7302 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7303 | vsi->vsi_num); |
| 7304 | return -EINVAL; |
| 7305 | } |
| 7306 | |
| 7307 | ice_for_each_txq(vsi, i) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 7308 | struct ice_tx_ring *ring = vsi->tx_rings[i]; |
Michal Swiatkowski | eb0ee8a | 2019-10-09 07:09:47 -0700 | [diff] [blame] | 7309 | |
| 7310 | if (!ring) |
| 7311 | return -EINVAL; |
| 7312 | |
Grzegorz Nitka | 1c54c83 | 2021-08-19 17:08:56 -0700 | [diff] [blame] | 7313 | if (vsi->netdev) |
| 7314 | ring->netdev = vsi->netdev; |
Michal Swiatkowski | eb0ee8a | 2019-10-09 07:09:47 -0700 | [diff] [blame] | 7315 | err = ice_setup_tx_ring(ring); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7316 | if (err) |
| 7317 | break; |
| 7318 | } |
| 7319 | |
| 7320 | return err; |
| 7321 | } |
| 7322 | |
| 7323 | /** |
| 7324 | * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources |
| 7325 | * @vsi: VSI having resources allocated |
| 7326 | * |
| 7327 | * Return 0 on success, negative on failure |
| 7328 | */ |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 7329 | int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7330 | { |
Jesse Brandeburg | dab0588 | 2018-08-09 06:29:01 -0700 | [diff] [blame] | 7331 | int i, err = 0; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7332 | |
| 7333 | if (!vsi->num_rxq) { |
Anirudh Venkataramanan | 9a94684 | 2020-02-06 01:20:09 -0800 | [diff] [blame] | 7334 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7335 | vsi->vsi_num); |
| 7336 | return -EINVAL; |
| 7337 | } |
| 7338 | |
| 7339 | ice_for_each_rxq(vsi, i) { |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 7340 | struct ice_rx_ring *ring = vsi->rx_rings[i]; |
Michal Swiatkowski | eb0ee8a | 2019-10-09 07:09:47 -0700 | [diff] [blame] | 7341 | |
| 7342 | if (!ring) |
| 7343 | return -EINVAL; |
| 7344 | |
Grzegorz Nitka | 1c54c83 | 2021-08-19 17:08:56 -0700 | [diff] [blame] | 7345 | if (vsi->netdev) |
| 7346 | ring->netdev = vsi->netdev; |
Michal Swiatkowski | eb0ee8a | 2019-10-09 07:09:47 -0700 | [diff] [blame] | 7347 | err = ice_setup_rx_ring(ring); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7348 | if (err) |
| 7349 | break; |
| 7350 | } |
| 7351 | |
| 7352 | return err; |
| 7353 | } |
| 7354 | |
| 7355 | /** |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 7356 | * ice_vsi_open_ctrl - open control VSI for use |
| 7357 | * @vsi: the VSI to open |
| 7358 | * |
| 7359 | * Initialization of the Control VSI |
| 7360 | * |
| 7361 | * Returns 0 on success, negative value on error |
| 7362 | */ |
| 7363 | int ice_vsi_open_ctrl(struct ice_vsi *vsi) |
| 7364 | { |
| 7365 | char int_name[ICE_INT_NAME_STR_LEN]; |
| 7366 | struct ice_pf *pf = vsi->back; |
| 7367 | struct device *dev; |
| 7368 | int err; |
| 7369 | |
| 7370 | dev = ice_pf_to_dev(pf); |
| 7371 | /* allocate descriptors */ |
| 7372 | err = ice_vsi_setup_tx_rings(vsi); |
| 7373 | if (err) |
| 7374 | goto err_setup_tx; |
| 7375 | |
| 7376 | err = ice_vsi_setup_rx_rings(vsi); |
| 7377 | if (err) |
| 7378 | goto err_setup_rx; |
| 7379 | |
Michal Swiatkowski | 0db66d2 | 2022-12-21 12:38:15 +0100 | [diff] [blame] | 7380 | err = ice_vsi_cfg_lan(vsi); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 7381 | if (err) |
| 7382 | goto err_setup_rx; |
| 7383 | |
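| | /* the interrupt name is built as "<driver>-<device>:ctrl", e.g. something |
| | * like "ice-0000:3b:00.0:ctrl" (the PCI address here is only illustrative) |
| | */ |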
| 7384 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", |
| 7385 | dev_driver_string(dev), dev_name(dev)); |
| 7386 | err = ice_vsi_req_irq_msix(vsi, int_name); |
| 7387 | if (err) |
| 7388 | goto err_setup_rx; |
| 7389 | |
| 7390 | ice_vsi_cfg_msix(vsi); |
| 7391 | |
| 7392 | err = ice_vsi_start_all_rx_rings(vsi); |
| 7393 | if (err) |
| 7394 | goto err_up_complete; |
| 7395 | |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 7396 | clear_bit(ICE_VSI_DOWN, vsi->state); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 7397 | ice_vsi_ena_irq(vsi); |
| 7398 | |
| 7399 | return 0; |
| 7400 | |
| 7401 | err_up_complete: |
| 7402 | ice_down(vsi); |
| 7403 | err_setup_rx: |
| 7404 | ice_vsi_free_rx_rings(vsi); |
| 7405 | err_setup_tx: |
| 7406 | ice_vsi_free_tx_rings(vsi); |
| 7407 | |
| 7408 | return err; |
| 7409 | } |
| 7410 | |
| 7411 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7412 | * ice_vsi_open - Called when a network interface is made active |
| 7413 | * @vsi: the VSI to open |
| 7414 | * |
| 7415 | * Initialization of the VSI |
| 7416 | * |
| 7417 | * Returns 0 on success, negative value on error |
| 7418 | */ |
Grzegorz Nitka | 1a1c40d | 2021-08-19 17:08:54 -0700 | [diff] [blame] | 7419 | int ice_vsi_open(struct ice_vsi *vsi) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7420 | { |
| 7421 | char int_name[ICE_INT_NAME_STR_LEN]; |
| 7422 | struct ice_pf *pf = vsi->back; |
| 7423 | int err; |
| 7424 | |
| 7425 | /* allocate descriptors */ |
| 7426 | err = ice_vsi_setup_tx_rings(vsi); |
| 7427 | if (err) |
| 7428 | goto err_setup_tx; |
| 7429 | |
| 7430 | err = ice_vsi_setup_rx_rings(vsi); |
| 7431 | if (err) |
| 7432 | goto err_setup_rx; |
| 7433 | |
Michal Swiatkowski | 0db66d2 | 2022-12-21 12:38:15 +0100 | [diff] [blame] | 7434 | err = ice_vsi_cfg_lan(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7435 | if (err) |
| 7436 | goto err_setup_rx; |
| 7437 | |
| 7438 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 7439 | dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 7440 | err = ice_vsi_req_irq_msix(vsi, int_name); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7441 | if (err) |
| 7442 | goto err_setup_rx; |
| 7443 | |
Michal Swiatkowski | 122045c | 2022-08-08 11:58:54 +0200 | [diff] [blame] | 7444 | ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); |
| 7445 | |
Grzegorz Nitka | 1a1c40d | 2021-08-19 17:08:54 -0700 | [diff] [blame] | 7446 | if (vsi->type == ICE_VSI_PF) { |
| 7447 | /* Notify the stack of the actual queue counts. */ |
| 7448 | err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); |
| 7449 | if (err) |
| 7450 | goto err_set_qs; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7451 | |
Grzegorz Nitka | 1a1c40d | 2021-08-19 17:08:54 -0700 | [diff] [blame] | 7452 | err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); |
| 7453 | if (err) |
| 7454 | goto err_set_qs; |
| 7455 | } |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7456 | |
| 7457 | err = ice_up_complete(vsi); |
| 7458 | if (err) |
| 7459 | goto err_up_complete; |
| 7460 | |
| 7461 | return 0; |
| 7462 | |
| 7463 | err_up_complete: |
| 7464 | ice_down(vsi); |
| 7465 | err_set_qs: |
| 7466 | ice_vsi_free_irq(vsi); |
| 7467 | err_setup_rx: |
| 7468 | ice_vsi_free_rx_rings(vsi); |
| 7469 | err_setup_tx: |
| 7470 | ice_vsi_free_tx_rings(vsi); |
| 7471 | |
| 7472 | return err; |
| 7473 | } |
| 7474 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 7475 | /** |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7476 | * ice_vsi_release_all - Delete all VSIs |
| 7477 | * @pf: PF from which all VSIs are being removed |
| 7478 | */ |
| 7479 | static void ice_vsi_release_all(struct ice_pf *pf) |
| 7480 | { |
| 7481 | int err, i; |
| 7482 | |
| 7483 | if (!pf->vsi) |
| 7484 | return; |
| 7485 | |
Brett Creeley | 80ed404a | 2019-02-08 12:50:54 -0800 | [diff] [blame] | 7486 | ice_for_each_vsi(pf, i) { |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7487 | if (!pf->vsi[i]) |
| 7488 | continue; |
| 7489 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 7490 | if (pf->vsi[i]->type == ICE_VSI_CHNL) |
| 7491 | continue; |
| 7492 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7493 | err = ice_vsi_release(pf->vsi[i]); |
| 7494 | if (err) |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7495 | dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7496 | i, err, pf->vsi[i]->vsi_num); |
| 7497 | } |
| 7498 | } |
| 7499 | |
| 7500 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7501 | * ice_vsi_rebuild_by_type - Rebuild VSI of a given type |
| 7502 | * @pf: pointer to the PF instance |
| 7503 | * @type: VSI type to rebuild |
| 7504 | * |
| 7505 | * Iterates through the pf->vsi array and rebuilds VSIs of the requested type |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7506 | */ |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7507 | static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7508 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 7509 | struct device *dev = ice_pf_to_dev(pf); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7510 | int i, err; |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7511 | |
Brett Creeley | 80ed404a | 2019-02-08 12:50:54 -0800 | [diff] [blame] | 7512 | ice_for_each_vsi(pf, i) { |
Krzysztof Kazimierczak | 4425e05 | 2019-07-25 02:53:57 -0700 | [diff] [blame] | 7513 | struct ice_vsi *vsi = pf->vsi[i]; |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7514 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7515 | if (!vsi || vsi->type != type) |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7516 | continue; |
| 7517 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7518 | /* rebuild the VSI */ |
Michal Swiatkowski | 6624e78 | 2022-12-21 12:38:16 +0100 | [diff] [blame] | 7519 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7520 | if (err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7521 | dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", |
Anirudh Venkataramanan | 964674f | 2019-11-06 02:05:39 -0800 | [diff] [blame] | 7522 | err, vsi->idx, ice_vsi_type_str(type)); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7523 | return err; |
| 7524 | } |
| 7525 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7526 | /* replay filters for the VSI */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7527 | err = ice_replay_vsi(&pf->hw, vsi->idx); |
| 7528 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7529 | dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7530 | err, vsi->idx, ice_vsi_type_str(type)); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7531 | return err; |
Anirudh Venkataramanan | 334cb06 | 2018-09-19 17:23:14 -0700 | [diff] [blame] | 7532 | } |
| 7533 | |
| 7534 | /* Re-map HW VSI number, using VSI handle that has been |
| 7535 | * previously validated in ice_replay_vsi() call above |
| 7536 | */ |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7537 | vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); |
Anirudh Venkataramanan | 334cb06 | 2018-09-19 17:23:14 -0700 | [diff] [blame] | 7538 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7539 | /* enable the VSI */ |
| 7540 | err = ice_ena_vsi(vsi, false); |
| 7541 | if (err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 7542 | dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", |
Anirudh Venkataramanan | 964674f | 2019-11-06 02:05:39 -0800 | [diff] [blame] | 7543 | err, vsi->idx, ice_vsi_type_str(type)); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7544 | return err; |
| 7545 | } |
| 7546 | |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 7547 | dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, |
| 7548 | ice_vsi_type_str(type)); |
Anirudh Venkataramanan | 334cb06 | 2018-09-19 17:23:14 -0700 | [diff] [blame] | 7549 | } |
| 7550 | |
Anirudh Venkataramanan | 334cb06 | 2018-09-19 17:23:14 -0700 | [diff] [blame] | 7551 | return 0; |
| 7552 | } |
| 7553 | |
| 7554 | /** |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7555 | * ice_update_pf_netdev_link - Update PF netdev link status |
| 7556 | * @pf: pointer to the PF instance |
| 7557 | */ |
| 7558 | static void ice_update_pf_netdev_link(struct ice_pf *pf) |
| 7559 | { |
| 7560 | bool link_up; |
| 7561 | int i; |
| 7562 | |
| 7563 | ice_for_each_vsi(pf, i) { |
| 7564 | struct ice_vsi *vsi = pf->vsi[i]; |
| 7565 | |
| 7566 | if (!vsi || vsi->type != ICE_VSI_PF) |
| 7567 | return; |
| 7568 | |
| 7569 | ice_get_link_status(pf->vsi[i]->port_info, &link_up); |
| 7570 | if (link_up) { |
| 7571 | netif_carrier_on(pf->vsi[i]->netdev); |
| 7572 | netif_tx_wake_all_queues(pf->vsi[i]->netdev); |
| 7573 | } else { |
| 7574 | netif_carrier_off(pf->vsi[i]->netdev); |
| 7575 | netif_tx_stop_all_queues(pf->vsi[i]->netdev); |
| 7576 | } |
| 7577 | } |
| 7578 | } |
| 7579 | |
| 7580 | /** |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7581 | * ice_rebuild - rebuild after reset |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 7582 | * @pf: PF to rebuild |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7583 | * @reset_type: type of reset |
Brett Creeley | 12bb018 | 2020-05-15 17:51:15 -0700 | [diff] [blame] | 7584 | * |
| 7585 | * Do not rebuild VF VSIs in this flow because that is already handled via |
| 7586 | * ice_reset_all_vfs(). This is because the requirements for resetting a VF after a |
| 7587 | * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want |
| 7588 | * to reset/rebuild all the VF VSIs twice. |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7589 | */ |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7590 | static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7591 | { |
Brett Creeley | 4015d11 | 2019-11-08 06:23:26 -0800 | [diff] [blame] | 7592 | struct device *dev = ice_pf_to_dev(pf); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7593 | struct ice_hw *hw = &pf->hw; |
Brett Creeley | a1ffafb | 2021-12-02 08:38:49 -0800 | [diff] [blame] | 7594 | bool dvm; |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7595 | int err; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7596 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 7597 | if (test_bit(ICE_DOWN, pf->state)) |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7598 | goto clear_recovery; |
| 7599 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7600 | dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7601 | |
Petr Oros | b537752 | 2022-04-13 17:37:45 +0200 | [diff] [blame] | 7602 | #define ICE_EMP_RESET_SLEEP_MS 5000 |
Jacob Keller | 399e27d | 2021-10-27 16:22:55 -0700 | [diff] [blame] | 7603 | if (reset_type == ICE_RESET_EMPR) { |
| 7604 | /* If an EMP reset has occurred, any previously pending flash |
| 7605 | * update will have completed. We no longer know whether or |
| 7606 | * not the NVM update EMP reset is restricted. |
| 7607 | */ |
| 7608 | pf->fw_emp_reset_disabled = false; |
Petr Oros | b537752 | 2022-04-13 17:37:45 +0200 | [diff] [blame] | 7609 | |
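| | /* give the device and firmware time to settle after an EMP reset |
| | * before re-initializing the control queues below |
| | */ |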
| 7610 | msleep(ICE_EMP_RESET_SLEEP_MS); |
Jacob Keller | 399e27d | 2021-10-27 16:22:55 -0700 | [diff] [blame] | 7611 | } |
| 7612 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7613 | err = ice_init_all_ctrlq(hw); |
| 7614 | if (err) { |
| 7615 | dev_err(dev, "control queues init failed %d\n", err); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7616 | goto err_init_ctrlq; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7617 | } |
| 7618 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7619 | /* if DDP was previously loaded successfully */ |
| 7620 | if (!ice_is_safe_mode(pf)) { |
| 7621 | /* reload the SW DB of filter tables */ |
| 7622 | if (reset_type == ICE_RESET_PFR) |
| 7623 | ice_fill_blk_tbls(hw); |
| 7624 | else |
| 7625 | /* Reload DDP Package after CORER/GLOBR reset */ |
| 7626 | ice_load_pkg(NULL, pf); |
| 7627 | } |
| 7628 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7629 | err = ice_clear_pf_cfg(hw); |
| 7630 | if (err) { |
| 7631 | dev_err(dev, "clear PF configuration failed %d\n", err); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7632 | goto err_init_ctrlq; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7633 | } |
| 7634 | |
| 7635 | ice_clear_pxe_mode(hw); |
| 7636 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7637 | err = ice_init_nvm(hw); |
| 7638 | if (err) { |
| 7639 | dev_err(dev, "ice_init_nvm failed %d\n", err); |
Jacob Keller | 97a4ec0 | 2021-05-06 08:40:00 -0700 | [diff] [blame] | 7640 | goto err_init_ctrlq; |
| 7641 | } |
| 7642 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7643 | err = ice_get_caps(hw); |
| 7644 | if (err) { |
| 7645 | dev_err(dev, "ice_get_caps failed %d\n", err); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7646 | goto err_init_ctrlq; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7647 | } |
| 7648 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7649 | err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); |
| 7650 | if (err) { |
| 7651 | dev_err(dev, "set_mac_cfg failed %d\n", err); |
Anirudh Venkataramanan | 4244910 | 2020-05-15 17:36:30 -0700 | [diff] [blame] | 7652 | goto err_init_ctrlq; |
| 7653 | } |
| 7654 | |
Brett Creeley | a1ffafb | 2021-12-02 08:38:49 -0800 | [diff] [blame] | 7655 | dvm = ice_is_dvm_ena(hw); |
| 7656 | |
| 7657 | err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); |
| 7658 | if (err) |
| 7659 | goto err_init_ctrlq; |
| 7660 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7661 | err = ice_sched_init_port(hw->port_info); |
| 7662 | if (err) |
| 7663 | goto err_sched_init_port; |
| 7664 | |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7665 | /* start misc vector */ |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 7666 | err = ice_req_irq_msix_misc(pf); |
| 7667 | if (err) { |
| 7668 | dev_err(dev, "misc vector setup failed: %d\n", err); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7669 | goto err_sched_init_port; |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7670 | } |
| 7671 | |
Henry Tieman | 83af003 | 2020-05-11 18:01:45 -0700 | [diff] [blame] | 7672 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
| 7673 | wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); |
| 7674 | if (!rd32(hw, PFQF_FD_SIZE)) { |
| 7675 | u16 unused, guar, b_effort; |
| 7676 | |
| 7677 | guar = hw->func_caps.fd_fltr_guar; |
| 7678 | b_effort = hw->func_caps.fd_fltr_best_effort; |
| 7679 | |
| 7680 | /* force guaranteed filter pool for PF */ |
| 7681 | ice_alloc_fd_guar_item(hw, &unused, guar); |
| 7682 | /* force shared filter pool for PF */ |
| 7683 | ice_alloc_fd_shrd_item(hw, &unused, b_effort); |
| 7684 | } |
| 7685 | } |
| 7686 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7687 | if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
| 7688 | ice_dcb_rebuild(pf); |
| 7689 | |
Jacob Keller | 06c16d8 | 2021-06-09 09:39:50 -0700 | [diff] [blame] | 7690 | /* If the PF previously had PTP enabled, PTP init needs to happen before |
| 7691 | * the VSI rebuild; otherwise the PTP link status events would |
| 7692 | * fail. |
| 7693 | */ |
| 7694 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
Jacob Keller | 803bef8 | 2024-01-25 13:57:54 -0800 | [diff] [blame] | 7695 | ice_ptp_rebuild(pf, reset_type); |
Jacob Keller | 06c16d8 | 2021-06-09 09:39:50 -0700 | [diff] [blame] | 7696 | |
Karol Kolacinski | 43113ff | 2022-03-01 10:38:03 -0800 | [diff] [blame] | 7697 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) |
| 7698 | ice_gnss_init(pf); |
| 7699 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7700 | /* rebuild PF VSI */ |
| 7701 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7702 | if (err) { |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7703 | dev_err(dev, "PF VSI rebuild failed: %d\n", err); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7704 | goto err_vsi_rebuild; |
| 7705 | } |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7706 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 7707 | if (reset_type == ICE_RESET_PFR) { |
| 7708 | err = ice_rebuild_channels(pf); |
| 7709 | if (err) { |
| 7710 | dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", |
| 7711 | err); |
| 7712 | goto err_vsi_rebuild; |
| 7713 | } |
| 7714 | } |
| 7715 | |
Henry Tieman | 83af003 | 2020-05-11 18:01:45 -0700 | [diff] [blame] | 7716 | /* If Flow Director is active */ |
| 7717 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
| 7718 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); |
| 7719 | if (err) { |
| 7720 | dev_err(dev, "control VSI rebuild failed: %d\n", err); |
| 7721 | goto err_vsi_rebuild; |
| 7722 | } |
| 7723 | |
| 7724 | /* replay HW Flow Director recipes */ |
| 7725 | if (hw->fdir_prof) |
| 7726 | ice_fdir_replay_flows(hw); |
| 7727 | |
| 7728 | /* replay Flow Director filters */ |
| 7729 | ice_fdir_replay_fltrs(pf); |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 7730 | |
| 7731 | ice_rebuild_arfs(pf); |
Henry Tieman | 83af003 | 2020-05-11 18:01:45 -0700 | [diff] [blame] | 7732 | } |
| 7733 | |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7734 | ice_update_pf_netdev_link(pf); |
| 7735 | |
| 7736 | /* tell the firmware we are up */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7737 | err = ice_send_version(pf); |
| 7738 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7739 | dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 7740 | err); |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 7741 | goto err_vsi_rebuild; |
| 7742 | } |
| 7743 | |
| 7744 | ice_replay_post(hw); |
| 7745 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7746 | /* if we get here, reset flow is successful */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 7747 | clear_bit(ICE_RESET_FAILED, pf->state); |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 7748 | |
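| | /* re-attach the auxiliary (RDMA) device that was unplugged while |
| | * preparing for the reset |
| | */ |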
| 7749 | ice_plug_aux_dev(pf); |
Dave Ertman | 3579aa8 | 2023-06-20 15:18:54 -0700 | [diff] [blame] | 7750 | if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) |
| 7751 | ice_lag_rebuild(pf); |
Jacob Keller | 7758017 | 2023-11-21 13:12:57 -0800 | [diff] [blame] | 7752 | |
| 7753 | /* Restore timestamp mode settings after VSI rebuild */ |
| 7754 | ice_ptp_restore_timestamp_mode(pf); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7755 | return; |
| 7756 | |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7757 | err_vsi_rebuild: |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7758 | err_sched_init_port: |
| 7759 | ice_sched_cleanup_all(hw); |
| 7760 | err_init_ctrlq: |
Piotr Gardocki | fdd288e | 2024-06-14 12:38:11 +0200 | [diff] [blame] | 7761 | ice_shutdown_all_ctrlq(hw, false); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 7762 | set_bit(ICE_RESET_FAILED, pf->state); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7763 | clear_recovery: |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7764 | /* set this bit in PF state to control service task scheduling */ |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 7765 | set_bit(ICE_NEEDS_RESTART, pf->state); |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 7766 | dev_err(dev, "Rebuild failed, unload and reload driver\n"); |
Anirudh Venkataramanan | 0b28b70 | 2018-03-20 07:58:18 -0700 | [diff] [blame] | 7767 | } |
| 7768 | |
| 7769 | /** |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7770 | * ice_change_mtu - NDO callback to change the MTU |
| 7771 | * @netdev: network interface device structure |
| 7772 | * @new_mtu: new value for maximum frame size |
| 7773 | * |
| 7774 | * Returns 0 on success, negative on failure |
| 7775 | */ |
| 7776 | static int ice_change_mtu(struct net_device *netdev, int new_mtu) |
| 7777 | { |
| 7778 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 7779 | struct ice_vsi *vsi = np->vsi; |
| 7780 | struct ice_pf *pf = vsi->back; |
Maciej Fijalkowski | 2fba7dc | 2023-01-31 21:45:03 +0100 | [diff] [blame] | 7781 | struct bpf_prog *prog; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7782 | u8 count = 0; |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 7783 | int err = 0; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7784 | |
Jesse Brandeburg | 22bef5e | 2020-05-15 17:36:38 -0700 | [diff] [blame] | 7785 | if (new_mtu == (int)netdev->mtu) { |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 7786 | netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7787 | return 0; |
| 7788 | } |
| 7789 | |
Maciej Fijalkowski | 2fba7dc | 2023-01-31 21:45:03 +0100 | [diff] [blame] | 7790 | prog = vsi->xdp_prog; |
| 7791 | if (prog && !prog->aux->xdp_has_frags) { |
Maciej Fijalkowski | 23b4451 | 2019-10-24 01:11:25 -0700 | [diff] [blame] | 7792 | int frame_size = ice_max_xdp_frame_size(vsi); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 7793 | |
| 7794 | if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { |
| 7795 | netdev_err(netdev, "max MTU for XDP usage is %d\n", |
Maciej Fijalkowski | 23b4451 | 2019-10-24 01:11:25 -0700 | [diff] [blame] | 7796 | frame_size - ICE_ETH_PKT_HDR_PAD); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 7797 | return -EINVAL; |
| 7798 | } |
Maciej Fijalkowski | c61bceb | 2023-01-31 21:44:54 +0100 | [diff] [blame] | 7799 | } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { |
| 7800 | if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { |
| 7801 | netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", |
| 7802 | ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); |
| 7803 | return -EINVAL; |
| 7804 | } |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 7805 | } |
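/* For illustration: ICE_ETH_PKT_HDR_PAD covers the L2 overhead (Ethernet
 * header, FCS and room for two VLAN tags) added on top of the MTU, so with,
 * say, a 3072-byte XDP frame buffer the largest MTU accepted above is
 * 3072 - ICE_ETH_PKT_HDR_PAD.
 */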
| 7806 | |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7807 | /* if a reset is in progress, wait for some time for it to complete */ |
| 7808 | do { |
Dave Ertman | 5df7e45 | 2018-09-19 17:23:11 -0700 | [diff] [blame] | 7809 | if (ice_is_reset_in_progress(pf->state)) { |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7810 | count++; |
| 7811 | usleep_range(1000, 2000); |
| 7812 | } else { |
| 7813 | break; |
| 7814 | } |
| 7815 | |
| 7816 | } while (count < 100); |
| 7817 | |
| 7818 | if (count == 100) { |
Anirudh Venkataramanan | 2f2da36 | 2019-04-16 10:35:03 -0700 | [diff] [blame] | 7819 | netdev_err(netdev, "can't change MTU. Device is busy\n"); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7820 | return -EBUSY; |
| 7821 | } |
| 7822 | |
Eric Dumazet | 1eb2cde | 2024-05-06 10:28:12 +0000 | [diff] [blame] | 7823 | WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); |
Maciej Fijalkowski | b7a0345 | 2023-06-13 13:35:52 +0200 | [diff] [blame] | 7824 | err = ice_down_up(vsi); |
| 7825 | if (err) |
| 7826 | return err; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7827 | |
Tony Nguyen | bda5b7d | 2019-12-12 03:13:03 -0800 | [diff] [blame] | 7828 | netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); |
Dave Ertman | 97b0129 | 2022-02-18 12:39:25 -0800 | [diff] [blame] | 7829 | set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); |
Dave Ertman | 348048e | 2021-05-20 09:37:50 -0500 | [diff] [blame] | 7830 | |
| 7831 | return err; |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 7832 | } |
| 7833 | |
| 7834 | /** |
Arnd Bergmann | a760537 | 2021-07-27 15:45:13 +0200 | [diff] [blame] | 7835 | * ice_eth_ioctl - Access the hwtstamp interface |
Jacob Keller | 77a7811 | 2021-06-09 09:39:52 -0700 | [diff] [blame] | 7836 | * @netdev: network interface device structure |
| 7837 | * @ifr: interface request data |
| 7838 | * @cmd: ioctl command |
| 7839 | */ |
Arnd Bergmann | a760537 | 2021-07-27 15:45:13 +0200 | [diff] [blame] | 7840 | static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
Jacob Keller | 77a7811 | 2021-06-09 09:39:52 -0700 | [diff] [blame] | 7841 | { |
| 7842 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 7843 | struct ice_pf *pf = np->vsi->back; |
| 7844 | |
| 7845 | switch (cmd) { |
| 7846 | case SIOCGHWTSTAMP: |
| 7847 | return ice_ptp_get_ts_config(pf, ifr); |
| 7848 | case SIOCSHWTSTAMP: |
| 7849 | return ice_ptp_set_ts_config(pf, ifr); |
| 7850 | default: |
| 7851 | return -EOPNOTSUPP; |
| 7852 | } |
| 7853 | } |
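/* A userspace sketch (not driver code; "eth0" and sock_fd are placeholders)
 * of the hwtstamp ioctl that lands in ice_eth_ioctl(), following
 * Documentation/networking/timestamping.rst:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */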
| 7854 | |
| 7855 | /** |
Lihong Yang | 0fee357 | 2020-05-07 17:41:04 -0700 | [diff] [blame] | 7856 | * ice_aq_str - convert AQ err code to a string |
| 7857 | * @aq_err: the AQ error code to convert |
| 7858 | */ |
| 7859 | const char *ice_aq_str(enum ice_aq_err aq_err) |
| 7860 | { |
| 7861 | switch (aq_err) { |
| 7862 | case ICE_AQ_RC_OK: |
| 7863 | return "OK"; |
| 7864 | case ICE_AQ_RC_EPERM: |
| 7865 | return "ICE_AQ_RC_EPERM"; |
| 7866 | case ICE_AQ_RC_ENOENT: |
| 7867 | return "ICE_AQ_RC_ENOENT"; |
| 7868 | case ICE_AQ_RC_ENOMEM: |
| 7869 | return "ICE_AQ_RC_ENOMEM"; |
| 7870 | case ICE_AQ_RC_EBUSY: |
| 7871 | return "ICE_AQ_RC_EBUSY"; |
| 7872 | case ICE_AQ_RC_EEXIST: |
| 7873 | return "ICE_AQ_RC_EEXIST"; |
| 7874 | case ICE_AQ_RC_EINVAL: |
| 7875 | return "ICE_AQ_RC_EINVAL"; |
| 7876 | case ICE_AQ_RC_ENOSPC: |
| 7877 | return "ICE_AQ_RC_ENOSPC"; |
| 7878 | case ICE_AQ_RC_ENOSYS: |
| 7879 | return "ICE_AQ_RC_ENOSYS"; |
Chinh T Cao | b5e19a6 | 2020-05-15 17:55:06 -0700 | [diff] [blame] | 7880 | case ICE_AQ_RC_EMODE: |
| 7881 | return "ICE_AQ_RC_EMODE"; |
Lihong Yang | 0fee357 | 2020-05-07 17:41:04 -0700 | [diff] [blame] | 7882 | case ICE_AQ_RC_ENOSEC: |
| 7883 | return "ICE_AQ_RC_ENOSEC"; |
| 7884 | case ICE_AQ_RC_EBADSIG: |
| 7885 | return "ICE_AQ_RC_EBADSIG"; |
| 7886 | case ICE_AQ_RC_ESVN: |
| 7887 | return "ICE_AQ_RC_ESVN"; |
| 7888 | case ICE_AQ_RC_EBADMAN: |
| 7889 | return "ICE_AQ_RC_EBADMAN"; |
| 7890 | case ICE_AQ_RC_EBADBUF: |
| 7891 | return "ICE_AQ_RC_EBADBUF"; |
| 7892 | } |
| 7893 | |
| 7894 | return "ICE_AQ_RC_UNKNOWN"; |
| 7895 | } |
| 7896 | |
| 7897 | /** |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7898 | * ice_set_rss_lut - Set RSS LUT |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7899 | * @vsi: Pointer to VSI structure |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7900 | * @lut: Lookup table |
| 7901 | * @lut_size: Lookup table size |
| 7902 | * |
| 7903 | * Returns 0 on success, negative on failure |
| 7904 | */ |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7905 | int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7906 | { |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7907 | struct ice_aq_get_set_rss_lut_params params = {}; |
| 7908 | struct ice_hw *hw = &vsi->back->hw; |
Tony Nguyen | 5e24d59 | 2021-10-07 15:56:57 -0700 | [diff] [blame] | 7909 | int status; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7910 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7911 | if (!lut) |
| 7912 | return -EINVAL; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7913 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7914 | params.vsi_handle = vsi->idx; |
| 7915 | params.lut_size = lut_size; |
| 7916 | params.lut_type = vsi->rss_lut_type; |
| 7917 | params.lut = lut; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7918 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7919 | status = ice_aq_set_rss_lut(hw, ¶ms); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7920 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7921 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 7922 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7923 | |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7924 | return status; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7925 | } |
| 7926 | |
| 7927 | /** |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7928 | * ice_set_rss_key - Set RSS key |
| 7929 | * @vsi: Pointer to the VSI structure |
| 7930 | * @seed: RSS hash seed |
| 7931 | * |
| 7932 | * Returns 0 on success, negative on failure |
| 7933 | */ |
| 7934 | int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) |
| 7935 | { |
| 7936 | struct ice_hw *hw = &vsi->back->hw; |
Tony Nguyen | 5e24d59 | 2021-10-07 15:56:57 -0700 | [diff] [blame] | 7937 | int status; |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7938 | |
| 7939 | if (!seed) |
| 7940 | return -EINVAL; |
| 7941 | |
| 7942 | status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7943 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7944 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 7945 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7946 | |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7947 | return status; |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7948 | } |
| 7949 | |
| 7950 | /** |
| 7951 | * ice_get_rss_lut - Get RSS LUT |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7952 | * @vsi: Pointer to VSI structure |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7953 | * @lut: Buffer to store the lookup table entries |
| 7954 | * @lut_size: Size of buffer to store the lookup table entries |
| 7955 | * |
| 7956 | * Returns 0 on success, negative on failure |
| 7957 | */ |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7958 | int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7959 | { |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7960 | struct ice_aq_get_set_rss_lut_params params = {}; |
| 7961 | struct ice_hw *hw = &vsi->back->hw; |
Tony Nguyen | 5e24d59 | 2021-10-07 15:56:57 -0700 | [diff] [blame] | 7962 | int status; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7963 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7964 | if (!lut) |
| 7965 | return -EINVAL; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7966 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7967 | params.vsi_handle = vsi->idx; |
| 7968 | params.lut_size = lut_size; |
| 7969 | params.lut_type = vsi->rss_lut_type; |
| 7970 | params.lut = lut; |
| 7971 | |
| 7972 | status = ice_aq_get_rss_lut(hw, ¶ms); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7973 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7974 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 7975 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7976 | |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7977 | return status; |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7978 | } |
Brett Creeley | e3c5392 | 2021-03-02 10:15:35 -0800 | [diff] [blame] | 7979 | |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7980 | /** |
| 7981 | * ice_get_rss_key - Get RSS key |
| 7982 | * @vsi: Pointer to VSI structure |
| 7983 | * @seed: Buffer to store the key in |
| 7984 | * |
| 7985 | * Returns 0 on success, negative on failure |
| 7986 | */ |
| 7987 | int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) |
| 7988 | { |
| 7989 | struct ice_hw *hw = &vsi->back->hw; |
Tony Nguyen | 5e24d59 | 2021-10-07 15:56:57 -0700 | [diff] [blame] | 7990 | int status; |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 7991 | |
| 7992 | if (!seed) |
| 7993 | return -EINVAL; |
| 7994 | |
| 7995 | status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 7996 | if (status) |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 7997 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", |
Tony Nguyen | 5518ac2 | 2021-10-07 15:59:03 -0700 | [diff] [blame] | 7998 | status, ice_aq_str(hw->adminq.sq_last_status)); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 7999 | |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 8000 | return status; |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 8001 | } |
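/* The four RSS helpers above (ice_set_rss_lut/key, ice_get_rss_lut/key) are
 * thin wrappers around the corresponding admin queue commands; among other
 * callers, they back the ethtool -x/-X (get/set rxfh) paths wired up in
 * ice_ethtool.c.
 */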
| 8002 | |
| 8003 | /** |
Jeff Guo | 352e9bf | 2023-12-12 17:33:20 -0700 | [diff] [blame] | 8004 | * ice_set_rss_hfunc - Set RSS hash function |
| 8005 | * @vsi: Pointer to VSI structure |
| 8006 | * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) |
| 8007 | * |
| 8008 | * Returns 0 on success, negative on failure |
| 8009 | */ |
| 8010 | int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) |
| 8011 | { |
| 8012 | struct ice_hw *hw = &vsi->back->hw; |
| 8013 | struct ice_vsi_ctx *ctx; |
| 8014 | bool symm; |
| 8015 | int err; |
| 8016 | |
| 8017 | if (hfunc == vsi->rss_hfunc) |
| 8018 | return 0; |
| 8019 | |
| 8020 | if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && |
| 8021 | hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) |
| 8022 | return -EOPNOTSUPP; |
| 8023 | |
| 8024 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 8025 | if (!ctx) |
| 8026 | return -ENOMEM; |
| 8027 | |
| 8028 | ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); |
| 8029 | ctx->info.q_opt_rss = vsi->info.q_opt_rss; |
| 8030 | ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; |
| 8031 | ctx->info.q_opt_rss |= |
| 8032 | FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); |
| 8033 | ctx->info.q_opt_tc = vsi->info.q_opt_tc; |
| 8034 | ctx->info.q_opt_flags = vsi->info.q_opt_rss; |
| 8035 | |
| 8036 | err = ice_update_vsi(hw, vsi->idx, ctx, NULL); |
| 8037 | if (err) { |
| 8038 | dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", |
| 8039 | vsi->vsi_num, err); |
| 8040 | } else { |
| 8041 | vsi->info.q_opt_rss = ctx->info.q_opt_rss; |
| 8042 | vsi->rss_hfunc = hfunc; |
| 8043 | netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", |
| 8044 | hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? |
| 8045 | "Symmetric " : ""); |
| 8046 | } |
| 8047 | kfree(ctx); |
| 8048 | if (err) |
| 8049 | return err; |
| 8050 | |
| 8051 | /* Fix the symmetry setting for all existing RSS configurations */ |
| 8052 | symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); |
| 8053 | return ice_set_rss_cfg_symm(hw, vsi, symm); |
| 8054 | } |
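/* This helper is normally reached from the ethtool set_rxfh path in
 * ice_ethtool.c; the exact userspace knob that selects symmetric rather than
 * plain Toeplitz depends on the ethtool and kernel version in use.
 */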
| 8055 | |
| 8056 | /** |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8057 | * ice_bridge_getlink - Get the hardware bridge mode |
| 8058 | * @skb: skb buff |
Anirudh Venkataramanan | f9867df | 2019-02-19 15:04:13 -0800 | [diff] [blame] | 8059 | * @pid: process ID |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8060 | * @seq: RTNL message seq |
| 8061 | * @dev: the netdev being configured |
| 8062 | * @filter_mask: filter mask passed in |
| 8063 | * @nlflags: netlink flags passed in |
| 8064 | * |
| 8065 | * Return the bridge mode (VEB/VEPA) |
| 8066 | */ |
| 8067 | static int |
| 8068 | ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 8069 | struct net_device *dev, u32 filter_mask, int nlflags) |
| 8070 | { |
| 8071 | struct ice_netdev_priv *np = netdev_priv(dev); |
| 8072 | struct ice_vsi *vsi = np->vsi; |
| 8073 | struct ice_pf *pf = vsi->back; |
| 8074 | u16 bmode; |
| 8075 | |
| 8076 | bmode = pf->first_sw->bridge_mode; |
| 8077 | |
| 8078 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, |
| 8079 | filter_mask, NULL); |
| 8080 | } |
| 8081 | |
| 8082 | /** |
| 8083 | * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) |
| 8084 | * @vsi: Pointer to VSI structure |
| 8085 | * @bmode: Hardware bridge mode (VEB/VEPA) |
| 8086 | * |
| 8087 | * Returns 0 on success, negative on failure |
| 8088 | */ |
| 8089 | static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) |
| 8090 | { |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8091 | struct ice_aqc_vsi_props *vsi_props; |
| 8092 | struct ice_hw *hw = &vsi->back->hw; |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8093 | struct ice_vsi_ctx *ctxt; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 8094 | int ret; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8095 | |
| 8096 | vsi_props = &vsi->info; |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8097 | |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 8098 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8099 | if (!ctxt) |
| 8100 | return -ENOMEM; |
| 8101 | |
| 8102 | ctxt->info = vsi->info; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8103 | |
| 8104 | if (bmode == BRIDGE_MODE_VEB) |
| 8105 | /* change from VEPA to VEB mode */ |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8106 | ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8107 | else |
| 8108 | /* change from VEB to VEPA mode */ |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8109 | ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
| 8110 | ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); |
Anirudh Venkataramanan | 5726ca0 | 2018-09-19 17:23:12 -0700 | [diff] [blame] | 8111 | |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 8112 | ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
| 8113 | if (ret) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 8114 | dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 8115 | bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8116 | goto out; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8117 | } |
| 8118 | /* Update sw flags for bookkeeping */ |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8119 | vsi_props->sw_flags = ctxt->info.sw_flags; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8120 | |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8121 | out: |
Tony Nguyen | 9efe35d0 | 2019-11-08 06:23:25 -0800 | [diff] [blame] | 8122 | kfree(ctxt); |
Bruce Allan | 198a666 | 2019-02-08 12:50:32 -0800 | [diff] [blame] | 8123 | return ret; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8124 | } |
| 8125 | |
| 8126 | /** |
| 8127 | * ice_bridge_setlink - Set the hardware bridge mode |
| 8128 | * @dev: the netdev being configured |
| 8129 | * @nlh: RTNL message |
| 8130 | * @flags: bridge setlink flags |
Petr Machata | 2fd527b | 2018-12-12 17:02:48 +0000 | [diff] [blame] | 8131 | * @extack: netlink extended ack |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8132 | * |
| 8133 | * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is |
| 8134 | * hooked up. Iterates through the PF VSI list and sets the loopback mode (if |
| 8135 | * not already set) for all VSIs connected to this switch, and also updates the |
| 8136 | * unicast switch filter rules for the corresponding switch of the netdev. |
| 8137 | */ |
| 8138 | static int |
| 8139 | ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, |
Bruce Allan | 3d50514 | 2018-12-19 10:03:20 -0800 | [diff] [blame] | 8140 | u16 __always_unused flags, |
| 8141 | struct netlink_ext_ack __always_unused *extack) |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8142 | { |
| 8143 | struct ice_netdev_priv *np = netdev_priv(dev); |
| 8144 | struct ice_pf *pf = np->vsi->back; |
| 8145 | struct nlattr *attr, *br_spec; |
| 8146 | struct ice_hw *hw = &pf->hw; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8147 | struct ice_sw *pf_sw; |
| 8148 | int rem, v, err = 0; |
| 8149 | |
| 8150 | pf_sw = pf->first_sw; |
| 8151 | /* find the attribute in the netlink message */ |
| 8152 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
Rand Deeb | 06e456a | 2024-02-28 18:54:48 +0300 | [diff] [blame] | 8153 | if (!br_spec) |
| 8154 | return -EINVAL; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8155 | |
Johannes Berg | e8058a4 | 2024-03-28 20:31:45 +0100 | [diff] [blame] | 8156 | nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { |
| 8157 | __u16 mode = nla_get_u16(attr); |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8158 | |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8159 | if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) |
| 8160 | return -EINVAL; |
| 8161 | /* Continue if bridge mode is not being flipped */ |
| 8162 | if (mode == pf_sw->bridge_mode) |
| 8163 | continue; |
| 8164 | /* Iterate through the PF VSI list and update the loopback |
| 8165 | * mode of the VSI |
| 8166 | */ |
| 8167 | ice_for_each_vsi(pf, v) { |
| 8168 | if (!pf->vsi[v]) |
| 8169 | continue; |
| 8170 | err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); |
| 8171 | if (err) |
| 8172 | return err; |
| 8173 | } |
| 8174 | |
| 8175 | hw->evb_veb = (mode == BRIDGE_MODE_VEB); |
| 8176 | /* Update the unicast switch filter rules for the corresponding |
| 8177 | * switch of the netdev |
| 8178 | */ |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 8179 | err = ice_update_sw_rule_bridge_mode(hw); |
| 8180 | if (err) { |
Tony Nguyen | 5f87ec4 | 2021-10-07 15:56:02 -0700 | [diff] [blame] | 8181 | netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 8182 | mode, err, |
Lihong Yang | 0fee357 | 2020-05-07 17:41:04 -0700 | [diff] [blame] | 8183 | ice_aq_str(hw->adminq.sq_last_status)); |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8184 | /* revert hw->evb_veb */ |
| 8185 | hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 8186 | return err; |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 8187 | } |
| 8188 | |
| 8189 | pf_sw->bridge_mode = mode; |
| 8190 | } |
| 8191 | |
| 8192 | return 0; |
| 8193 | } |
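/* A usage sketch (the PF netdev name is a placeholder): the bridge mode
 * reported and programmed by ice_bridge_getlink()/ice_bridge_setlink() is
 * normally driven from userspace with iproute2, e.g.
 *
 *	# bridge link set dev eth0 hwmode vepa
 *	# bridge link show dev eth0
 */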
| 8194 | |
| 8195 | /** |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8196 | * ice_tx_timeout - Respond to a Tx Hang |
| 8197 | * @netdev: network interface device structure |
Bruce Allan | 644f40e | 2020-01-22 07:21:37 -0800 | [diff] [blame] | 8198 | * @txqueue: Tx queue |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8199 | */ |
Michael S. Tsirkin | 0290bd2 | 2019-12-10 09:23:51 -0500 | [diff] [blame] | 8200 | static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8201 | { |
| 8202 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Maciej Fijalkowski | e72bba2 | 2021-08-19 13:59:58 +0200 | [diff] [blame] | 8203 | struct ice_tx_ring *tx_ring = NULL; |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8204 | struct ice_vsi *vsi = np->vsi; |
| 8205 | struct ice_pf *pf = vsi->back; |
Brett Creeley | 807bc98 | 2018-10-26 10:41:00 -0700 | [diff] [blame] | 8206 | u32 i; |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8207 | |
| 8208 | pf->tx_timeout_count++; |
| 8209 | |
Avinash JD | 610ed0e | 2020-05-07 17:41:00 -0700 | [diff] [blame] | 8210 | /* Check if PFC is enabled for the TC to which the queue belongs. |
| 8211 | * If so, the Tx timeout is not caused by a hung queue and there is |
| 8212 | * no need to reset and rebuild |
| 8213 | */ |
| 8214 | if (ice_is_pfc_causing_hung_q(pf, txqueue)) { |
| 8215 | dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", |
| 8216 | txqueue); |
| 8217 | return; |
| 8218 | } |
| 8219 | |
Julio Faracco | ed5a3f6 | 2019-12-18 15:38:45 -0300 | [diff] [blame] | 8220 | /* now that we have an index, find the tx_ring struct */ |
Maciej Fijalkowski | 2faf63b | 2021-08-19 14:00:04 +0200 | [diff] [blame] | 8221 | ice_for_each_txq(vsi, i) |
Julio Faracco | ed5a3f6 | 2019-12-18 15:38:45 -0300 | [diff] [blame] | 8222 | if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
| 8223 | if (txqueue == vsi->tx_rings[i]->q_index) { |
| 8224 | tx_ring = vsi->tx_rings[i]; |
| 8225 | break; |
| 8226 | } |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8227 | |
| 8228 | /* Reset recovery level if enough time has elapsed after last timeout. |
| 8229 | * Also ensure no new reset action happens before next timeout period. |
| 8230 | */ |
| 8231 | if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) |
| 8232 | pf->tx_timeout_recovery_level = 1; |
| 8233 | else if (time_before(jiffies, (pf->tx_timeout_last_recovery + |
| 8234 | netdev->watchdog_timeo))) |
| 8235 | return; |
| 8236 | |
| 8237 | if (tx_ring) { |
Brett Creeley | 807bc98 | 2018-10-26 10:41:00 -0700 | [diff] [blame] | 8238 | struct ice_hw *hw = &pf->hw; |
| 8239 | u32 head, val = 0; |
| 8240 | |
Jesse Brandeburg | 5a259f8 | 2023-12-05 17:01:12 -0800 | [diff] [blame] | 8241 | head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, |
| 8242 | rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8243 | /* Read interrupt register */ |
Brett Creeley | ba88073 | 2019-06-26 02:20:25 -0700 | [diff] [blame] | 8244 | val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8245 | |
Tony Nguyen | 93ff485 | 2020-02-27 10:15:02 -0800 | [diff] [blame] | 8246 | netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", |
Julio Faracco | ed5a3f6 | 2019-12-18 15:38:45 -0300 | [diff] [blame] | 8247 | vsi->vsi_num, txqueue, tx_ring->next_to_clean, |
Brett Creeley | 807bc98 | 2018-10-26 10:41:00 -0700 | [diff] [blame] | 8248 | head, tx_ring->next_to_use, val); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8249 | } |
| 8250 | |
| 8251 | pf->tx_timeout_last_recovery = jiffies; |
Tony Nguyen | 93ff485 | 2020-02-27 10:15:02 -0800 | [diff] [blame] | 8252 | netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", |
Julio Faracco | ed5a3f6 | 2019-12-18 15:38:45 -0300 | [diff] [blame] | 8253 | pf->tx_timeout_recovery_level, txqueue); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8254 | |
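/* Escalate recovery with each timeout inside the window checked above:
 * level 1 requests a PF reset, level 2 a core reset, level 3 a global
 * reset; anything beyond that leaves the device down until the driver
 * is reloaded.
 */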
| 8255 | switch (pf->tx_timeout_recovery_level) { |
| 8256 | case 1: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 8257 | set_bit(ICE_PFR_REQ, pf->state); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8258 | break; |
| 8259 | case 2: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 8260 | set_bit(ICE_CORER_REQ, pf->state); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8261 | break; |
| 8262 | case 3: |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 8263 | set_bit(ICE_GLOBR_REQ, pf->state); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8264 | break; |
| 8265 | default: |
| 8266 | netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 8267 | set_bit(ICE_DOWN, pf->state); |
Anirudh Venkataramanan | e97fb1a | 2021-03-02 10:15:37 -0800 | [diff] [blame] | 8268 | set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 8269 | set_bit(ICE_SERVICE_DIS, pf->state); |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 8270 | break; |
| 8271 | } |
| 8272 | |
| 8273 | ice_service_task_schedule(pf); |
| 8274 | pf->tx_timeout_recovery_level++; |
| 8275 | } |
| 8276 | |
| 8277 | /** |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 8278 | * ice_setup_tc_cls_flower - flower classifier offloads |
| 8279 | * @np: net device to configure |
| 8280 | * @filter_dev: device on which filter is added |
| 8281 | * @cls_flower: offload data |
| 8282 | */ |
| 8283 | static int |
| 8284 | ice_setup_tc_cls_flower(struct ice_netdev_priv *np, |
| 8285 | struct net_device *filter_dev, |
| 8286 | struct flow_cls_offload *cls_flower) |
| 8287 | { |
| 8288 | struct ice_vsi *vsi = np->vsi; |
| 8289 | |
| 8290 | if (cls_flower->common.chain_index) |
| 8291 | return -EOPNOTSUPP; |
| 8292 | |
| 8293 | switch (cls_flower->command) { |
| 8294 | case FLOW_CLS_REPLACE: |
| 8295 | return ice_add_cls_flower(filter_dev, vsi, cls_flower); |
| 8296 | case FLOW_CLS_DESTROY: |
| 8297 | return ice_del_cls_flower(vsi, cls_flower); |
| 8298 | default: |
| 8299 | return -EINVAL; |
| 8300 | } |
| 8301 | } |
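/* A usage sketch (interface name, addresses and the target TC are
 * placeholders): the flower offloads dispatched above are typically installed
 * with tc, e.g.
 *
 *	# tc qdisc add dev eth0 ingress
 *	# tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.168.1.10 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 */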
| 8302 | |
| 8303 | /** |
| 8304 | * ice_setup_tc_block_cb - callback handler registered for TC block |
| 8305 | * @type: TC SETUP type |
| 8306 | * @type_data: TC flower offload data that contains user input |
| 8307 | * @cb_priv: netdev private data |
| 8308 | */ |
| 8309 | static int |
| 8310 | ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) |
| 8311 | { |
| 8312 | struct ice_netdev_priv *np = cb_priv; |
| 8313 | |
| 8314 | switch (type) { |
| 8315 | case TC_SETUP_CLSFLOWER: |
| 8316 | return ice_setup_tc_cls_flower(np, np->vsi->netdev, |
| 8317 | type_data); |
| 8318 | default: |
| 8319 | return -EOPNOTSUPP; |
| 8320 | } |
| 8321 | } |
| 8322 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8323 | /** |
| 8324 | * ice_validate_mqprio_qopt - Validate TCF input parameters |
| 8325 | * @vsi: Pointer to VSI |
| 8326 | * @mqprio_qopt: input parameters for mqprio queue configuration |
| 8327 | * |
| 8328 | * This function validates MQPRIO params, such as qcount (power of 2 wherever |
| 8329 | * needed), and makes sure the user doesn't specify a qcount or BW rate limit |
| 8330 | * for TCs beyond "num_tc" |
| 8331 | */ |
| 8332 | static int |
| 8333 | ice_validate_mqprio_qopt(struct ice_vsi *vsi, |
| 8334 | struct tc_mqprio_qopt_offload *mqprio_qopt) |
| 8335 | { |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8336 | int non_power_of_2_qcount = 0; |
| 8337 | struct ice_pf *pf = vsi->back; |
| 8338 | int max_rss_q_cnt = 0; |
Sridhar Samudrala | 5f16da6 | 2023-06-09 17:40:23 -0700 | [diff] [blame] | 8339 | u64 sum_min_rate = 0; |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8340 | struct device *dev; |
| 8341 | int i, speed; |
| 8342 | u8 num_tc; |
| 8343 | |
| 8344 | if (vsi->type != ICE_VSI_PF) |
| 8345 | return -EINVAL; |
| 8346 | |
| 8347 | if (mqprio_qopt->qopt.offset[0] != 0 || |
| 8348 | mqprio_qopt->qopt.num_tc < 1 || |
| 8349 | mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) |
| 8350 | return -EINVAL; |
| 8351 | |
| 8352 | dev = ice_pf_to_dev(pf); |
| 8353 | vsi->ch_rss_size = 0; |
| 8354 | num_tc = mqprio_qopt->qopt.num_tc; |
Sridhar Samudrala | 5f16da6 | 2023-06-09 17:40:23 -0700 | [diff] [blame] | 8355 | speed = ice_get_link_speed_kbps(vsi); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8356 | |
| 8357 | for (i = 0; num_tc; i++) { |
| 8358 | int qcount = mqprio_qopt->qopt.count[i]; |
| 8359 | u64 max_rate, min_rate, rem; |
| 8360 | |
| 8361 | if (!qcount) |
| 8362 | return -EINVAL; |
| 8363 | |
| 8364 | if (is_power_of_2(qcount)) { |
| 8365 | if (non_power_of_2_qcount && |
| 8366 | qcount > non_power_of_2_qcount) { |
| 8367 | dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", |
| 8368 | qcount, non_power_of_2_qcount); |
| 8369 | return -EINVAL; |
| 8370 | } |
| 8371 | if (qcount > max_rss_q_cnt) |
| 8372 | max_rss_q_cnt = qcount; |
| 8373 | } else { |
| 8374 | if (non_power_of_2_qcount && |
| 8375 | qcount != non_power_of_2_qcount) { |
| 8376 | dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", |
| 8377 | qcount, non_power_of_2_qcount); |
| 8378 | return -EINVAL; |
| 8379 | } |
| 8380 | if (qcount < max_rss_q_cnt) { |
| 8381 | dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", |
| 8382 | qcount, max_rss_q_cnt); |
| 8383 | return -EINVAL; |
| 8384 | } |
| 8385 | max_rss_q_cnt = qcount; |
| 8386 | non_power_of_2_qcount = qcount; |
| 8387 | } |
| 8388 | |
| 8389 | /* TC command takes input in K/M/Gbps or K/M/Gbit etc but |
| 8390 | * converts the bandwidth rate limit into Bytes/s when |
| 8391 | * passing it down to the driver. So convert input bandwidth |
| 8392 | * from Bytes/s to Kbps |
| 8393 | */ |
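/* For example (assuming ICE_BW_KBPS_DIVISOR is 125): a requested rate of
 * 1 Gbit/s arrives from the stack as 125,000,000 Bytes/s, and
 * div_u64(125000000, 125) yields 1,000,000 Kbps.
 */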
| 8394 | max_rate = mqprio_qopt->max_rate[i]; |
| 8395 | max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8396 | |
| 8397 | /* min_rate is minimum guaranteed rate and it can't be zero */ |
| 8398 | min_rate = mqprio_qopt->min_rate[i]; |
| 8399 | min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); |
| 8400 | sum_min_rate += min_rate; |
| 8401 | |
| 8402 | if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { |
| 8403 | dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, |
| 8404 | min_rate, ICE_MIN_BW_LIMIT); |
| 8405 | return -EINVAL; |
| 8406 | } |
| 8407 | |
Sridhar Samudrala | 5f16da6 | 2023-06-09 17:40:23 -0700 | [diff] [blame] | 8408 | if (max_rate && max_rate > speed) { |
| 8409 | dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n", |
| 8410 | i, max_rate, speed); |
| 8411 | return -EINVAL; |
| 8412 | } |
| 8413 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8414 | iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); |
| 8415 | if (rem) { |
| 8416 | dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", |
| 8417 | i, ICE_MIN_BW_LIMIT); |
| 8418 | return -EINVAL; |
| 8419 | } |
| 8420 | |
| 8421 | iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); |
| 8422 | if (rem) { |
| 8423 | dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", |
| 8424 | i, ICE_MIN_BW_LIMIT); |
| 8425 | return -EINVAL; |
| 8426 | } |
| 8427 | |
| 8428 | /* min_rate can't be more than max_rate, except when max_rate |
| 8429 | * is zero (implies max_rate sought is max line rate). In such |
| 8430 | * a case min_rate can be more than max. |
| 8431 | */ |
| 8432 | if (max_rate && min_rate > max_rate) { |
| 8433 | dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", |
| 8434 | min_rate, max_rate); |
| 8435 | return -EINVAL; |
| 8436 | } |
| 8437 | |
| 8438 | if (i >= mqprio_qopt->qopt.num_tc - 1) |
| 8439 | break; |
| 8440 | if (mqprio_qopt->qopt.offset[i + 1] != |
| 8441 | (mqprio_qopt->qopt.offset[i] + qcount)) |
| 8442 | return -EINVAL; |
| 8443 | } |
| 8444 | if (vsi->num_rxq < |
| 8445 | (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) |
| 8446 | return -EINVAL; |
| 8447 | if (vsi->num_txq < |
| 8448 | (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) |
| 8449 | return -EINVAL; |
| 8450 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8451 | if (sum_min_rate && sum_min_rate > (u64)speed) { |
| 8452 | dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", |
| 8453 | sum_min_rate, speed); |
| 8454 | return -EINVAL; |
| 8455 | } |
| 8456 | |
| 8457 | /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ |
| 8458 | vsi->ch_rss_size = max_rss_q_cnt; |
| 8459 | |
| 8460 | return 0; |
| 8461 | } |
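/* A configuration sketch (device name, queue counts and rates are
 * placeholders, and the rates assume a 100G link) of the mqprio channel-mode
 * input validated above, as issued via tc:
 *
 *	# tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 8@4 hw 1 mode channel \
 *		shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 8Gbit
 */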
| 8462 | |
| 8463 | /** |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8464 | * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF |
| 8465 | * @pf: ptr to PF device |
| 8466 | * @vsi: ptr to VSI |
| 8467 | */ |
| 8468 | static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) |
| 8469 | { |
| 8470 | struct device *dev = ice_pf_to_dev(pf); |
| 8471 | bool added = false; |
| 8472 | struct ice_hw *hw; |
| 8473 | int flow; |
| 8474 | |
| 8475 | if (!(vsi->num_gfltr || vsi->num_bfltr)) |
| 8476 | return -EINVAL; |
| 8477 | |
| 8478 | hw = &pf->hw; |
| 8479 | for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { |
| 8480 | struct ice_fd_hw_prof *prof; |
| 8481 | int tun, status; |
| 8482 | u64 entry_h; |
| 8483 | |
| 8484 | if (!(hw->fdir_prof && hw->fdir_prof[flow] && |
| 8485 | hw->fdir_prof[flow]->cnt)) |
| 8486 | continue; |
| 8487 | |
| 8488 | for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { |
| 8489 | enum ice_flow_priority prio; |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8490 | |
| 8491 | /* add this VSI to FDir profile for this flow */ |
| 8492 | prio = ICE_FLOW_PRIO_NORMAL; |
| 8493 | prof = hw->fdir_prof[flow]; |
Ahmed Zaki | b1f5921 | 2023-12-12 17:33:19 -0700 | [diff] [blame] | 8494 | status = ice_flow_add_entry(hw, ICE_BLK_FD, |
| 8495 | prof->prof_id[tun], |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8496 | prof->vsi_h[0], vsi->idx, |
| 8497 | prio, prof->fdir_seg[tun], |
| 8498 | &entry_h); |
| 8499 | if (status) { |
| 8500 | dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", |
| 8501 | vsi->idx, flow); |
| 8502 | continue; |
| 8503 | } |
| 8504 | |
| 8505 | prof->entry_h[prof->cnt][tun] = entry_h; |
| 8506 | } |
| 8507 | |
| 8508 | /* store VSI for filter replay and delete */ |
| 8509 | prof->vsi_h[prof->cnt] = vsi->idx; |
| 8510 | prof->cnt++; |
| 8511 | |
| 8512 | added = true; |
| 8513 | dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, |
| 8514 | flow); |
| 8515 | } |
| 8516 | |
| 8517 | if (!added) |
| 8518 | dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); |
| 8519 | |
| 8520 | return 0; |
| 8521 | } |
| 8522 | |
| 8523 | /** |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8524 | * ice_add_channel - add a channel by adding VSI |
| 8525 | * @pf: ptr to PF device |
| 8526 | * @sw_id: underlying HW switching element ID |
| 8527 | * @ch: ptr to channel structure |
| 8528 | * |
| 8529 | * Add a channel (VSI) using add_vsi and queue_map |
| 8530 | */ |
| 8531 | static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) |
| 8532 | { |
| 8533 | struct device *dev = ice_pf_to_dev(pf); |
| 8534 | struct ice_vsi *vsi; |
| 8535 | |
| 8536 | if (ch->type != ICE_VSI_CHNL) { |
| 8537 | dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); |
| 8538 | return -EINVAL; |
| 8539 | } |
| 8540 | |
| 8541 | vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); |
| 8542 | if (!vsi || vsi->type != ICE_VSI_CHNL) { |
| 8543 | dev_err(dev, "create chnl VSI failure\n"); |
| 8544 | return -EINVAL; |
| 8545 | } |
| 8546 | |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8547 | ice_add_vsi_to_fdir(pf, vsi); |
| 8548 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8549 | ch->sw_id = sw_id; |
| 8550 | ch->vsi_num = vsi->vsi_num; |
| 8551 | ch->info.mapping_flags = vsi->info.mapping_flags; |
| 8552 | ch->ch_vsi = vsi; |
| 8553 | /* set the back pointer of channel for newly created VSI */ |
| 8554 | vsi->ch = ch; |
| 8555 | |
| 8556 | memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, |
| 8557 | sizeof(vsi->info.q_mapping)); |
| 8558 | memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, |
| 8559 | sizeof(vsi->info.tc_mapping)); |
| 8560 | |
| 8561 | return 0; |
| 8562 | } |
| 8563 | |
| 8564 | /** |
| 8565 | * ice_chnl_cfg_res - configure channel specific resources |
| 8566 | * @vsi: the VSI being setup |
| 8567 | * @ch: ptr to channel structure |
| 8568 | * |
| 8569 | * Configure channel specific resources such as rings and vectors. |
| 8570 | */ |
| 8571 | static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) |
| 8572 | { |
| 8573 | int i; |
| 8574 | |
| 8575 | for (i = 0; i < ch->num_txq; i++) { |
| 8576 | struct ice_q_vector *tx_q_vector, *rx_q_vector; |
| 8577 | struct ice_ring_container *rc; |
| 8578 | struct ice_tx_ring *tx_ring; |
| 8579 | struct ice_rx_ring *rx_ring; |
| 8580 | |
| 8581 | tx_ring = vsi->tx_rings[ch->base_q + i]; |
| 8582 | rx_ring = vsi->rx_rings[ch->base_q + i]; |
| 8583 | if (!tx_ring || !rx_ring) |
| 8584 | continue; |
| 8585 | |
| 8586 | /* setup ring being channel enabled */ |
| 8587 | tx_ring->ch = ch; |
| 8588 | rx_ring->ch = ch; |
| 8589 | |
| 8590 | /* following code block sets up vector specific attributes */ |
| 8591 | tx_q_vector = tx_ring->q_vector; |
| 8592 | rx_q_vector = rx_ring->q_vector; |
| 8593 | if (!tx_q_vector && !rx_q_vector) |
| 8594 | continue; |
| 8595 | |
| 8596 | if (tx_q_vector) { |
| 8597 | tx_q_vector->ch = ch; |
| 8598 | /* setup Tx and Rx ITR setting if DIM is off */ |
| 8599 | rc = &tx_q_vector->tx; |
| 8600 | if (!ITR_IS_DYNAMIC(rc)) |
| 8601 | ice_write_itr(rc, rc->itr_setting); |
| 8602 | } |
| 8603 | if (rx_q_vector) { |
| 8604 | rx_q_vector->ch = ch; |
| 8605 | /* setup Tx and Rx ITR setting if DIM is off */ |
| 8606 | rc = &rx_q_vector->rx; |
| 8607 | if (!ITR_IS_DYNAMIC(rc)) |
| 8608 | ice_write_itr(rc, rc->itr_setting); |
| 8609 | } |
| 8610 | } |
| 8611 | |
| 8612 | /* it is safe to assume that, if the channel has a non-zero num_txq or |
| 8613 | * num_rxq, the GLINT_ITR register would have been written to perform an |
| 8614 | * in-context update, hence perform a flush |
| 8615 | */ |
| 8616 | if (ch->num_txq || ch->num_rxq) |
| 8617 | ice_flush(&vsi->back->hw); |
| 8618 | } |
| 8619 | |
| 8620 | /** |
| 8621 | * ice_cfg_chnl_all_res - configure channel resources |
| 8622 | * @vsi: ptr to main_vsi |
| 8623 | * @ch: ptr to channel structure |
| 8624 | * |
| 8625 | * This function configures channel specific resources such as flow-director |
| 8626 | * counter index, and other resources such as queues, vectors, ITR settings |
| 8627 | */ |
| 8628 | static void |
| 8629 | ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) |
| 8630 | { |
| 8631 | /* configure channel (aka ADQ) resources such as queues, vectors, |
| 8632 | * ITR settings for channel specific vectors and anything else |
| 8633 | */ |
| 8634 | ice_chnl_cfg_res(vsi, ch); |
| 8635 | } |
| 8636 | |
| 8637 | /** |
| 8638 | * ice_setup_hw_channel - setup new channel |
| 8639 | * @pf: ptr to PF device |
| 8640 | * @vsi: the VSI being setup |
| 8641 | * @ch: ptr to channel structure |
| 8642 | * @sw_id: underlying HW switching element ID |
| 8643 | * @type: type of channel to be created (VMDq2/VF) |
| 8644 | * |
| 8645 | * Setup new channel (VSI) based on specified type (VMDq2/VF) |
| 8646 | * and configures Tx rings accordingly |
| 8647 | */ |
| 8648 | static int |
| 8649 | ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, |
| 8650 | struct ice_channel *ch, u16 sw_id, u8 type) |
| 8651 | { |
| 8652 | struct device *dev = ice_pf_to_dev(pf); |
| 8653 | int ret; |
| 8654 | |
| 8655 | ch->base_q = vsi->next_base_q; |
| 8656 | ch->type = type; |
| 8657 | |
| 8658 | ret = ice_add_channel(pf, sw_id, ch); |
| 8659 | if (ret) { |
| 8660 | dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); |
| 8661 | return ret; |
| 8662 | } |
| 8663 | |
| 8664 | /* configure/setup ADQ specific resources */ |
| 8665 | ice_cfg_chnl_all_res(vsi, ch); |
| 8666 | |
| 8667 | /* make sure to update the next_base_q so that subsequent channel's |
| 8668 | * (aka ADQ) VSI queue map is correct |
| 8669 | */ |
| 8670 | vsi->next_base_q = vsi->next_base_q + ch->num_rxq; |
| 8671 | dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, |
| 8672 | ch->num_rxq); |
| 8673 | |
| 8674 | return 0; |
| 8675 | } |
| 8676 | |
| 8677 | /** |
| 8678 | * ice_setup_channel - setup new channel using uplink element |
| 8679 | * @pf: ptr to PF device |
| 8680 | * @vsi: the VSI being setup |
| 8681 | * @ch: ptr to channel structure |
| 8682 | * |
| 8683 | * Setup new channel (VSI) based on specified type (VMDq2/VF) |
| 8684 | * and uplink switching element |
| 8685 | */ |
| 8686 | static bool |
| 8687 | ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, |
| 8688 | struct ice_channel *ch) |
| 8689 | { |
| 8690 | struct device *dev = ice_pf_to_dev(pf); |
| 8691 | u16 sw_id; |
| 8692 | int ret; |
| 8693 | |
| 8694 | if (vsi->type != ICE_VSI_PF) { |
| 8695 | dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); |
| 8696 | return false; |
| 8697 | } |
| 8698 | |
| 8699 | sw_id = pf->first_sw->sw_id; |
| 8700 | |
| 8701 | /* create channel (VSI) */ |
| 8702 | ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); |
| 8703 | if (ret) { |
| 8704 | dev_err(dev, "failed to setup hw_channel\n"); |
| 8705 | return false; |
| 8706 | } |
| 8707 | dev_dbg(dev, "successfully created channel()\n"); |
| 8708 | |
| 8709 | return ch->ch_vsi ? true : false; |
| 8710 | } |
| 8711 | |
| 8712 | /** |
| 8713 | * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate |
| 8714 | * @vsi: VSI to be configured |
| 8715 | * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit |
| 8716 | * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit |
| 8717 | */ |
| 8718 | static int |
| 8719 | ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) |
| 8720 | { |
| 8721 | int err; |
| 8722 | |
| 8723 | err = ice_set_min_bw_limit(vsi, min_tx_rate); |
| 8724 | if (err) |
| 8725 | return err; |
| 8726 | |
| 8727 | return ice_set_max_bw_limit(vsi, max_tx_rate); |
| 8728 | } |
| 8729 | |
| 8730 | /** |
| 8731 | * ice_create_q_channel - function to create channel |
| 8732 | * @vsi: VSI to be configured |
| 8733 | * @ch: ptr to channel (it contains channel specific params) |
| 8734 | * |
| 8735 | * This function creates channel (VSI) using num_queues specified by user, |
| 8736 | * reconfigs RSS if needed. |
| 8737 | */ |
| 8738 | static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) |
| 8739 | { |
| 8740 | struct ice_pf *pf = vsi->back; |
| 8741 | struct device *dev; |
| 8742 | |
| 8743 | if (!ch) |
| 8744 | return -EINVAL; |
| 8745 | |
| 8746 | dev = ice_pf_to_dev(pf); |
| 8747 | if (!ch->num_txq || !ch->num_rxq) { |
| 8748 | dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); |
| 8749 | return -EINVAL; |
| 8750 | } |
| 8751 | |
| 8752 | if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { |
| 8753 | dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", |
| 8754 | vsi->cnt_q_avail, ch->num_txq); |
| 8755 | return -EINVAL; |
| 8756 | } |
| 8757 | |
| 8758 | if (!ice_setup_channel(pf, vsi, ch)) { |
| 8759 | dev_info(dev, "Failed to setup channel\n"); |
| 8760 | return -EINVAL; |
| 8761 | } |
| 8762 | /* configure BW rate limit */ |
| 8763 | if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { |
| 8764 | int ret; |
| 8765 | |
| 8766 | ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, |
| 8767 | ch->min_tx_rate); |
| 8768 | if (ret) |
| 8769 | dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", |
| 8770 | ch->max_tx_rate, ch->ch_vsi->vsi_num); |
| 8771 | else |
| 8772 | dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", |
| 8773 | ch->max_tx_rate, ch->ch_vsi->vsi_num); |
| 8774 | } |
| 8775 | |
| 8776 | vsi->cnt_q_avail -= ch->num_txq; |
| 8777 | |
| 8778 | return 0; |
| 8779 | } |
| 8780 | |
| 8781 | /** |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 8782 | * ice_rem_all_chnl_fltrs - removes all channel filters |
| 8783 | * @pf: ptr to PF, TC-flower based filter are tracked at PF level |
| 8784 | * |
| 8785 | * Remove all advanced switch filters only if they are channel specific |
| 8786 | * tc-flower based filters |
| 8787 | */ |
| 8788 | static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) |
| 8789 | { |
| 8790 | struct ice_tc_flower_fltr *fltr; |
| 8791 | struct hlist_node *node; |
| 8792 | |
| 8793 | /* to remove all channel filters, iterate an ordered list of filters */ |
| 8794 | hlist_for_each_entry_safe(fltr, node, |
| 8795 | &pf->tc_flower_fltr_list, |
| 8796 | tc_flower_node) { |
| 8797 | struct ice_rule_query_data rule; |
| 8798 | int status; |
| 8799 | |
| 8800 | /* for now process only channel specific filters */ |
| 8801 | if (!ice_is_chnl_fltr(fltr)) |
| 8802 | continue; |
| 8803 | |
| 8804 | rule.rid = fltr->rid; |
| 8805 | rule.rule_id = fltr->rule_id; |
Amritha Nambiar | 143b86f | 2022-10-21 00:58:45 -0700 | [diff] [blame] | 8806 | rule.vsi_handle = fltr->dest_vsi_handle; |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 8807 | status = ice_rem_adv_rule_by_id(&pf->hw, &rule); |
| 8808 | if (status) { |
| 8809 | if (status == -ENOENT) |
| 8810 | dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", |
| 8811 | rule.rule_id); |
| 8812 | else |
| 8813 | dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", |
| 8814 | status); |
| 8815 | } else if (fltr->dest_vsi) { |
| 8816 | /* update advanced switch filter count */ |
| 8817 | if (fltr->dest_vsi->type == ICE_VSI_CHNL) { |
| 8818 | u32 flags = fltr->flags; |
| 8819 | |
| 8820 | fltr->dest_vsi->num_chnl_fltr--; |
| 8821 | if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | |
| 8822 | ICE_TC_FLWR_FIELD_ENC_DST_MAC)) |
| 8823 | pf->num_dmac_chnl_fltrs--; |
| 8824 | } |
| 8825 | } |
| 8826 | |
| 8827 | hlist_del(&fltr->tc_flower_node); |
| 8828 | kfree(fltr); |
| 8829 | } |
| 8830 | } |
| 8831 | |
| 8832 | /** |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8833 | * ice_remove_q_channels - Remove queue channels for the TCs |
| 8834 | * @vsi: VSI to be configured |
| 8835 | * @rem_fltr: delete advanced switch filter or not |
| 8836 | * |
| 8837 | * Remove queue channels for the TCs |
| 8838 | */ |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 8839 | static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8840 | { |
| 8841 | struct ice_channel *ch, *ch_tmp; |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 8842 | struct ice_pf *pf = vsi->back; |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8843 | int i; |
| 8844 | |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 8845 | /* remove all tc-flower based filters, but only channel-specific ones */ |
| 8846 | if (rem_fltr) |
| 8847 | ice_rem_all_chnl_fltrs(pf); |
| 8848 | |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8849 | /* remove ntuple filters since queue configuration is being changed */ |
| 8850 | if (vsi->netdev->features & NETIF_F_NTUPLE) { |
| 8851 | struct ice_hw *hw = &pf->hw; |
| 8852 | |
| 8853 | mutex_lock(&hw->fdir_fltr_lock); |
| 8854 | ice_fdir_del_all_fltrs(vsi); |
| 8855 | mutex_unlock(&hw->fdir_fltr_lock); |
| 8856 | } |
| 8857 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8858 | /* perform cleanup for channels if they exist */ |
| 8859 | list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { |
| 8860 | struct ice_vsi *ch_vsi; |
| 8861 | |
| 8862 | list_del(&ch->list); |
| 8863 | ch_vsi = ch->ch_vsi; |
| 8864 | if (!ch_vsi) { |
| 8865 | kfree(ch); |
| 8866 | continue; |
| 8867 | } |
| 8868 | |
| 8869 | /* Reset queue contexts */ |
| 8870 | for (i = 0; i < ch->num_rxq; i++) { |
| 8871 | struct ice_tx_ring *tx_ring; |
| 8872 | struct ice_rx_ring *rx_ring; |
| 8873 | |
| 8874 | tx_ring = vsi->tx_rings[ch->base_q + i]; |
| 8875 | rx_ring = vsi->rx_rings[ch->base_q + i]; |
| 8876 | if (tx_ring) { |
| 8877 | tx_ring->ch = NULL; |
| 8878 | if (tx_ring->q_vector) |
| 8879 | tx_ring->q_vector->ch = NULL; |
| 8880 | } |
| 8881 | if (rx_ring) { |
| 8882 | rx_ring->ch = NULL; |
| 8883 | if (rx_ring->q_vector) |
| 8884 | rx_ring->q_vector->ch = NULL; |
| 8885 | } |
| 8886 | } |
| 8887 | |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 8888 | /* Release FD resources for the channel VSI */ |
| 8889 | ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); |
| 8890 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8891 | /* clear the VSI from scheduler tree */ |
| 8892 | ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); |
| 8893 | |
Michal Swiatkowski | 227bf45 | 2022-12-21 12:38:20 +0100 | [diff] [blame] | 8894 | /* Delete VSI from FW, PF and HW VSI arrays */ |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8895 | ice_vsi_delete(ch->ch_vsi); |
| 8896 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8897 | /* free the channel */ |
| 8898 | kfree(ch); |
| 8899 | } |
| 8900 | |
| 8901 | /* clear the channel VSI map which is stored in main VSI */ |
| 8902 | ice_for_each_chnl_tc(i) |
| 8903 | vsi->tc_map_vsi[i] = NULL; |
| 8904 | |
| 8905 | /* reset main VSI's all TC information */ |
| 8906 | vsi->all_enatc = 0; |
| 8907 | vsi->all_numtc = 0; |
| 8908 | } |
| 8909 | |
| 8910 | /** |
| 8911 | * ice_rebuild_channels - rebuild channel |
| 8912 | * @pf: ptr to PF |
| 8913 | * |
| 8914 | * Recreate channel VSIs and replay filters |
| 8915 | */ |
| 8916 | static int ice_rebuild_channels(struct ice_pf *pf) |
| 8917 | { |
| 8918 | struct device *dev = ice_pf_to_dev(pf); |
| 8919 | struct ice_vsi *main_vsi; |
| 8920 | bool rem_adv_fltr = true; |
| 8921 | struct ice_channel *ch; |
| 8922 | struct ice_vsi *vsi; |
| 8923 | int tc_idx = 1; |
| 8924 | int i, err; |
| 8925 | |
| 8926 | main_vsi = ice_get_main_vsi(pf); |
| 8927 | if (!main_vsi) |
| 8928 | return 0; |
| 8929 | |
| 8930 | if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || |
| 8931 | main_vsi->old_numtc == 1) |
| 8932 | return 0; /* nothing to be done */ |
| 8933 | |
| 8934 | /* reconfigure main VSI based on old value of TC and cached values |
| 8935 | * for MQPRIO opts |
| 8936 | */ |
| 8937 | err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); |
| 8938 | if (err) { |
| 8939 | dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", |
| 8940 | main_vsi->old_ena_tc, main_vsi->vsi_num); |
| 8941 | return err; |
| 8942 | } |
| 8943 | |
| 8944 | /* rebuild ADQ VSIs */ |
| 8945 | ice_for_each_vsi(pf, i) { |
| 8946 | enum ice_vsi_type type; |
| 8947 | |
| 8948 | vsi = pf->vsi[i]; |
| 8949 | if (!vsi || vsi->type != ICE_VSI_CHNL) |
| 8950 | continue; |
| 8951 | |
| 8952 | type = vsi->type; |
| 8953 | |
| 8954 | /* rebuild ADQ VSI */ |
Michal Swiatkowski | 6624e78 | 2022-12-21 12:38:16 +0100 | [diff] [blame] | 8955 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 8956 | if (err) { |
| 8957 | dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", |
| 8958 | ice_vsi_type_str(type), vsi->idx, err); |
| 8959 | goto cleanup; |
| 8960 | } |
| 8961 | |
| 8962 | /* Re-map HW VSI number, using the VSI handle that is also
| 8963 | * passed to the ice_replay_vsi() call below
| 8964 | */
| 8965 | vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); |
| 8966 | |
| 8967 | /* replay filters for the VSI */ |
| 8968 | err = ice_replay_vsi(&pf->hw, vsi->idx); |
| 8969 | if (err) { |
| 8970 | dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", |
| 8971 | ice_vsi_type_str(type), err, vsi->idx); |
| 8972 | rem_adv_fltr = false; |
| 8973 | goto cleanup; |
| 8974 | } |
| 8975 | dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", |
| 8976 | ice_vsi_type_str(type), vsi->idx); |
| 8977 | |
| 8978 | /* store ADQ VSI at correct TC index in main VSI's |
| 8979 | * map of TC to VSI |
| 8980 | */ |
| 8981 | main_vsi->tc_map_vsi[tc_idx++] = vsi; |
| 8982 | } |
| 8983 | |
| 8984 | /* ADQ VSI(s) have been rebuilt successfully, so set up
| 8985 | * channels for the main VSI's Tx and Rx rings
| 8986 | */
| 8987 | list_for_each_entry(ch, &main_vsi->ch_list, list) { |
| 8988 | struct ice_vsi *ch_vsi; |
| 8989 | |
| 8990 | ch_vsi = ch->ch_vsi; |
| 8991 | if (!ch_vsi) |
| 8992 | continue; |
| 8993 | |
| 8994 | /* reconfig channel resources */ |
| 8995 | ice_cfg_chnl_all_res(main_vsi, ch); |
| 8996 | |
| 8997 | /* replay BW rate limit if it is non-zero */ |
| 8998 | if (!ch->max_tx_rate && !ch->min_tx_rate) |
| 8999 | continue; |
| 9000 | |
| 9001 | err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, |
| 9002 | ch->min_tx_rate); |
| 9003 | if (err) |
| 9004 | dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", |
| 9005 | err, ch->max_tx_rate, ch->min_tx_rate, |
| 9006 | ch_vsi->vsi_num); |
| 9007 | else |
| 9008 | dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
| 9009 | ch->max_tx_rate, ch->min_tx_rate, |
| 9010 | ch_vsi->vsi_num); |
| 9011 | } |
| 9012 | |
| 9013 | /* reconfig RSS for main VSI */ |
| 9014 | if (main_vsi->ch_rss_size) |
| 9015 | ice_vsi_cfg_rss_lut_key(main_vsi); |
| 9016 | |
| 9017 | return 0; |
| 9018 | |
| 9019 | cleanup: |
| 9020 | ice_remove_q_channels(main_vsi, rem_adv_fltr); |
| 9021 | return err; |
| 9022 | } |
| 9023 | |
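/* In mqprio "channel" mode, TC0 stays on the main VSI while every additional
 * enabled TC is backed by its own channel VSI; the channel's queue count and
 * base queue come from the cached mqprio qopt (qopt.count[tc] and
 * qopt.offset[tc]).
 */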
| 9024 | /** |
| 9025 | * ice_create_q_channels - Add queue channel for the given TCs |
| 9026 | * @vsi: VSI to be configured |
| 9027 | * |
| 9028 | * Configures queue channel mapping to the given TCs |
| 9029 | */ |
| 9030 | static int ice_create_q_channels(struct ice_vsi *vsi) |
| 9031 | { |
| 9032 | struct ice_pf *pf = vsi->back; |
| 9033 | struct ice_channel *ch; |
| 9034 | int ret = 0, i; |
| 9035 | |
| 9036 | ice_for_each_chnl_tc(i) { |
| 9037 | if (!(vsi->all_enatc & BIT(i))) |
| 9038 | continue; |
| 9039 | |
| 9040 | ch = kzalloc(sizeof(*ch), GFP_KERNEL); |
| 9041 | if (!ch) { |
| 9042 | ret = -ENOMEM; |
| 9043 | goto err_free; |
| 9044 | } |
| 9045 | INIT_LIST_HEAD(&ch->list); |
| 9046 | ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; |
| 9047 | ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; |
| 9048 | ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; |
| 9049 | ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; |
| 9050 | ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; |
| 9051 | |
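/* Unit-conversion sketch (assumption: ICE_BW_KBPS_DIVISOR is 125): the mqprio
 * shaper passes rates in bytes per second, and 125 bytes/s equals 1 Kbit/s,
 * so the divisions below yield Kbits/s as expected by ice_set_bw_limit().
 */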
| 9052 | /* convert to Kbits/s */ |
| 9053 | if (ch->max_tx_rate) |
| 9054 | ch->max_tx_rate = div_u64(ch->max_tx_rate, |
| 9055 | ICE_BW_KBPS_DIVISOR); |
| 9056 | if (ch->min_tx_rate) |
| 9057 | ch->min_tx_rate = div_u64(ch->min_tx_rate, |
| 9058 | ICE_BW_KBPS_DIVISOR); |
| 9059 | |
| 9060 | ret = ice_create_q_channel(vsi, ch); |
| 9061 | if (ret) { |
| 9062 | dev_err(ice_pf_to_dev(pf), |
| 9063 | "failed creating channel TC:%d\n", i); |
| 9064 | kfree(ch); |
| 9065 | goto err_free; |
| 9066 | } |
| 9067 | list_add_tail(&ch->list, &vsi->ch_list); |
| 9068 | vsi->tc_map_vsi[i] = ch->ch_vsi; |
| 9069 | dev_dbg(ice_pf_to_dev(pf), |
| 9070 | "successfully created channel: VSI %pK\n", ch->ch_vsi); |
| 9071 | } |
| 9072 | return 0; |
| 9073 | |
| 9074 | err_free: |
| 9075 | ice_remove_q_channels(vsi, false); |
| 9076 | |
| 9077 | return ret; |
| 9078 | } |
| 9079 | |
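/* Illustration only (hypothetical interface name and queue layout): a
 * channel-mode mqprio request handled below could be created from user space
 * with something like
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 *
 * giving qopt.count[] = {4, 4} and qopt.offset[] = {0, 4}, so TC1 becomes an
 * ADQ channel owning queues 4-7 of the PF netdev.
 */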
| 9080 | /** |
| 9081 | * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes |
| 9082 | * @netdev: net device to configure |
| 9083 | * @type_data: TC offload data |
| 9084 | */ |
| 9085 | static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data) |
| 9086 | { |
| 9087 | struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; |
| 9088 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 9089 | struct ice_vsi *vsi = np->vsi; |
| 9090 | struct ice_pf *pf = vsi->back; |
| 9091 | u16 mode, ena_tc_qdisc = 0; |
| 9092 | int cur_txq, cur_rxq; |
| 9093 | u8 hw = 0, num_tcf; |
| 9094 | struct device *dev; |
| 9095 | int ret, i; |
| 9096 | |
| 9097 | dev = ice_pf_to_dev(pf); |
| 9098 | num_tcf = mqprio_qopt->qopt.num_tc; |
| 9099 | hw = mqprio_qopt->qopt.hw; |
| 9100 | mode = mqprio_qopt->mode; |
| 9101 | if (!hw) { |
| 9102 | clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); |
| 9103 | vsi->ch_rss_size = 0; |
| 9104 | memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); |
| 9105 | goto config_tcf; |
| 9106 | } |
| 9107 | |
| 9108 | /* Generate queue region map for number of TCF requested */ |
| 9109 | for (i = 0; i < num_tcf; i++) |
| 9110 | ena_tc_qdisc |= BIT(i); |
| 9111 | |
| 9112 | switch (mode) { |
| 9113 | case TC_MQPRIO_MODE_CHANNEL: |
| 9114 | |
Michal Wilczynski | 80fe30a | 2022-11-15 11:48:23 +0100 | [diff] [blame] | 9115 | if (pf->hw.port_info->is_custom_tx_enabled) { |
| 9116 | dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n"); |
| 9117 | return -EBUSY; |
| 9118 | } |
| 9119 | ice_tear_down_devlink_rate_tree(pf); |
| 9120 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9121 | ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); |
| 9122 | if (ret) { |
| 9123 | netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", |
| 9124 | ret); |
| 9125 | return ret; |
| 9126 | } |
| 9127 | memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); |
| 9128 | set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); |
Kiran Patil | 9fea749 | 2021-10-15 16:35:17 -0700 | [diff] [blame] | 9129 | /* don't assume the state of hw_tc_offload during driver load;
| 9130 | * set the flag for TC flower filters if hw_tc_offload is
| 9131 | * already ON
| 9132 | */
| 9133 | if (vsi->netdev->features & NETIF_F_HW_TC) |
| 9134 | set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9135 | break; |
| 9136 | default: |
| 9137 | return -EINVAL; |
| 9138 | } |
| 9139 | |
| 9140 | config_tcf: |
| 9141 | |
| 9142 | /* Requesting same TCF configuration as already enabled */ |
| 9143 | if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && |
| 9144 | mode != TC_MQPRIO_MODE_CHANNEL) |
| 9145 | return 0; |
| 9146 | |
| 9147 | /* Pause VSI queues */ |
| 9148 | ice_dis_vsi(vsi, true); |
| 9149 | |
| 9150 | if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) |
| 9151 | ice_remove_q_channels(vsi, true); |
| 9152 | |
| 9153 | if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { |
| 9154 | vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), |
| 9155 | num_online_cpus()); |
| 9156 | vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), |
| 9157 | num_online_cpus()); |
| 9158 | } else { |
| 9159 | /* logic to rebuild VSI, similar to ethtool -L */
| 9160 | u16 offset = 0, qcount_tx = 0, qcount_rx = 0; |
| 9161 | |
| 9162 | for (i = 0; i < num_tcf; i++) { |
| 9163 | if (!(ena_tc_qdisc & BIT(i))) |
| 9164 | continue; |
| 9165 | |
| 9166 | offset = vsi->mqprio_qopt.qopt.offset[i]; |
| 9167 | qcount_rx = vsi->mqprio_qopt.qopt.count[i]; |
| 9168 | qcount_tx = vsi->mqprio_qopt.qopt.count[i]; |
| 9169 | } |
| 9170 | vsi->req_txq = offset + qcount_tx; |
| 9171 | vsi->req_rxq = offset + qcount_rx; |
| 9172 | |
| 9173 | /* store away the original rss_size info, so that it gets reused
| 9174 | * from ice_vsi_rebuild during the tc-qdisc delete stage to
| 9175 | * determine what the rss_size should be for the main VSI
| 9176 | */
| 9177 | vsi->orig_rss_size = vsi->rss_size; |
| 9178 | } |
| 9179 | |
| 9180 | /* save current values of Tx and Rx queues before calling VSI rebuild |
| 9181 | * for fallback option |
| 9182 | */ |
| 9183 | cur_txq = vsi->num_txq; |
| 9184 | cur_rxq = vsi->num_rxq; |
| 9185 | |
| 9186 | /* proceed with rebuilding the main VSI using the correct number of queues */
Michal Swiatkowski | 6624e78 | 2022-12-21 12:38:16 +0100 | [diff] [blame] | 9187 | ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9188 | if (ret) { |
| 9189 | /* fallback to current number of queues */ |
| 9190 | dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); |
| 9191 | vsi->req_txq = cur_txq; |
| 9192 | vsi->req_rxq = cur_rxq; |
| 9193 | clear_bit(ICE_RESET_FAILED, pf->state); |
Michal Swiatkowski | 6624e78 | 2022-12-21 12:38:16 +0100 | [diff] [blame] | 9194 | if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9195 | dev_err(dev, "Rebuild of main VSI failed again\n"); |
| 9196 | return ret; |
| 9197 | } |
| 9198 | } |
| 9199 | |
| 9200 | vsi->all_numtc = num_tcf; |
| 9201 | vsi->all_enatc = ena_tc_qdisc; |
| 9202 | ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); |
| 9203 | if (ret) { |
| 9204 | netdev_err(netdev, "failed configuring TC for VSI id=%d\n", |
| 9205 | vsi->vsi_num); |
| 9206 | goto exit; |
| 9207 | } |
| 9208 | |
| 9209 | if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { |
| 9210 | u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; |
| 9211 | u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; |
| 9212 | |
| 9213 | /* set TC0 rate limit if specified */ |
| 9214 | if (max_tx_rate || min_tx_rate) { |
| 9215 | /* convert to Kbits/s */ |
| 9216 | if (max_tx_rate) |
| 9217 | max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); |
| 9218 | if (min_tx_rate) |
| 9219 | min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); |
| 9220 | |
| 9221 | ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); |
| 9222 | if (!ret) { |
| 9223 | dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", |
| 9224 | max_tx_rate, min_tx_rate, vsi->vsi_num); |
| 9225 | } else { |
| 9226 | dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", |
| 9227 | max_tx_rate, min_tx_rate, vsi->vsi_num); |
| 9228 | goto exit; |
| 9229 | } |
| 9230 | } |
| 9231 | ret = ice_create_q_channels(vsi); |
| 9232 | if (ret) { |
| 9233 | netdev_err(netdev, "failed configuring queue channels\n"); |
| 9234 | goto exit; |
| 9235 | } else { |
| 9236 | netdev_dbg(netdev, "successfully configured channels\n"); |
| 9237 | } |
| 9238 | } |
| 9239 | |
| 9240 | if (vsi->ch_rss_size) |
| 9241 | ice_vsi_cfg_rss_lut_key(vsi); |
| 9242 | |
| 9243 | exit: |
| 9244 | /* if error, reset all_numtc and all_enatc */
| 9245 | if (ret) { |
| 9246 | vsi->all_numtc = 0; |
| 9247 | vsi->all_enatc = 0; |
| 9248 | } |
| 9249 | /* resume VSI */ |
| 9250 | ice_ena_vsi(vsi, true); |
| 9251 | |
| 9252 | return ret; |
| 9253 | } |
| 9254 | |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 9255 | static LIST_HEAD(ice_block_cb_list); |
| 9256 | |
| 9257 | static int |
| 9258 | ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, |
| 9259 | void *type_data) |
| 9260 | { |
| 9261 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9262 | struct ice_pf *pf = np->vsi->back; |
Rafal Rogalski | 4b31fd4 | 2023-07-28 10:12:43 -0700 | [diff] [blame] | 9263 | bool locked = false; |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9264 | int err; |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 9265 | |
| 9266 | switch (type) { |
| 9267 | case TC_SETUP_BLOCK: |
| 9268 | return flow_block_cb_setup_simple(type_data, |
| 9269 | &ice_block_cb_list, |
| 9270 | ice_setup_tc_block_cb, |
| 9271 | np, np, true); |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9272 | case TC_SETUP_QDISC_MQPRIO: |
Marcin Szycik | 43d00e1 | 2023-08-16 12:34:05 -0700 | [diff] [blame] | 9273 | if (ice_is_eswitch_mode_switchdev(pf)) { |
| 9274 | netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); |
| 9275 | return -EOPNOTSUPP; |
| 9276 | } |
| 9277 | |
Rafal Rogalski | 4b31fd4 | 2023-07-28 10:12:43 -0700 | [diff] [blame] | 9278 | if (pf->adev) { |
| 9279 | mutex_lock(&pf->adev_mutex); |
| 9280 | device_lock(&pf->adev->dev); |
| 9281 | locked = true; |
| 9282 | if (pf->adev->dev.driver) { |
| 9283 | netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); |
| 9284 | err = -EBUSY; |
| 9285 | goto adev_unlock; |
| 9286 | } |
| 9287 | } |
| 9288 | |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9289 | /* setup traffic classifier for receive side */ |
| 9290 | mutex_lock(&pf->tc_mutex); |
| 9291 | err = ice_setup_tc_mqprio_qdisc(netdev, type_data); |
| 9292 | mutex_unlock(&pf->tc_mutex); |
Rafal Rogalski | 4b31fd4 | 2023-07-28 10:12:43 -0700 | [diff] [blame] | 9293 | |
| 9294 | adev_unlock: |
| 9295 | if (locked) { |
| 9296 | device_unlock(&pf->adev->dev); |
| 9297 | mutex_unlock(&pf->adev_mutex); |
| 9298 | } |
Kiran Patil | fbc7b27 | 2021-10-15 16:35:16 -0700 | [diff] [blame] | 9299 | return err; |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 9300 | default: |
| 9301 | return -EOPNOTSUPP; |
| 9302 | } |
| 9303 | return -EOPNOTSUPP; |
| 9304 | } |
| 9305 | |
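/* Indirect TC block support: the helpers below let the PF offload TC flower
 * filters installed on other netdevs (tunnel devices, or VLAN devices stacked
 * on the PF netdev) by binding a flow block callback per such netdev and
 * dispatching CLSFLOWER requests to ice_setup_tc_cls_flower().
 */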
Michal Swiatkowski | 195bb48 | 2021-10-12 11:31:03 -0700 | [diff] [blame] | 9306 | static struct ice_indr_block_priv * |
| 9307 | ice_indr_block_priv_lookup(struct ice_netdev_priv *np, |
| 9308 | struct net_device *netdev) |
| 9309 | { |
| 9310 | struct ice_indr_block_priv *cb_priv; |
| 9311 | |
| 9312 | list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { |
| 9313 | if (!cb_priv->netdev) |
| 9314 | return NULL; |
| 9315 | if (cb_priv->netdev == netdev) |
| 9316 | return cb_priv; |
| 9317 | } |
| 9318 | return NULL; |
| 9319 | } |
| 9320 | |
| 9321 | static int |
| 9322 | ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, |
| 9323 | void *indr_priv) |
| 9324 | { |
| 9325 | struct ice_indr_block_priv *priv = indr_priv; |
| 9326 | struct ice_netdev_priv *np = priv->np; |
| 9327 | |
| 9328 | switch (type) { |
| 9329 | case TC_SETUP_CLSFLOWER: |
| 9330 | return ice_setup_tc_cls_flower(np, priv->netdev, |
| 9331 | (struct flow_cls_offload *) |
| 9332 | type_data); |
| 9333 | default: |
| 9334 | return -EOPNOTSUPP; |
| 9335 | } |
| 9336 | } |
| 9337 | |
| 9338 | static int |
| 9339 | ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, |
| 9340 | struct ice_netdev_priv *np, |
| 9341 | struct flow_block_offload *f, void *data, |
| 9342 | void (*cleanup)(struct flow_block_cb *block_cb)) |
| 9343 | { |
| 9344 | struct ice_indr_block_priv *indr_priv; |
| 9345 | struct flow_block_cb *block_cb; |
| 9346 | |
Michal Swiatkowski | 9e30098 | 2021-10-12 11:31:04 -0700 | [diff] [blame] | 9347 | if (!ice_is_tunnel_supported(netdev) && |
| 9348 | !(is_vlan_dev(netdev) && |
| 9349 | vlan_dev_real_dev(netdev) == np->vsi->netdev)) |
| 9350 | return -EOPNOTSUPP; |
| 9351 | |
Michal Swiatkowski | 195bb48 | 2021-10-12 11:31:03 -0700 | [diff] [blame] | 9352 | if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
| 9353 | return -EOPNOTSUPP; |
| 9354 | |
| 9355 | switch (f->command) { |
| 9356 | case FLOW_BLOCK_BIND: |
| 9357 | indr_priv = ice_indr_block_priv_lookup(np, netdev); |
| 9358 | if (indr_priv) |
| 9359 | return -EEXIST; |
| 9360 | |
| 9361 | indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); |
| 9362 | if (!indr_priv) |
| 9363 | return -ENOMEM; |
| 9364 | |
| 9365 | indr_priv->netdev = netdev; |
| 9366 | indr_priv->np = np; |
| 9367 | list_add(&indr_priv->list, &np->tc_indr_block_priv_list); |
| 9368 | |
| 9369 | block_cb = |
| 9370 | flow_indr_block_cb_alloc(ice_indr_setup_block_cb, |
| 9371 | indr_priv, indr_priv, |
| 9372 | ice_rep_indr_tc_block_unbind, |
| 9373 | f, netdev, sch, data, np, |
| 9374 | cleanup); |
| 9375 | |
| 9376 | if (IS_ERR(block_cb)) { |
| 9377 | list_del(&indr_priv->list); |
| 9378 | kfree(indr_priv); |
| 9379 | return PTR_ERR(block_cb); |
| 9380 | } |
| 9381 | flow_block_cb_add(block_cb, f); |
| 9382 | list_add_tail(&block_cb->driver_list, &ice_block_cb_list); |
| 9383 | break; |
| 9384 | case FLOW_BLOCK_UNBIND: |
| 9385 | indr_priv = ice_indr_block_priv_lookup(np, netdev); |
| 9386 | if (!indr_priv) |
| 9387 | return -ENOENT; |
| 9388 | |
| 9389 | block_cb = flow_block_cb_lookup(f->block, |
| 9390 | ice_indr_setup_block_cb, |
| 9391 | indr_priv); |
| 9392 | if (!block_cb) |
| 9393 | return -ENOENT; |
| 9394 | |
| 9395 | flow_indr_block_cb_remove(block_cb, f); |
| 9396 | |
| 9397 | list_del(&block_cb->driver_list); |
| 9398 | break; |
| 9399 | default: |
| 9400 | return -EOPNOTSUPP; |
| 9401 | } |
| 9402 | return 0; |
| 9403 | } |
| 9404 | |
| 9405 | static int |
| 9406 | ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, |
| 9407 | void *cb_priv, enum tc_setup_type type, void *type_data, |
| 9408 | void *data, |
| 9409 | void (*cleanup)(struct flow_block_cb *block_cb)) |
| 9410 | { |
| 9411 | switch (type) { |
| 9412 | case TC_SETUP_BLOCK: |
| 9413 | return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, |
| 9414 | data, cleanup); |
| 9415 | |
| 9416 | default: |
| 9417 | return -EOPNOTSUPP; |
| 9418 | } |
| 9419 | } |
| 9420 | |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 9421 | /** |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9422 | * ice_open - Called when a network interface becomes active |
| 9423 | * @netdev: network interface device structure |
| 9424 | * |
| 9425 | * The open entry point is called when a network interface is made |
Anirudh Venkataramanan | df17b7e | 2018-10-26 11:44:46 -0700 | [diff] [blame] | 9426 | * active by the system (IFF_UP). At this point all resources needed |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9427 | * for transmit and receive operations are allocated, the interrupt |
| 9428 | * handler is registered with the OS, the netdev watchdog is enabled, |
| 9429 | * and the stack is notified that the interface is ready. |
| 9430 | * |
| 9431 | * Returns 0 on success, negative value on failure |
| 9432 | */ |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 9433 | int ice_open(struct net_device *netdev) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9434 | { |
| 9435 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Krzysztof Goreczny | e95fc85 | 2021-02-26 13:19:26 -0800 | [diff] [blame] | 9436 | struct ice_pf *pf = np->vsi->back; |
| 9437 | |
| 9438 | if (ice_is_reset_in_progress(pf->state)) { |
| 9439 | netdev_err(netdev, "can't open net device while reset is in progress"); |
| 9440 | return -EBUSY; |
| 9441 | } |
| 9442 | |
| 9443 | return ice_open_internal(netdev); |
| 9444 | } |
| 9445 | |
| 9446 | /** |
| 9447 | * ice_open_internal - Called when a network interface becomes active |
| 9448 | * @netdev: network interface device structure |
| 9449 | * |
| 9450 | * Internal ice_open implementation. Should not be called directly except by ice_open and the
| 9451 | * reset handling routine
| 9452 | * |
| 9453 | * Returns 0 on success, negative value on failure |
| 9454 | */ |
| 9455 | int ice_open_internal(struct net_device *netdev) |
| 9456 | { |
| 9457 | struct ice_netdev_priv *np = netdev_priv(netdev); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9458 | struct ice_vsi *vsi = np->vsi; |
Anirudh Venkataramanan | de75135 | 2020-05-07 17:41:03 -0700 | [diff] [blame] | 9459 | struct ice_pf *pf = vsi->back; |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9460 | struct ice_port_info *pi; |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9461 | int err; |
| 9462 | |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 9463 | if (test_bit(ICE_NEEDS_RESTART, pf->state)) { |
Anirudh Venkataramanan | 0f9d502 | 2018-08-09 06:29:50 -0700 | [diff] [blame] | 9464 | netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); |
| 9465 | return -EIO; |
| 9466 | } |
| 9467 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9468 | netif_carrier_off(netdev); |
| 9469 | |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9470 | pi = vsi->port_info; |
Tony Nguyen | 2ccc1c1 | 2021-10-07 16:00:23 -0700 | [diff] [blame] | 9471 | err = ice_update_link_info(pi); |
| 9472 | if (err) { |
| 9473 | netdev_err(netdev, "Failed to get link info, error %d\n", err); |
Tony Nguyen | c148469 | 2021-10-07 16:01:58 -0700 | [diff] [blame] | 9474 | return err; |
Brett Creeley | b6f934f | 2018-12-19 10:03:25 -0800 | [diff] [blame] | 9475 | } |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9476 | |
Brett Creeley | 99d4075 | 2021-10-13 09:02:19 -0700 | [diff] [blame] | 9477 | ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); |
Anirudh Venkataramanan | c77849f5 | 2021-05-06 08:40:01 -0700 | [diff] [blame] | 9478 | |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9479 | /* Set PHY if there is media, otherwise, turn off PHY */ |
| 9480 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 9481 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
Anirudh Venkataramanan | 7e408e0 | 2021-03-02 10:15:38 -0800 | [diff] [blame] | 9482 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 9483 | err = ice_init_phy_user_cfg(pi); |
| 9484 | if (err) { |
| 9485 | netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", |
| 9486 | err); |
| 9487 | return err; |
| 9488 | } |
| 9489 | } |
| 9490 | |
| 9491 | err = ice_configure_phy(vsi); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9492 | if (err) { |
Anirudh Venkataramanan | 19cce2c | 2020-02-06 01:20:10 -0800 | [diff] [blame] | 9493 | netdev_err(netdev, "Failed to set physical link up, error %d\n", |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9494 | err); |
| 9495 | return err; |
| 9496 | } |
| 9497 | } else { |
Paul Greenwalt | 1a3571b | 2020-07-09 09:16:06 -0700 | [diff] [blame] | 9498 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
Anirudh Venkataramanan | d348d51 | 2021-03-25 15:35:07 -0700 | [diff] [blame] | 9499 | ice_set_link(vsi, false); |
Tony Nguyen | 6d59994 | 2019-06-26 02:20:17 -0700 | [diff] [blame] | 9500 | } |
| 9501 | |
Brett Creeley | b6f934f | 2018-12-19 10:03:25 -0800 | [diff] [blame] | 9502 | err = ice_vsi_open(vsi); |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9503 | if (err) |
| 9504 | netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", |
| 9505 | vsi->vsi_num, vsi->vsw->sw_id); |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 9506 | |
| 9507 | /* Update existing tunnels information */ |
| 9508 | udp_tunnel_get_rx_info(netdev); |
| 9509 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9510 | return err; |
| 9511 | } |
| 9512 | |
| 9513 | /** |
| 9514 | * ice_stop - Disables a network interface |
| 9515 | * @netdev: network interface device structure |
| 9516 | * |
| 9517 | * The stop entry point is called when an interface is de-activated by the OS, |
Anirudh Venkataramanan | df17b7e | 2018-10-26 11:44:46 -0700 | [diff] [blame] | 9518 | * and the netdevice enters the DOWN state. The hardware is still under the |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9519 | * driver's control, but the netdev interface is disabled. |
| 9520 | * |
| 9521 | * Returns success only - not allowed to fail |
| 9522 | */ |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 9523 | int ice_stop(struct net_device *netdev) |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9524 | { |
| 9525 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 9526 | struct ice_vsi *vsi = np->vsi; |
Krzysztof Goreczny | e95fc85 | 2021-02-26 13:19:26 -0800 | [diff] [blame] | 9527 | struct ice_pf *pf = vsi->back; |
| 9528 | |
| 9529 | if (ice_is_reset_in_progress(pf->state)) { |
| 9530 | netdev_err(netdev, "can't stop net device while reset is in progress"); |
| 9531 | return -EBUSY; |
| 9532 | } |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9533 | |
Mateusz Palczewski | 8ac7132 | 2022-08-26 10:31:23 +0200 | [diff] [blame] | 9534 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { |
| 9535 | int link_err = ice_force_phys_link_state(vsi, false); |
| 9536 | |
| 9537 | if (link_err) { |
Katarzyna Wieczerzycka | 6a8d8bb5 | 2023-12-15 12:01:56 +0100 | [diff] [blame] | 9538 | if (link_err == -ENOMEDIUM) |
| 9539 | netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", |
| 9540 | vsi->vsi_num); |
| 9541 | else |
| 9542 | netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", |
| 9543 | vsi->vsi_num, link_err); |
Ngai-Mint Kwan | 6d05ff5 | 2023-12-15 12:01:57 +0100 | [diff] [blame] | 9544 | |
| 9545 | ice_vsi_close(vsi); |
Mateusz Palczewski | 8ac7132 | 2022-08-26 10:31:23 +0200 | [diff] [blame] | 9546 | return -EIO; |
| 9547 | } |
| 9548 | } |
| 9549 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9550 | ice_vsi_close(vsi); |
| 9551 | |
| 9552 | return 0; |
| 9553 | } |
| 9554 | |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9555 | /** |
| 9556 | * ice_features_check - Validate encapsulated packet conforms to limits |
| 9557 | * @skb: skb buffer |
| 9558 | * @netdev: This port's netdev |
| 9559 | * @features: Offload features that the stack believes apply |
| 9560 | */ |
| 9561 | static netdev_features_t |
| 9562 | ice_features_check(struct sk_buff *skb, |
| 9563 | struct net_device __always_unused *netdev, |
| 9564 | netdev_features_t features) |
| 9565 | { |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9566 | bool gso = skb_is_gso(skb); |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9567 | size_t len; |
| 9568 | |
| 9569 | /* No point in doing any of this if neither checksum nor GSO are |
Anirudh Venkataramanan | df17b7e | 2018-10-26 11:44:46 -0700 | [diff] [blame] | 9570 | * being requested for this frame. We can rule out both by just |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9571 | * checking for CHECKSUM_PARTIAL |
| 9572 | */ |
| 9573 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
| 9574 | return features; |
| 9575 | |
| 9576 | /* We cannot support GSO if the MSS is going to be less than |
Anirudh Venkataramanan | df17b7e | 2018-10-26 11:44:46 -0700 | [diff] [blame] | 9577 | * 64 bytes. If it is then we need to drop support for GSO. |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9578 | */ |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9579 | if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9580 | features &= ~NETIF_F_GSO_MASK; |
| 9581 | |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9582 | len = skb_network_offset(skb); |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 9583 | if (len > ICE_TXD_MACLEN_MAX || len & 0x1) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9584 | goto out_rm_features; |
| 9585 | |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9586 | len = skb_network_header_len(skb); |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 9587 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9588 | goto out_rm_features; |
| 9589 | |
| 9590 | if (skb->encapsulation) { |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9591 | /* this must work for VXLAN frames AND IPIP/SIT frames, and in |
| 9592 | * the case of IPIP frames, the transport header pointer is |
| 9593 | * after the inner header! So check to make sure that this |
| 9594 | * is a GRE or UDP_TUNNEL frame before doing that math. |
| 9595 | */ |
| 9596 | if (gso && (skb_shinfo(skb)->gso_type & |
| 9597 | (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { |
| 9598 | len = skb_inner_network_header(skb) - |
| 9599 | skb_transport_header(skb); |
| 9600 | if (len > ICE_TXD_L4LEN_MAX || len & 0x1) |
| 9601 | goto out_rm_features; |
| 9602 | } |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9603 | |
Jesse Brandeburg | 46b699c | 2022-01-14 15:38:39 -0800 | [diff] [blame] | 9604 | len = skb_inner_network_header_len(skb); |
Tony Nguyen | a4e82a8 | 2020-05-06 09:32:30 -0700 | [diff] [blame] | 9605 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9606 | goto out_rm_features; |
| 9607 | } |
| 9608 | |
| 9609 | return features; |
| 9610 | out_rm_features: |
| 9611 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
| 9612 | } |
| 9613 | |
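/* Reduced netdev ops used while the driver is in safe mode (typically when
 * the DDP package could not be loaded): only basic open/stop/Tx and simple
 * housekeeping callbacks are exposed, and ndo_bpf is routed to the safe-mode
 * XDP handler.
 */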
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 9614 | static const struct net_device_ops ice_netdev_safe_mode_ops = { |
| 9615 | .ndo_open = ice_open, |
| 9616 | .ndo_stop = ice_stop, |
| 9617 | .ndo_start_xmit = ice_start_xmit, |
| 9618 | .ndo_set_mac_address = ice_set_mac_address, |
| 9619 | .ndo_validate_addr = eth_validate_addr, |
| 9620 | .ndo_change_mtu = ice_change_mtu, |
| 9621 | .ndo_get_stats64 = ice_get_stats64, |
| 9622 | .ndo_tx_timeout = ice_tx_timeout, |
Maciej Fijalkowski | ebc5399 | 2021-05-20 08:34:59 +0200 | [diff] [blame] | 9623 | .ndo_bpf = ice_xdp_safe_mode, |
Tony Nguyen | 462acf6a | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 9624 | }; |
| 9625 | |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9626 | static const struct net_device_ops ice_netdev_ops = { |
| 9627 | .ndo_open = ice_open, |
| 9628 | .ndo_stop = ice_stop, |
Anirudh Venkataramanan | 2b245cb | 2018-03-20 07:58:14 -0700 | [diff] [blame] | 9629 | .ndo_start_xmit = ice_start_xmit, |
Dave Ertman | 2a87bd7 | 2021-08-06 13:53:56 -0700 | [diff] [blame] | 9630 | .ndo_select_queue = ice_select_queue, |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9631 | .ndo_features_check = ice_features_check, |
Brett Creeley | 1babaf7 | 2021-12-02 08:38:50 -0800 | [diff] [blame] | 9632 | .ndo_fix_features = ice_fix_features, |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9633 | .ndo_set_rx_mode = ice_set_rx_mode, |
| 9634 | .ndo_set_mac_address = ice_set_mac_address, |
| 9635 | .ndo_validate_addr = eth_validate_addr, |
| 9636 | .ndo_change_mtu = ice_change_mtu, |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 9637 | .ndo_get_stats64 = ice_get_stats64, |
Usha Ketineni | 1ddef45 | 2019-11-06 02:05:28 -0800 | [diff] [blame] | 9638 | .ndo_set_tx_maxrate = ice_set_tx_maxrate, |
Arnd Bergmann | a760537 | 2021-07-27 15:45:13 +0200 | [diff] [blame] | 9639 | .ndo_eth_ioctl = ice_eth_ioctl, |
Anirudh Venkataramanan | 7c71086 | 2018-09-19 17:42:58 -0700 | [diff] [blame] | 9640 | .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, |
| 9641 | .ndo_set_vf_mac = ice_set_vf_mac, |
| 9642 | .ndo_get_vf_config = ice_get_vf_cfg, |
| 9643 | .ndo_set_vf_trust = ice_set_vf_trust, |
| 9644 | .ndo_set_vf_vlan = ice_set_vf_port_vlan, |
| 9645 | .ndo_set_vf_link_state = ice_set_vf_link_state, |
Jesse Brandeburg | 730fdea | 2019-11-08 06:23:28 -0800 | [diff] [blame] | 9646 | .ndo_get_vf_stats = ice_get_vf_stats, |
Brett Creeley | 4ecc863 | 2021-09-13 11:22:19 -0700 | [diff] [blame] | 9647 | .ndo_set_vf_rate = ice_set_vf_bw, |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 9648 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
| 9649 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
Kiran Patil | 0d08a44 | 2021-08-06 10:49:05 +0200 | [diff] [blame] | 9650 | .ndo_setup_tc = ice_setup_tc, |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 9651 | .ndo_set_features = ice_set_features, |
Md Fahad Iqbal Polash | b1edc14 | 2018-08-09 06:29:54 -0700 | [diff] [blame] | 9652 | .ndo_bridge_getlink = ice_bridge_getlink, |
| 9653 | .ndo_bridge_setlink = ice_bridge_setlink, |
Anirudh Venkataramanan | e94d447 | 2018-03-20 07:58:19 -0700 | [diff] [blame] | 9654 | .ndo_fdb_add = ice_fdb_add, |
| 9655 | .ndo_fdb_del = ice_fdb_del, |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 9656 | #ifdef CONFIG_RFS_ACCEL |
| 9657 | .ndo_rx_flow_steer = ice_rx_flow_steer, |
| 9658 | #endif |
Sudheer Mogilappagari | b3969fd | 2018-08-09 06:29:53 -0700 | [diff] [blame] | 9659 | .ndo_tx_timeout = ice_tx_timeout, |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 9660 | .ndo_bpf = ice_xdp, |
| 9661 | .ndo_xdp_xmit = ice_xdp_xmit, |
Krzysztof Kazimierczak | 2d4238f | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 9662 | .ndo_xsk_wakeup = ice_xsk_wakeup, |
Anirudh Venkataramanan | cdedef5 | 2018-03-20 07:58:13 -0700 | [diff] [blame] | 9663 | }; |