// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

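/* Return true if a TX ring has already been attached to this queue's
 * notify block.
 */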
bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	return block->tx != NULL;
}

void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

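/* Attach the TX ring to its notify block and record the block index on
 * the ring. Also program XPS so transmitting CPUs are spread across the
 * queues, using at most half of the notify blocks' worth of online CPUs
 * (the other half of the blocks serve RX).
 */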
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	return block->rx != NULL;
}

void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

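/* Allocate a small linear skb from the NAPI cache and copy @len bytes of
 * packet data into it; the protocol is derived from the Ethernet header.
 * Used on the copy (non-zero-copy) receive paths.
 */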
struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
				 u8 *data, u16 len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, 0, data, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len)
{
	void *va = page_info->page_address + page_info->page_offset +
		page_info->pad;

	return gve_rx_copy_data(dev, napi, va, len);
}

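/* RX pages carry a large up-front reference; per-fragment recycling only
 * decrements this local bias. Once the bias is exhausted, top both the
 * bias and the page refcount back up towards INT_MAX in a single
 * page_ref_add().
 */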
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}

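/* Register the notify block's NAPI context with the stack, using
 * @gve_poll as the poll callback.
 */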
void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
		  int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll);
}

void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}