// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

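/* Detach the TX ring from its notify block so the block's NAPI handler
 * no longer services this queue.
 */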
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

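/* Attach the TX ring to its notify block and record the block id on the
 * ring. TX queues use at most half of the notify blocks, so spread XPS
 * transmit affinity over min(num_ntfy_blks / 2, num_online_cpus()) CPUs.
 */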
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

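/* Detach the RX ring from its notify block. */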
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

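/* Attach the RX ring to its notify block and record the block id on the
 * ring.
 */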
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

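/* Copy @len bytes of a received buffer into an skb's linear area. When a
 * multi-fragment context is supplied, fragments accumulate in ctx->skb_head
 * and the protocol is only resolved once the last expected fragment has
 * been copied. Returns NULL if skb allocation fails.
 */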
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len,
			    u16 padding, struct gve_rx_ctx *ctx)
{
	void *va = page_info->page_address + padding + page_info->page_offset;
	int skb_linear_offset = 0;
	bool set_protocol = false;
	struct sk_buff *skb;

	if (ctx) {
		if (!ctx->skb_head)
			ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);

		if (unlikely(!ctx->skb_head))
			return NULL;
		skb = ctx->skb_head;
		skb_linear_offset = skb->len;
		set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
	} else {
		skb = napi_alloc_skb(napi, len);

		/* Bail out before touching the skb if allocation failed. */
		if (unlikely(!skb))
			return NULL;
		set_protocol = true;
	}
	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);

	if (set_protocol)
		skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

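/* Consume one reference from the page's refcount bias. Once the bias is
 * exhausted, recharge both the bias and the page's refcount back toward
 * INT_MAX with a single page_ref_add().
 */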
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}