// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

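/*
 * Illustrative userspace usage (iproute2, not part of this file): a veth
 * pair is typically created with
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * Frames transmitted on one device of the pair show up as received
 * traffic on its peer.
 */
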
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

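/*
 * XDP_TX frames are staged in an on-stack veth_xdp_tx_bq during a NAPI poll
 * and pushed to the peer's xdp_ring in batches by veth_xdp_flush_bq(), so
 * the ring's producer lock is taken once per batch instead of once per
 * frame.
 */
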
/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	u8 *p = buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++)
			for (j = 0; j < VETH_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%.18s",
						i, veth_rq_stats_desc[j].desc);

		for (i = 0; i < dev->real_num_tx_queues; i++)
			for (j = 0; j < VETH_TQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%.18s",
						i, veth_tq_stats_desc[j].desc);
		break;
	}
}

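/*
 * With the format strings above, the per-queue keys reported via
 * "ethtool -S" look like, for example, "rx_queue_0_xdp_packets" and
 * "tx_queue_0_xdp_xmit" (example names derived from veth_rq_stats_desc
 * and veth_tq_stats_desc).
 */
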
static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}

static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->num_tx_queues;
	channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
	.set_channels		= veth_set_channels,
};

/* general routines */

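/*
 * Both sk_buff and xdp_frame pointers travel through the same xdp_ring.
 * xdp_frame pointers are tagged by setting bit 0 (VETH_XDP_FLAG), which is
 * always clear in an untagged kernel pointer due to alignment, so the
 * consumer side can tell the two kinds apart.
 */
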
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

/* Return true if the specified skb has chances of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check for that if the skb is suspected
 * to belong to locally generated UDP traffic (the sock_wfree destructor
 * is used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated.
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (use_napi)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

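/* Map the producing CPU onto one of the peer's rx queues; with fewer rx
 * queues than CPUs, several CPUs share the same queue.
 */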
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

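/* Enqueue xdp_frames onto the peer's ring. Returns the number of frames
 * actually queued (or a negative errno); under the ndo_xdp_xmit contract
 * frames beyond that count remain owned by the caller, which must free
 * them (veth_xdp_flush_bq() below does so for the internal XDP_TX path).
 */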
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_prepare_for_gro(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* The SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb) {
				if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
					netif_receive_skb(skb);
				else
					napi_gro_receive(&rq->xdp_napi, skb);
			}
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

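/*
 * NAPI poll handler. Together with __veth_xdp_flush() this implements a
 * wakeup handshake: the producer's smp_mb() (write the ring, then read
 * rx_notify_masked) pairs with the consumer's smp_store_mb() below (clear
 * rx_notify_masked, then re-check the ring), so a frame enqueued while
 * the poller is completing is never left unserviced.
 */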
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;

err_xdp_ring:
	for (i--; i >= start; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static int __veth_napi_enable(struct net_device *dev)
{
	return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static void veth_napi_del(struct net_device *dev)
{
	veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}

static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}

static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
				 bool napi_already_on)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		if (!napi_already_on)
			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
		if (err < 0)
			goto err_rxq_reg;

		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED,
						 NULL);
		if (err < 0)
			goto err_reg_mem;

		/* Save original mem info as it can be overwritten */
		rq->xdp_mem = rq->xdp_rxq.mem;
	}
	return 0;

err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= start; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
				   bool delete_napi)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);

		if (delete_napi)
			netif_napi_del(&rq->xdp_napi);
	}
}

static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
		if (err)
			return err;

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err) {
				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
				return err;
			}

			if (!veth_gro_requested(dev)) {
				/* user-space did not require GRO, but adding XDP
				 * is supposed to get GRO working
				 */
				dev->features |= NETIF_F_GRO;
				netdev_features_change(dev);
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev)) {
		veth_napi_del(dev);

		/* if user-space did not require GRO, since adding XDP
		 * enabled it, clear it now
		 */
		if (!veth_gro_requested(dev) && netif_running(dev)) {
			dev->features &= ~NETIF_F_GRO;
			netdev_features_change(dev);
		}
	}

	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}

static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
	}

	err = __veth_napi_enable_range(dev, start, end);
	if (err) {
		for (i = start; i < end; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
		return err;
	}
	return err;
}

static int veth_napi_enable(struct net_device *dev)
{
	return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_disable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);

	if (start >= end)
		return;

	if (priv->_xdp_prog) {
		veth_napi_del_range(dev, start, end);
		veth_disable_xdp_range(dev, start, end, false);
	} else if (veth_gro_requested(dev)) {
		veth_napi_del_range(dev, start, end);
	}
}

static int veth_enable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (start >= end)
		return 0;

	if (priv->_xdp_prog) {
		/* these channels are freshly initialized, NAPI is not running
		 * on them even when GRO is requested
		 */
		err = veth_enable_xdp_range(dev, start, end, false);
		if (err)
			return err;

		err = __veth_napi_enable_range(dev, start, end);
		if (err) {
			/* on error always delete the newly added napis */
			veth_disable_xdp_range(dev, start, end, true);
			return err;
		}
	} else if (veth_gro_requested(dev)) {
		return veth_napi_enable_range(dev, start, end);
	}
	return 0;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct veth_priv *priv = netdev_priv(dev);
	unsigned int old_rx_count, new_rx_count;
	struct veth_priv *peer_priv;
	struct net_device *peer;
	int err;

	/* sanity check. Upper bounds are already enforced by the caller */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	/* avoid breaking XDP, if that is enabled */
	peer = rtnl_dereference(priv->peer);
	peer_priv = peer ? netdev_priv(peer) : NULL;
	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
		return -EINVAL;

	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
		return -EINVAL;

	old_rx_count = dev->real_num_rx_queues;
	new_rx_count = ch->rx_count;
	if (netif_running(dev)) {
		/* turn device off */
		netif_carrier_off(dev);
		if (peer)
			netif_carrier_off(peer);

		/* try to allocate new resources, as needed */
		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
		if (err)
			goto out;
	}

	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
	if (err)
		goto revert;

	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
	if (err) {
		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

		/* this error condition could happen only if rx and tx change
		 * in opposite directions (e.g. the tx count increases while
		 * the rx count decreases) and we can't do anything to fully
		 * restore the original status
		 */
		if (err2)
			pr_warn("Can't restore rx queues config %d -> %d %d",
				new_rx_count, old_rx_count, err2);
		else
			goto revert;
	}

out:
	if (netif_running(dev)) {
		/* note that we need to swap the arguments WRT the enable part
		 * to identify the range we have to disable
		 */
		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
		netif_carrier_on(dev);
		if (peer)
			netif_carrier_on(peer);
	}
	return err;

revert:
	new_rx_count = old_rx_count;
	old_rx_count = ch->rx_count;
	goto out;
}

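/*
 * Illustrative userspace invocation (ethtool, not part of this file): the
 * queue counts handled above can be changed at runtime with, e.g.,
 *
 *	ethtool -L veth0 rx 4 tx 4
 */
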
static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
1355{
1356 /* veth only receives frames when its peer sends one
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001357 * Since it has nothing to do with disabling irqs, we are guaranteed
WANG Congbb446c12014-06-23 15:36:02 -07001358 * never to have pending data when we poll for it so
1359 * there is nothing to do here.
1360 *
1361 * We need this though so netpoll recognizes us as an interface that
1362 * supports polling, which enables bridge devices in virt setups to
1363 * still use netconsole
1364 */
1365}
1366#endif /* CONFIG_NET_POLL_CONTROLLER */
1367
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001368static int veth_get_iflink(const struct net_device *dev)
1369{
1370 struct veth_priv *priv = netdev_priv(dev);
1371 struct net_device *peer;
1372 int iflink;
1373
1374 rcu_read_lock();
1375 peer = rcu_dereference(priv->peer);
1376 iflink = peer ? peer->ifindex : 0;
1377 rcu_read_unlock();
1378
1379 return iflink;
1380}
1381
Toshiaki Makitadc224822018-08-03 16:58:11 +09001382static netdev_features_t veth_fix_features(struct net_device *dev,
1383 netdev_features_t features)
1384{
1385 struct veth_priv *priv = netdev_priv(dev);
1386 struct net_device *peer;
1387
1388 peer = rtnl_dereference(priv->peer);
1389 if (peer) {
1390 struct veth_priv *peer_priv = netdev_priv(peer);
1391
1392 if (peer_priv->_xdp_prog)
1393 features &= ~NETIF_F_GSO_SOFTWARE;
1394 }
Paolo Abenid3256ef2021-04-09 13:04:38 +02001395 if (priv->_xdp_prog)
1396 features |= NETIF_F_GRO;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001397
1398 return features;
1399}
1400
Paolo Abenid3256ef2021-04-09 13:04:38 +02001401static int veth_set_features(struct net_device *dev,
1402 netdev_features_t features)
1403{
1404 netdev_features_t changed = features ^ dev->features;
1405 struct veth_priv *priv = netdev_priv(dev);
1406 int err;
1407
1408 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1409 return 0;
1410
1411 if (features & NETIF_F_GRO) {
1412 err = veth_napi_enable(dev);
1413 if (err)
1414 return err;
1415 } else {
1416 veth_napi_del(dev);
1417 }
1418 return 0;
1419}
1420
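/* Example: flipping GRO from user space drives the NAPI handling above.
 * On a running veth without XDP, `ethtool -K veth0 gro on` ends up in
 * veth_napi_enable(), and `ethtool -K veth0 gro off` in veth_napi_del()
 * ("veth0" is an assumed device name).
 */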
Paolo Abeni163e5292016-02-26 10:45:41 +01001421static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1422{
1423 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1424 struct net_device *peer;
1425
1426 if (new_hr < 0)
1427 new_hr = 0;
1428
1429 rcu_read_lock();
1430 peer = rcu_dereference(priv->peer);
1431 if (unlikely(!peer))
1432 goto out;
1433
1434 peer_priv = netdev_priv(peer);
1435 priv->requested_headroom = new_hr;
1436 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1437 dev->needed_headroom = new_hr;
1438 peer->needed_headroom = new_hr;
1439
1440out:
1441 rcu_read_unlock();
1442}
1443
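/* Example: if one end of the pair requests 128 bytes of headroom and the
 * other 64, both ends end up advertising needed_headroom = 128, since the
 * handler above always takes the max of the two peers' requests (the
 * numbers are illustrative).
 */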
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001444static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1445 struct netlink_ext_ack *extack)
1446{
1447 struct veth_priv *priv = netdev_priv(dev);
1448 struct bpf_prog *old_prog;
1449 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001450 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001451 int err;
1452
1453 old_prog = priv->_xdp_prog;
1454 priv->_xdp_prog = prog;
1455 peer = rtnl_dereference(priv->peer);
1456
1457 if (prog) {
1458 if (!peer) {
1459 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1460 err = -ENOTCONN;
1461 goto err;
1462 }
1463
Toshiaki Makitadc224822018-08-03 16:58:11 +09001464 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1465 peer->hard_header_len -
1466 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1467 if (peer->mtu > max_mtu) {
1468 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1469 err = -ERANGE;
1470 goto err;
1471 }
1472
Toshiaki Makita638264d2018-08-03 16:58:18 +09001473 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1474 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1475 err = -ENOSPC;
1476 goto err;
1477 }
1478
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001479 if (dev->flags & IFF_UP) {
1480 err = veth_enable_xdp(dev);
1481 if (err) {
1482 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1483 goto err;
1484 }
1485 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001486
1487 if (!old_prog) {
1488 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1489 peer->max_mtu = max_mtu;
1490 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001491 }
1492
1493 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001494 if (!prog) {
1495 if (dev->flags & IFF_UP)
1496 veth_disable_xdp(dev);
1497
1498 if (peer) {
1499 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1500 peer->max_mtu = ETH_MAX_MTU;
1501 }
1502 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001503 bpf_prog_put(old_prog);
1504 }
1505
Toshiaki Makitadc224822018-08-03 16:58:11 +09001506 if ((!!old_prog ^ !!prog) && peer)
1507 netdev_update_features(peer);
1508
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001509 return 0;
1510err:
1511 priv->_xdp_prog = old_prog;
1512
1513 return err;
1514}
1515
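/* Worked example for the max_mtu bound computed above, assuming a 4K-page
 * x86-64 build: XDP_PACKET_HEADROOM = 256, NET_IP_ALIGN = 0, Ethernet
 * hard_header_len = 14 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * = 320 (the last value is config dependent):
 *
 *	max_mtu = 4096 - 256 - 14 - 320 = 3506
 *
 * so attaching XDP is rejected with -ERANGE while the peer MTU is above
 * roughly 3.5K bytes.
 */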
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001516static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1517{
1518 switch (xdp->command) {
1519 case XDP_SETUP_PROG:
1520 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001521 default:
1522 return -EINVAL;
1523 }
1524}
1525
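/* Example (user-space sketch, not part of this driver): .ndo_bpf above is
 * invoked when an XDP program is attached to a veth, e.g. via libbpf.
 * "xdp_pass.o" and the program name are assumptions.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>

static int attach_xdp(const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file("xdp_pass.o", NULL);
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "xdp_pass");
	if (!prog)
		return -1;

	/* flags = 0 lets the kernel pick the mode; veth supports native XDP */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
}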
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001526static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001527 .ndo_init = veth_dev_init,
1528 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001529 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001530 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001531 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001532 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001533 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001534#ifdef CONFIG_NET_POLL_CONTROLLER
1535 .ndo_poll_controller = veth_poll_controller,
1536#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001537 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001538 .ndo_fix_features = veth_fix_features,
Paolo Abenid3256ef2021-04-09 13:04:38 +02001539 .ndo_set_features = veth_set_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001540 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001541 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001542 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001543 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001544 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001545};
1546
Alexander Duyck732912d72016-04-19 14:02:26 -04001547#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001548 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001549 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001550 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1551 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +00001552
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001553static void veth_setup(struct net_device *dev)
1554{
1555 ether_setup(dev);
1556
Neil Horman550fd082011-07-26 06:05:38 +00001557 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
Hannes Frederic Sowa23ea5a92012-10-30 16:22:01 +00001558 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
Phil Sutter02f01ec2015-08-18 10:30:29 +02001559 dev->priv_flags |= IFF_NO_QUEUE;
Paolo Abeni163e5292016-02-26 10:45:41 +01001560 dev->priv_flags |= IFF_PHONY_HEADROOM;
Neil Horman550fd082011-07-26 06:05:38 +00001561
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001562 dev->netdev_ops = &veth_netdev_ops;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001563 dev->ethtool_ops = &veth_ethtool_ops;
1564 dev->features |= NETIF_F_LLTX;
Eric Dumazet80933152012-12-29 16:26:10 +00001565 dev->features |= VETH_FEATURES;
Toshiaki Makita8d0d21f2014-02-18 21:20:08 +09001566 dev->vlan_features = dev->features &
Vlad Yasevich3f8c7072014-03-27 22:14:48 -04001567 ~(NETIF_F_HW_VLAN_CTAG_TX |
1568 NETIF_F_HW_VLAN_STAG_TX |
1569 NETIF_F_HW_VLAN_CTAG_RX |
1570 NETIF_F_HW_VLAN_STAG_RX);
David S. Millercf124db2017-05-08 12:52:56 -04001571 dev->needs_free_netdev = true;
1572 dev->priv_destructor = veth_dev_free;
Jarod Wilson91572082016-10-20 13:55:20 -04001573 dev->max_mtu = ETH_MAX_MTU;
Michał Mirosława2c725f2011-03-31 01:01:35 +00001574
Eric Dumazet80933152012-12-29 16:26:10 +00001575 dev->hw_features = VETH_FEATURES;
Eric Dumazet82d81892013-10-25 18:25:03 -07001576 dev->hw_enc_features = VETH_FEATURES;
David Ahern607fca92016-08-24 20:10:45 -07001577 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001578}
1579
1580/*
1581 * netlink interface
1582 */
1583
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001584static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1585 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001586{
1587 if (tb[IFLA_ADDRESS]) {
1588 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1589 return -EINVAL;
1590 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1591 return -EADDRNOTAVAIL;
1592 }
Eric Biederman38d40812009-03-03 23:36:04 -08001593 if (tb[IFLA_MTU]) {
1594 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1595 return -EINVAL;
1596 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001597 return 0;
1598}
1599
1600static struct rtnl_link_ops veth_link_ops;
1601
Paolo Abenid3256ef2021-04-09 13:04:38 +02001602static void veth_disable_gro(struct net_device *dev)
1603{
1604 dev->features &= ~NETIF_F_GRO;
1605 dev->wanted_features &= ~NETIF_F_GRO;
1606 netdev_update_features(dev);
1607}
1608
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001609static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1610{
1611 int err;
1612
1613 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1614 err = netif_set_real_num_tx_queues(dev, 1);
1615 if (err)
1616 return err;
1617 }
1618 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1619 err = netif_set_real_num_rx_queues(dev, 1);
1620 if (err)
1621 return err;
1622 }
1623 return 0;
1624}
1625
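/* Note: veth_get_num_queues() below sizes the pair with up to one queue
 * per possible CPU, but unless the user passes IFLA_NUM_TX_QUEUES /
 * IFLA_NUM_RX_QUEUES (`ip link add ... numtxqueues N numrxqueues N`) the
 * real queue count is collapsed back to 1 here, preserving the historic
 * single-queue behavior.
 */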
Eric W. Biederman81adee42009-11-08 00:53:51 -08001626static int veth_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +02001627 struct nlattr *tb[], struct nlattr *data[],
1628 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001629{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001630 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001631 struct net_device *peer;
1632 struct veth_priv *priv;
1633 char ifname[IFNAMSIZ];
1634 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
Tom Gundersen55177502014-07-14 16:37:25 +02001635 unsigned char name_assign_type;
Patrick McHardy3729d502010-02-26 06:34:54 +00001636 struct ifinfomsg *ifmp;
Eric W. Biederman81adee42009-11-08 00:53:51 -08001637 struct net *net;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001638
1639 /*
1640 * create and register peer first
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001641 */
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001642 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1643 struct nlattr *nla_peer;
1644
1645 nla_peer = data[VETH_INFO_PEER];
Patrick McHardy3729d502010-02-26 06:34:54 +00001646 ifmp = nla_data(nla_peer);
Jiri Pirkof7b12602014-02-18 20:53:18 +01001647 err = rtnl_nla_parse_ifla(peer_tb,
1648 nla_data(nla_peer) + sizeof(struct ifinfomsg),
Johannes Bergfceb6432017-04-12 14:34:07 +02001649 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1650 NULL);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001651 if (err < 0)
1652 return err;
1653
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001654 err = veth_validate(peer_tb, NULL, extack);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001655 if (err < 0)
1656 return err;
1657
1658 tbp = peer_tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001659 } else {
1660 ifmp = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001661 tbp = tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001662 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001663
Serhey Popovych191cdb32017-06-21 12:12:24 +03001664 if (ifmp && tbp[IFLA_IFNAME]) {
Francis Laniel872f6902020-11-15 18:08:06 +01001665 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
Tom Gundersen55177502014-07-14 16:37:25 +02001666 name_assign_type = NET_NAME_USER;
1667 } else {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001668 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
Tom Gundersen55177502014-07-14 16:37:25 +02001669 name_assign_type = NET_NAME_ENUM;
1670 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001671
Eric W. Biederman81adee42009-11-08 00:53:51 -08001672 net = rtnl_link_get_net(src_net, tbp);
1673 if (IS_ERR(net))
1674 return PTR_ERR(net);
1675
Tom Gundersen55177502014-07-14 16:37:25 +02001676 peer = rtnl_create_link(net, ifname, name_assign_type,
David Ahernd0522f12018-11-06 12:51:14 -08001677 &veth_link_ops, tbp, extack);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001678 if (IS_ERR(peer)) {
1679 put_net(net);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001680 return PTR_ERR(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001681 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001682
Serhey Popovych191cdb32017-06-21 12:12:24 +03001683 if (!ifmp || !tbp[IFLA_ADDRESS])
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001684 eth_hw_addr_random(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001685
Pavel Emelyanove6f8f1a2012-08-08 21:53:03 +00001686 if (ifmp && (dev->ifindex != 0))
1687 peer->ifindex = ifmp->ifi_index;
1688
Eric Dumazet4b66d212021-11-19 07:43:31 -08001689 netif_set_gso_max_size(peer, dev->gso_max_size);
Eric Dumazet6d872df2021-11-19 07:43:32 -08001690 netif_set_gso_max_segs(peer, dev->gso_max_segs);
Stephen Hemminger72d249552017-12-07 15:40:20 -08001691
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001692 err = register_netdevice(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001693 put_net(net);
1694 net = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001695 if (err < 0)
1696 goto err_register_peer;
1697
Paolo Abenid3256ef2021-04-09 13:04:38 +02001698 /* keep GRO disabled by default to be consistent with the established
1699 * veth behavior
1700 */
1701 veth_disable_gro(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001702 netif_carrier_off(peer);
1703
Patrick McHardy3729d502010-02-26 06:34:54 +00001704 err = rtnl_configure_link(peer, ifmp);
1705 if (err < 0)
1706 goto err_configure_peer;
1707
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001708 /*
1709 * register dev last
1710 *
1711 * note that, since we've registered a new device, the dev's
1712 * name should be re-allocated
1713 */
1714
1715 if (tb[IFLA_ADDRESS] == NULL)
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001716 eth_hw_addr_random(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001717
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001718 if (tb[IFLA_IFNAME])
Francis Laniel872f6902020-11-15 18:08:06 +01001719 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001720 else
1721 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1722
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001723 err = register_netdevice(dev);
1724 if (err < 0)
1725 goto err_register_dev;
1726
1727 netif_carrier_off(dev);
1728
1729 /*
1730 * tie the devices together
1731 */
1732
1733 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001734 rcu_assign_pointer(priv->peer, peer);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001735 err = veth_init_queues(dev, tb);
1736 if (err)
1737 goto err_queues;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001738
1739 priv = netdev_priv(peer);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001740 rcu_assign_pointer(priv->peer, dev);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001741 err = veth_init_queues(peer, tb);
1742 if (err)
1743 goto err_queues;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001744
Paolo Abenid3256ef2021-04-09 13:04:38 +02001745 veth_disable_gro(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001746 return 0;
1747
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001748err_queues:
1749 unregister_netdevice(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001750err_register_dev:
1751 /* nothing to do */
Patrick McHardy3729d502010-02-26 06:34:54 +00001752err_configure_peer:
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001753 unregister_netdevice(peer);
1754 return err;
1755
1756err_register_peer:
1757 free_netdev(peer);
1758 return err;
1759}
1760
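/* Example (user-space, not part of this driver): veth_newlink() runs for
 * both devices of a pair created with iproute2, e.g.
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * optionally with explicit queue counts, which flow into
 * veth_init_queues() above:
 *
 *	ip link add veth0 numrxqueues 4 numtxqueues 4 \
 *		type veth peer name veth1
 */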
Eric Dumazet23289a32009-10-27 07:06:36 +00001761static void veth_dellink(struct net_device *dev, struct list_head *head)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001762{
1763 struct veth_priv *priv;
1764 struct net_device *peer;
1765
1766 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001767 peer = rtnl_dereference(priv->peer);
1768
1769 /* Note : dellink() is called from default_device_exit_batch(),
1770 * before a rcu_synchronize() point. The devices are guaranteed
1771 * not being freed before one RCU grace period.
1772 */
1773 RCU_INIT_POINTER(priv->peer, NULL);
Eric Dumazet24540532009-10-30 01:00:27 -07001774 unregister_netdevice_queue(dev, head);
Eric Dumazetf45a5c22013-02-08 20:10:49 +00001775
1776 if (peer) {
1777 priv = netdev_priv(peer);
1778 RCU_INIT_POINTER(priv->peer, NULL);
1779 unregister_netdevice_queue(peer, head);
1780 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001781}
1782
Thomas Graf23711432012-02-15 04:09:46 +00001783static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1784 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1785};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001786
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001787static struct net *veth_get_link_net(const struct net_device *dev)
1788{
1789 struct veth_priv *priv = netdev_priv(dev);
1790 struct net_device *peer = rtnl_dereference(priv->peer);
1791
1792 return peer ? dev_net(peer) : dev_net(dev);
1793}
1794
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001795static unsigned int veth_get_num_queues(void)
1796{
1797 /* enforce the same queue limit as rtnl_create_link */
1798 int queues = num_possible_cpus();
1799
1800 if (queues > 4096)
1801 queues = 4096;
1802 return queues;
1803}
1804
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001805static struct rtnl_link_ops veth_link_ops = {
1806 .kind = DRV_NAME,
1807 .priv_size = sizeof(struct veth_priv),
1808 .setup = veth_setup,
1809 .validate = veth_validate,
1810 .newlink = veth_newlink,
1811 .dellink = veth_dellink,
1812 .policy = veth_policy,
1813 .maxtype = VETH_INFO_MAX,
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001814 .get_link_net = veth_get_link_net,
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001815 .get_num_tx_queues = veth_get_num_queues,
1816 .get_num_rx_queues = veth_get_num_queues,
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001817};
1818
1819/*
1820 * init/fini
1821 */
1822
1823static __init int veth_init(void)
1824{
1825 return rtnl_link_register(&veth_link_ops);
1826}
1827
1828static __exit void veth_exit(void)
1829{
Patrick McHardy68365452008-01-20 17:25:14 -08001830 rtnl_link_unregister(&veth_link_ops);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001831}
1832
1833module_init(veth_init);
1834module_exit(veth_exit);
1835
1836MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1837MODULE_LICENSE("GPL v2");
1838MODULE_ALIAS_RTNL_LINK(DRV_NAME);