// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allows a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

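/* Driver-facing helpers for the need_wakeup flag. The set/clear pairs below
 * toggle XDP_RING_NEED_WAKEUP on the fill ring and on every Tx ring sharing
 * the pool, so user space knows when it has to kick the kernel with a
 * syscall. The state is cached in the pool to avoid touching the rings when
 * nothing changes.
 */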
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

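/* Encode the Rx ring handle for a received buffer. In aligned mode the data
 * offset within the chunk is simply added to the chunk address; in unaligned
 * mode the offset is carried in the upper bits of the handle, shifted by
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT.
 */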
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

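/* Copy-mode receive: allocate a buffer from the pool, copy the packet
 * (including any metadata) into it and post the new buffer on the Rx ring.
 */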
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

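/* Treat the socket as writeable only while the Tx ring is less than half
 * full, so poll() does not keep signalling EPOLLOUT on a nearly full ring.
 */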
static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

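/* Called from the XDP_REDIRECT path. The packet is received right away, but
 * the Rx ring producer pointer is only published later, when the per-cpu
 * flush list is walked by __xsk_map_flush() at the end of the NAPI poll.
 */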
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

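/* Used by zero-copy drivers to fetch one Tx descriptor from any socket
 * sharing this pool. A completion queue slot is reserved up front so that
 * completing the descriptor later can never fail.
 */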
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
					u32 max_entries)
{
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
				   u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

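/* skb destructor for copy-mode Tx: the descriptor address stashed in
 * destructor_arg is published to the completion queue once the skb is
 * freed, i.e. when the packet has left the driver.
 */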
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

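/* Build an skb whose payload stays in the umem pages (no linear copy), used
 * for devices that advertise IFF_TX_SKB_NO_LINEAR. The umem pages are
 * attached as frags with their reference counts raised for the skb lifetime.
 */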
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = xs->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}

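/* Copy-mode (SKB) transmit path: drain up to TX_BATCH_SIZE descriptors from
 * the Tx ring, build an skb per descriptor and hand it straight to the
 * driver queue with __dev_direct_xmit().
 */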
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return __xsk_sendmsg(sk);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

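/* poll() doubles as a kick point: if the driver has requested a wakeup,
 * either wake the zero-copy driver or drive the copy-mode Tx path before
 * reporting readiness of the Rx and Tx rings.
 */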
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

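/* Bind the socket to a device and queue id. Three cases are handled: a
 * socket with its own umem, XDP_SHARED_UMEM on the same device/queue
 * (sharing the buffer pool), and XDP_SHARED_UMEM on a different device or
 * queue (sharing only the umem, with a new pool).
 */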
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

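/* mmap() of the four rings. The ring to map is selected by the page offset
 * passed by user space (the XDP_PGOFF_* constants), and the whole ring is
 * remapped into the caller's address space.
 */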
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);