// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

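/* The sk_msg scatterlist is a ring whose sg.start/sg.end indices may wrap.
 * Coalescing into the last element is only considered safe while
 * elem_first_coalesce still references an element inside the currently
 * used region of the ring; the two branches below cover the unwrapped
 * and wrapped cases.
 */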
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

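/* Grow @msg to @len bytes of scatterlist space backed by the socket's page
 * frag allocator, charging the socket for each chunk. The last element is
 * extended in place when the new frag is contiguous with it and coalescing
 * is permitted; otherwise a fresh element is appended, failing with
 * -ENOSPC once the ring is full.
 */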
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

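/* Clone @len bytes starting at @off from @src into @dst. The underlying
 * pages are shared rather than copied: ranges adjacent on the same page
 * are merged into the last @dst element, and the socket is charged for
 * the cloned bytes. Returns -ENOSPC if @src runs out of data or @dst runs
 * out of slots before @len is satisfied.
 */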
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

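/* Uncharge @bytes from the socket and consume them from the front of @msg:
 * fully consumed elements have their length and offset cleared and sg.start
 * is advanced past them, while a partially consumed element is trimmed from
 * its front. sk_msg_return() below only returns the accounting and leaves
 * the elements themselves untouched.
 */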
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

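/* Free helpers: sk_msg_free_elem() drops a single element, releasing its
 * page reference and uncharging the socket unless the memory is owned by an
 * attached skb, while __sk_msg_free() walks the whole ring, releases any
 * attached skb and reinitializes @msg. The _nocharge variants skip the
 * uncharge step for memory that was never charged to the socket.
 */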
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb() path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

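/* Trim @msg from the tail down to @len bytes, freeing whole elements where
 * possible and shortening the last remaining one. The curr/copybreak copy
 * position is pulled back if it would otherwise point past the new end of
 * the message.
 */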
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at the
	 * new copy location. However, trimmed data that has not yet been used
	 * in a copy op does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

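/* Pin up to @bytes of user pages from @from directly into @msg's
 * scatterlist, avoiding a data copy. On failure the iterator is reverted;
 * the caller is expected to trim @msg afterwards if it also wants the
 * partially attached pages removed.
 */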
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we may
		 * have a copybreak set; in that case clear it and prefer the
		 * zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

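/* Copy @bytes from @from into pages already attached to @msg, starting at
 * the curr element and copybreak offset and advancing both as data lands.
 * Uses the non-caching copy variant when the route supports it.
 */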
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

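/* Ingress path: skbs delivered to a psock are wrapped in a sk_msg and
 * queued on the receiving socket. Allocation is refused when the
 * destination socket is already over its receive buffer limit or cannot
 * schedule the required memory, in which case the caller retries later.
 */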
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition; both are
	 * already set up correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock_locked(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

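/* Work callback that drains psock->ingress_skb. A send that returns -EAGAIN
 * is parked in psock->work_state so the next run resumes from the same skb
 * and offset; any other error reports the failure and disables the psock's
 * transmit path.
 */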
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

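/* Allocate a psock and attach it to @sk via sk_user_data. Fails with
 * -EINVAL if the socket already has a ULP and -EBUSY if sk_user_data is
 * already taken; the original proto callbacks are saved so they can be
 * restored when the psock is dropped.
 */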
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */

	/* Parser has been stopped */
	if (psock->progs.skb_parser)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

static void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

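/* Map a BPF program's SK_PASS/SK_DROP return code onto the internal
 * __SK_* action, folding SK_PASS into __SK_REDIRECT when the program also
 * selected a redirect target.
 */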
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	bpf_compute_data_end_sk_skb(skb);
	return bpf_prog_run_pin_on_cpu(prog, skb);
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

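/* Hand a redirected skb to the psock of the target socket chosen by the
 * BPF program; actual delivery happens asynchronously from that psock's
 * backlog work. The skb is dropped if no valid, live target exists.
 */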
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = tcp_skb_bpf_redirect_fetch(skb);
	/* This error is a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		kfree_skb(skb);
		return;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		skb_set_owner_r(skb, sk);
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		/* We skip the full skb_set_owner_r() here because if we do a
		 * SK_PASS or SK_DROP we can skip skb memory accounting and
		 * use the TLS context.
		 */
		skb->sk = psock->sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct tcp_skb_cb *tcp;
	struct sock *sk_other;
	int err = -EIO;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		tcp = TCP_SKB_CB(skb);
		tcp->bpf.flags |= BPF_F_INGRESS;

		/* If the queue is empty then we can submit directly into the
		 * msg queue. If it's not empty we have to queue work,
		 * otherwise we may get OOO data. Errors from
		 * sk_psock_skb_ingress_self() are handled by retrying later
		 * from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			skb_queue_tail(&psock->ingress_skb, skb);
			schedule_work(&psock->work);
		}
		break;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}

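/* strparser rcv_msg callback: run the verdict program on a fully parsed
 * skb and apply the resulting __SK_* action. The skb is dropped if the
 * psock has already gone away.
 */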
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	skb_set_owner_r(skb, sk);
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = sk_psock_bpf_run(psock, prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->parser.saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->parser.strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		kfree_skb(skb);
		goto out;
	}
	skb_set_owner_r(skb, sk);
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

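/* A psock receives in one of two modes: full strparser mode, where a parser
 * program delimits messages before the verdict program runs, or
 * verdict-only mode driven directly from ->read_sock(). The helpers below
 * install and remove the corresponding sk_data_ready hooks.
 */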
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	parser->enabled = false;
}