// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

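/* msg->sg.data is used as a ring: start/end wrap around NR_MSG_FRAG_IDS.
 * Coalescing into the element just before msg->sg.end is only safe while
 * the ring is non-empty and elem_first_coalesce still lies inside the
 * live [start, end) window; the two branches below cover the non-wrapped
 * (end > start) and wrapped (end < start) layouts.
 */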
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

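/* Grow @msg so that it holds @len bytes in total, pulling pages from the
 * socket's page frag. New bytes are coalesced into the last scatterlist
 * element when they are contiguous in the same page frag; otherwise a
 * new element is appended. On -ENOMEM the msg is trimmed back to its
 * original size; -ENOSPC means the scatterlist ring is full.
 */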
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

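/* Share @len bytes starting at @off from @src into @dst without copying:
 * additional page references are taken (via sk_msg_page_add()) and only
 * the socket's memory accounting is charged. Bytes that extend @dst's
 * last element in the same page are coalesced. Returns -ENOSPC when
 * @src runs out of data or @dst runs out of slots.
 */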
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

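/* Give back @bytes of charged memory to the socket. sk_msg_return_zero()
 * additionally zeroes the consumed scatterlist entries and advances
 * msg->sg.start past them; sk_msg_return() leaves the ring untouched
 * and only walks it to adjust the memory accounting.
 */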
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

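/* Shrink @msg to @len bytes total, releasing whole trailing elements and
 * truncating the last partial one. curr/copybreak are pulled back when
 * they point into the trimmed region so later copies land correctly.
 */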
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start
	 * at the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

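/* Pin user pages from @from and link them into @msg without copying, up
 * to @bytes or until the scatterlist ring fills up (-EFAULT). On error
 * the iterator is reverted; the caller is expected to trim the msg.
 */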
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set. In this case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert the iov_iter updates; the msg will need 'trim' later if
	 * it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

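/* Copy @bytes from @from into the sg elements of @msg, resuming at the
 * curr/copybreak position that earlier copies (or a trim) left behind.
 * Returns a negative errno on failure: -EFAULT for a faulting copy,
 * -ENOSPC when the msg has no room left.
 */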
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

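/* Map @len bytes of @skb starting at @off into @msg's scatterlist, queue
 * the msg on the psock's ingress list and wake up the receiver. A failed
 * skb_to_sgvec() is retried once after linearizing the skb.
 */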
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition seeing it already set
	 * correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * via consume_skb() from __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

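/* Deliver one skb either out to the network (egress) or onto the local
 * ingress msg queue. For egress the skb is sent as-is via skb_send_sock();
 * for ingress an extra reference is taken first and dropped again if
 * queueing fails, since sk_psock_skb_ingress() consumes the skb only on
 * success.
 */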
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	int err = 0;

	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	skb_get(skb);
	err = sk_psock_skb_ingress(psock, skb, off, len);
	if (err < 0)
		kfree_skb(skb);
	return err;
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->len = len;
		state->off = off;
	}
	spin_unlock_bh(&psock->ingress_lock);
}

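/* Work item draining psock->ingress_skb. Partial progress (len/off) is
 * saved in work_state so an -EAGAIN can resume where it left off; the
 * work is rescheduled with a small delay in that case. Hard errors
 * break the pipe and disable further transmission.
 */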
static void sk_psock_backlog(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->len)) {
		len = state->len;
		off = state->off;
	}

	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, len, off);

					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

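/* Allocate and attach a psock to @sk. Fails with -EINVAL if an inet
 * socket already runs a ULP, or -EBUSY if sk_user_data is already taken.
 * The original proto callbacks are saved so they can be restored when
 * the psock is dropped.
 */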
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

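/* Forward @skb to the socket chosen by a BPF redirect verdict. The skb
 * is queued on the target psock's ingress_skb list and its backlog work
 * is kicked; the skb is dropped if the target is gone, dead or has TX
 * disabled.
 */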
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

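/* Apply a BPF verdict to @skb on the non-TLS path. __SK_PASS lands the
 * data on this psock's own ingress queue (directly when the queue is
 * empty, otherwise via the backlog work to preserve ordering);
 * __SK_REDIRECT hands it to another socket; everything else drops it.
 */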
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Errors
		 * from sk_psock_skb_ingress are handled by retrying
		 * later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

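/* Strparser integration: when a stream parser program is attached, skbs
 * are framed by the parser (sk_psock_strp_parse) before the stream
 * verdict program runs on each parsed message (sk_psock_strp_read).
 */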
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

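/* Initialize the strparser for @sk and mark the psock so that
 * sk_psock_done_strp() knows strp_done() must be called on teardown.
 */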
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	return ret;
}

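/* Install the strparser data_ready callback, saving the socket's
 * original one so sk_psock_stop_strp() can restore it. A no-op if a
 * callback has already been saved.
 */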
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

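/* Detach the stream parser program and restore the socket's original
 * data_ready callback before stopping the strparser.
 */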
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

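/* Final strparser teardown; only meaningful if sk_psock_init_strp()
 * succeeded and set SK_PSOCK_RX_STRP_ENABLED.
 */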
static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

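/* read_skb() callback for the non-strparser verdict path: run the
 * stream verdict program (falling back to the plain skb verdict
 * program) and apply the result. Returns the number of bytes consumed,
 * zero when no psock is attached, or a negative error from
 * sk_psock_verdict_apply().
 */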
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

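/* Data-ready replacement for the verdict-only path: pull skbs out of
 * the protocol via read_skb() and feed them to sk_psock_verdict_recv().
 * sock->ops is fetched with READ_ONCE() because it can change
 * concurrently.
 */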
static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock) {
			read_lock_bh(&sk->sk_callback_lock);
			sk_psock_data_ready(sk, psock);
			read_unlock_bh(&sk->sk_callback_lock);
		}
		rcu_read_unlock();
	}
}

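/* Install the verdict-only data_ready callback; mirrors
 * sk_psock_start_strp() but with no strparser involved.
 */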
void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

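/* Drop both verdict programs and restore the socket's original
 * data_ready callback.
 */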
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}