// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/util_macros.h>

#include <net/inet_common.h>
#include <net/tls.h>

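/* Advance TCP receive accounting past an skb that was consumed by a BPF
 * verdict program instead of being copied to user space. Updating
 * copied_seq and cleaning the receive buffer keeps window and rcvbuf
 * bookkeeping consistent. skbs owned by the strparser are skipped, as
 * the strparser path does its own accounting.
 */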
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tcp;
	int copied;

	if (!skb || !skb->len || !sk_is_tcp(sk))
		return;

	if (skb_bpf_strparser(skb))
		return;

	tcp = tcp_sk(sk);
	copied = tcp->copied_seq + skb->len;
	WRITE_ONCE(tcp->copied_seq, copied);
	tcp_rcv_space_adjust(sk);
	__tcp_cleanup_rbuf(sk, skb->len);
}

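/* Redirect up to @apply_bytes from @msg onto @psock's ingress queue.
 * Scatterlist entries are transferred into a freshly allocated sk_msg,
 * charging the receiving socket's memory accounting for each element,
 * and the psock is woken via sk_psock_data_ready() once the message is
 * queued. Returns -ENOMEM if nothing could be copied.
 */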
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes) {
				if (sge->length)
					sk_msg_iter_var_prev(i);
				break;
			}
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

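/* Transmit up to @apply_bytes from @msg on @sk, page by page, via
 * tcp_sendmsg_locked() with MSG_SPLICE_PAGES so pages are spliced
 * rather than copied. Partial sends retry from the new offset; fully
 * consumed scatterlist entries drop their page reference and advance
 * msg->sg.start. When @uncharge is true, transmitted bytes are also
 * uncharged from the socket's memory accounting.
 */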
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	struct msghdr msghdr = {};
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		struct bio_vec bvec;
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp)
			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;

		if (size < sge->length && msg->sg.start != msg->sg.end)
			msghdr.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, size, off);
		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
		ret = tcp_sendmsg_locked(sk, &msghdr, size);
		if (ret <= 0)
			return ret;

		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

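/* Same as tcp_bpf_push(), but takes and releases the socket lock around
 * the push for callers that do not already hold it.
 */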
static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

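/* Entry point for BPF sk_msg redirection: deliver @bytes from @msg to
 * the target socket @sk, either onto its ingress queue (@ingress) or
 * out through its transmit path. Returns -EPIPE when the target has no
 * psock attached.
 */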
int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags)
{
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock))
		return -EPIPE;

	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
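/* Sleep until the psock ingress queue or the socket receive queue has
 * data, the timeout expires, or the peer has shut down the receive side
 * (in which case 1 is returned immediately). Returns the sk_wait_event()
 * result, which the callers below treat as: nonzero when woken by data,
 * 0 on timeout, negative on error.
 */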
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

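/* Check whether the next queued ingress message is a zero-length entry
 * carrying an skb with the FIN flag set, i.e. a graceful shutdown
 * rather than real data.
 */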
static bool is_next_msg_fin(struct sk_psock *psock)
{
	struct scatterlist *sge;
	struct sk_msg *msg_rx;
	int i;

	msg_rx = sk_psock_peek_msg(psock);
	i = msg_rx->sg.start;
	sge = sk_msg_elem(msg_rx, i);
	if (!sge->length) {
		struct sk_buff *skb = msg_rx->skb;

		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			return true;
	}
	return false;
}

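/* recvmsg() handler installed when a stream or skb verdict program is
 * attached. Data is read from the psock ingress queue rather than the
 * TCP receive queue, with copied_seq advanced by hand so TCP sequence
 * accounting (including the FIN) stays correct. MSG_PEEK reads leave
 * copied_seq untouched.
 */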
static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int flags,
				  int *addr_len)
{
	struct tcp_sock *tcp = tcp_sk(sk);
	int peek = flags & MSG_PEEK;
	u32 seq = tcp->copied_seq;
	struct sk_psock *psock;
	int copied = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);

	lock_sock(sk);

	/* We may have received data on the sk_receive_queue pre-accept and
	 * then we cannot use read_skb in this context because we haven't
	 * assigned a sk_socket yet and so have no link to the ops. The
	 * work-around is to check the sk_receive_queue and in these cases
	 * read skbs off the queue again. The read_skb hook is not running
	 * at this point because of lock_sock, so we avoid having multiple
	 * runners in read_skb.
	 */
	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
		tcp_data_ready(sk);
		/* This handles the ENOMEM errors if we both receive data
		 * pre accept and are already under memory pressure. At least
		 * let user know to retry.
		 */
		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
			copied = -EAGAIN;
			goto out;
		}
	}

msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* The typical case for EFAULT is the socket was gracefully
	 * shutdown with a FIN pkt, so check for that here; any other
	 * error from copy_page_to_iter would be unexpected. On FIN,
	 * return 0 as the correct return code and account for the FIN
	 * in seq.
	 */
	if (copied == -EFAULT) {
		bool is_fin = is_next_msg_fin(psock);

		if (is_fin) {
			copied = 0;
			seq++;
			goto out;
		}
	}
	seq += copied;
	if (!copied) {
		long timeo;
		int data;

		if (sock_flag(sk, SOCK_DONE))
			goto out;

		if (sk->sk_err) {
			copied = sock_error(sk);
			goto out;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;

		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			goto out;
		}

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		if (!timeo) {
			copied = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			goto out;
		}

		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data < 0) {
			copied = data;
			goto unlock;
		}
		if (data && !sk_psock_queue_empty(psock))
			goto msg_bytes_ready;
		copied = -EAGAIN;
	}
out:
	if (!peek)
		WRITE_ONCE(tcp->copied_seq, seq);
	tcp_rcv_space_adjust(sk);
	if (copied > 0)
		__tcp_cleanup_rbuf(sk, copied);

unlock:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}

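/* recvmsg() handler for the base sockmap config (no verdict program
 * feeding the ingress queue). Falls back to tcp_recvmsg() whenever the
 * psock queue is empty but the TCP receive queue has data, so regular
 * TCP traffic keeps flowing through the normal path.
 */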
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data < 0) {
			ret = data;
			goto unlock;
		}
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;

unlock:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

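/* Run the attached msg verdict program over @msg and act on its
 * decision: __SK_PASS pushes the data out of @sk, __SK_REDIRECT hands
 * it to another socket (egress or ingress), and __SK_DROP frees it and
 * returns -EACCES. Corking is handled here too: when the program asks
 * for more bytes than are queued, the message is stashed in psock->cork
 * until enough data accumulates.
 */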
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
	struct sock *sk_redir;
	u32 tosend, origsize, sent, delta = 0;
	u32 eval;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;
	eval = __SK_NONE;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);

		origsize = msg->sg.size;
		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    msg, tosend, flags);
		sent = origsize - msg->sg.size;

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, tosend - sent);
			goto more_data;
		}
	}
	return ret;
}

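/* sendmsg() handler installed when a msg parser program is attached.
 * User data is copied into an sk_msg (honoring any active cork) and
 * then run through tcp_bpf_send_verdict(). Internal sendpage flags are
 * masked out of msg_flags and MSG_NO_SHARED_FRAGS is added before the
 * data is pushed out.
 */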
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

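/* sockmap installs one of four proto variants per address family,
 * depending on which BPF programs are attached: BASE only overrides
 * recvmsg/close/destroy, TX adds the sendmsg hook for msg parser
 * programs, RX adds the parser-aware recvmsg for verdict programs, and
 * TXRX combines both.
 */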
enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_RX,
	TCP_BPF_TXRX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

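/* Derive all four proto variants from @base (tcp_prot or tcpv6_prot).
 * Each variant starts from the base callbacks and swaps in only the
 * sockmap handlers it needs, so anything not overridden falls through
 * to plain TCP. For IPv6 this must be redone whenever the saved
 * tcpv6_prot changes, which tcp_bpf_check_v6_needs_rebuild() handles.
 */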
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].destroy = sock_map_destroy;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;

	prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;

	prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
	prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}

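/* Install (or, with @restore, remove) the sockmap proto variant that
 * matches the programs attached to @psock. For example, an IPv6 socket
 * with both a msg parser and a stream verdict program attached ends up
 * with tcp_bpf_prots[TCP_BPF_IPV6][TCP_BPF_TXRX]. Restoring a socket
 * that runs a TLS ULP goes through tcp_update_ulp() instead of a plain
 * proto swap so the ULP's callbacks stay intact.
 */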
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
		config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
	}

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			sock_replace_proto(sk, psock->sk_proto);
		}
		return 0;
	}

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	struct proto *prot = newsk->sk_prot;

	if (is_insidevar(prot, tcp_bpf_prots))
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */