/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached, the sock object may only be used as a
 * sock redirect target.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in it having multiple parsing programs, the update returns an EBUSY error.
 *
 * For reference, this map type is similar to the devmap used in the XDP
 * context, so reviewing the two together may be useful. For an example,
 * please see ./samples/bpf/sockmap/.
 */
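/* Illustrative sketch: from user space a sockmap is typically driven through
 * the bpf() syscall wrappers in tools/lib/bpf, roughly as below. The program
 * fds, socket fd and key are placeholders and error handling is omitted; see
 * ./samples/bpf/sockmap/ for complete programs.
 *
 *	int key = 0;
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int), sizeof(int), 32, 0);
 *
 *	bpf_prog_attach(parse_prog_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 *	bpf_map_update_elem(map_fd, &key, &tcp_sock_fd, BPF_ANY);
 *
 * Sockets added after the programs are attached inherit them; with no
 * programs attached the map can still be used as a redirect target.
 */
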
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <linux/ptr_ring.h>
#include <net/inet_common.h>
#include <linux/sched/signal.h>

Chenbo Feng6e71b042017-10-18 13:00:22 -070048#define SOCK_CREATE_FLAG_MASK \
49 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
50
John Fastabende5cd3ab2018-05-14 10:00:16 -070051struct bpf_sock_progs {
John Fastabend4f738ad2018-03-18 12:57:10 -070052 struct bpf_prog *bpf_tx_msg;
John Fastabend174a79f2017-08-15 22:32:47 -070053 struct bpf_prog *bpf_parse;
54 struct bpf_prog *bpf_verdict;
John Fastabend174a79f2017-08-15 22:32:47 -070055};
56
John Fastabende5cd3ab2018-05-14 10:00:16 -070057struct bpf_stab {
58 struct bpf_map map;
59 struct sock **sock_map;
60 struct bpf_sock_progs progs;
Daniel Borkmann585f5a62018-08-16 21:49:10 +020061 raw_spinlock_t lock;
John Fastabende5cd3ab2018-05-14 10:00:16 -070062};
63
John Fastabend81110382018-05-14 10:00:17 -070064struct bucket {
65 struct hlist_head head;
66 raw_spinlock_t lock;
67};
68
69struct bpf_htab {
70 struct bpf_map map;
71 struct bucket *buckets;
72 atomic_t count;
73 u32 n_buckets;
74 u32 elem_size;
75 struct bpf_sock_progs progs;
John Fastabende9db4ef2018-06-30 06:17:47 -070076 struct rcu_head rcu;
John Fastabend81110382018-05-14 10:00:17 -070077};
78
79struct htab_elem {
80 struct rcu_head rcu;
81 struct hlist_node hash_node;
82 u32 hash;
83 struct sock *sk;
84 char key[0];
85};
86
John Fastabend174a79f2017-08-15 22:32:47 -070087enum smap_psock_state {
88 SMAP_TX_RUNNING,
89};
90
John Fastabend2f857d02017-08-28 07:10:25 -070091struct smap_psock_map_entry {
92 struct list_head list;
Daniel Borkmann585f5a62018-08-16 21:49:10 +020093 struct bpf_map *map;
John Fastabend2f857d02017-08-28 07:10:25 -070094 struct sock **entry;
John Fastabende9db4ef2018-06-30 06:17:47 -070095 struct htab_elem __rcu *hash_link;
John Fastabend2f857d02017-08-28 07:10:25 -070096};
97
John Fastabend174a79f2017-08-15 22:32:47 -070098struct smap_psock {
99 struct rcu_head rcu;
John Fastabendffa35662018-03-18 12:56:54 -0700100 refcount_t refcnt;
John Fastabend174a79f2017-08-15 22:32:47 -0700101
102 /* datapath variables */
103 struct sk_buff_head rxqueue;
104 bool strp_enabled;
105
106 /* datapath error path cache across tx work invocations */
107 int save_rem;
108 int save_off;
109 struct sk_buff *save_skb;
110
John Fastabend4f738ad2018-03-18 12:57:10 -0700111 /* datapath variables for tx_msg ULP */
112 struct sock *sk_redir;
113 int apply_bytes;
114 int cork_bytes;
115 int sg_size;
116 int eval;
117 struct sk_msg_buff *cork;
John Fastabend8934ce22018-03-28 12:49:15 -0700118 struct list_head ingress;
John Fastabend4f738ad2018-03-18 12:57:10 -0700119
John Fastabend174a79f2017-08-15 22:32:47 -0700120 struct strparser strp;
John Fastabend4f738ad2018-03-18 12:57:10 -0700121 struct bpf_prog *bpf_tx_msg;
John Fastabend174a79f2017-08-15 22:32:47 -0700122 struct bpf_prog *bpf_parse;
123 struct bpf_prog *bpf_verdict;
John Fastabend2f857d02017-08-28 07:10:25 -0700124 struct list_head maps;
John Fastabende9db4ef2018-06-30 06:17:47 -0700125 spinlock_t maps_lock;
John Fastabend174a79f2017-08-15 22:32:47 -0700126
	/* Back reference used when sock callbacks trigger sockmap operations */
John Fastabend174a79f2017-08-15 22:32:47 -0700128 struct sock *sock;
129 unsigned long state;
130
131 struct work_struct tx_work;
132 struct work_struct gc_work;
133
John Fastabend1aa12bd2018-02-05 10:17:49 -0800134 struct proto *sk_proto;
135 void (*save_close)(struct sock *sk, long timeout);
John Fastabend174a79f2017-08-15 22:32:47 -0700136 void (*save_data_ready)(struct sock *sk);
137 void (*save_write_space)(struct sock *sk);
John Fastabend174a79f2017-08-15 22:32:47 -0700138};
139
John Fastabend4f738ad2018-03-18 12:57:10 -0700140static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
John Fastabend8934ce22018-03-28 12:49:15 -0700141static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
142 int nonblock, int flags, int *addr_len);
John Fastabend4f738ad2018-03-18 12:57:10 -0700143static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
144static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
145 int offset, size_t size, int flags);
John Fastabend9901c5d2018-06-30 06:17:36 -0700146static void bpf_tcp_close(struct sock *sk, long timeout);
John Fastabend4f738ad2018-03-18 12:57:10 -0700147
John Fastabend174a79f2017-08-15 22:32:47 -0700148static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
149{
John Fastabend2f857d02017-08-28 07:10:25 -0700150 return rcu_dereference_sk_user_data(sk);
John Fastabend174a79f2017-08-15 22:32:47 -0700151}
152
John Fastabend8934ce22018-03-28 12:49:15 -0700153static bool bpf_tcp_stream_read(const struct sock *sk)
154{
155 struct smap_psock *psock;
156 bool empty = true;
157
158 rcu_read_lock();
159 psock = smap_psock_sk(sk);
160 if (unlikely(!psock))
161 goto out;
162 empty = list_empty(&psock->ingress);
163out:
164 rcu_read_unlock();
165 return !empty;
166}
167
John Fastabend9901c5d2018-06-30 06:17:36 -0700168enum {
169 SOCKMAP_IPV4,
170 SOCKMAP_IPV6,
171 SOCKMAP_NUM_PROTS,
172};
173
174enum {
175 SOCKMAP_BASE,
176 SOCKMAP_TX,
177 SOCKMAP_NUM_CONFIGS,
178};
179
180static struct proto *saved_tcpv6_prot __read_mostly;
181static DEFINE_SPINLOCK(tcpv6_prot_lock);
182static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
183static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
184 struct proto *base)
185{
186 prot[SOCKMAP_BASE] = *base;
187 prot[SOCKMAP_BASE].close = bpf_tcp_close;
188 prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
189 prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
190
191 prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
192 prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
193 prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
194}
195
196static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
197{
198 int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
199 int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
200
201 sk->sk_prot = &bpf_tcp_prots[family][conf];
202}
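/* Selection examples, worked through the tables built above:
 *
 *	sk->sk_family == AF_INET6 && psock->bpf_tx_msg != NULL
 *		=> sk->sk_prot = &bpf_tcp_prots[SOCKMAP_IPV6][SOCKMAP_TX]
 *		   (close/recvmsg/stream_memory_read and sendmsg/sendpage
 *		    all point at the bpf_tcp_* handlers)
 *
 *	sk->sk_family == AF_INET && psock->bpf_tx_msg == NULL
 *		=> sk->sk_prot = &bpf_tcp_prots[SOCKMAP_IPV4][SOCKMAP_BASE]
 *		   (only close/recvmsg/stream_memory_read are overridden)
 */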
203
John Fastabend1aa12bd2018-02-05 10:17:49 -0800204static int bpf_tcp_init(struct sock *sk)
205{
206 struct smap_psock *psock;
207
208 rcu_read_lock();
209 psock = smap_psock_sk(sk);
210 if (unlikely(!psock)) {
211 rcu_read_unlock();
212 return -EINVAL;
213 }
214
215 if (unlikely(psock->sk_proto)) {
216 rcu_read_unlock();
217 return -EBUSY;
218 }
219
220 psock->save_close = sk->sk_prot->close;
221 psock->sk_proto = sk->sk_prot;
John Fastabend4f738ad2018-03-18 12:57:10 -0700222
John Fastabend9901c5d2018-06-30 06:17:36 -0700223 /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
224 if (sk->sk_family == AF_INET6 &&
225 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
226 spin_lock_bh(&tcpv6_prot_lock);
227 if (likely(sk->sk_prot != saved_tcpv6_prot)) {
228 build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
229 smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
230 }
231 spin_unlock_bh(&tcpv6_prot_lock);
John Fastabend4f738ad2018-03-18 12:57:10 -0700232 }
John Fastabend9901c5d2018-06-30 06:17:36 -0700233 update_sk_prot(sk, psock);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800234 rcu_read_unlock();
235 return 0;
236}
237
John Fastabend4f738ad2018-03-18 12:57:10 -0700238static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
239static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
240
John Fastabend1aa12bd2018-02-05 10:17:49 -0800241static void bpf_tcp_release(struct sock *sk)
242{
243 struct smap_psock *psock;
244
245 rcu_read_lock();
246 psock = smap_psock_sk(sk);
John Fastabend4f738ad2018-03-18 12:57:10 -0700247 if (unlikely(!psock))
248 goto out;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800249
John Fastabend4f738ad2018-03-18 12:57:10 -0700250 if (psock->cork) {
251 free_start_sg(psock->sock, psock->cork);
252 kfree(psock->cork);
253 psock->cork = NULL;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800254 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700255
John Fastabend0e94d872018-04-02 12:50:52 -0700256 if (psock->sk_proto) {
257 sk->sk_prot = psock->sk_proto;
258 psock->sk_proto = NULL;
259 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700260out:
John Fastabend1aa12bd2018-02-05 10:17:49 -0800261 rcu_read_unlock();
262}
263
John Fastabende9db4ef2018-06-30 06:17:47 -0700264static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
265 u32 hash, void *key, u32 key_size)
266{
267 struct htab_elem *l;
268
269 hlist_for_each_entry_rcu(l, head, hash_node) {
270 if (l->hash == hash && !memcmp(&l->key, key, key_size))
271 return l;
272 }
273
274 return NULL;
275}
276
277static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
278{
279 return &htab->buckets[hash & (htab->n_buckets - 1)];
280}
281
282static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
283{
284 return &__select_bucket(htab, hash)->head;
285}
286
John Fastabend81110382018-05-14 10:00:17 -0700287static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
288{
289 atomic_dec(&htab->count);
290 kfree_rcu(l, rcu);
291}
292
John Fastabende9db4ef2018-06-30 06:17:47 -0700293static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
294 struct smap_psock *psock)
295{
296 struct smap_psock_map_entry *e;
297
298 spin_lock_bh(&psock->maps_lock);
299 e = list_first_entry_or_null(&psock->maps,
300 struct smap_psock_map_entry,
301 list);
302 if (e)
303 list_del(&e->list);
304 spin_unlock_bh(&psock->maps_lock);
305 return e;
306}
307
John Fastabend1aa12bd2018-02-05 10:17:49 -0800308static void bpf_tcp_close(struct sock *sk, long timeout)
309{
310 void (*close_fun)(struct sock *sk, long timeout);
John Fastabende9db4ef2018-06-30 06:17:47 -0700311 struct smap_psock_map_entry *e;
John Fastabend8934ce22018-03-28 12:49:15 -0700312 struct sk_msg_buff *md, *mtmp;
John Fastabend1aa12bd2018-02-05 10:17:49 -0800313 struct smap_psock *psock;
314 struct sock *osk;
315
John Fastabend99ba2b52018-07-05 08:50:04 -0700316 lock_sock(sk);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800317 rcu_read_lock();
318 psock = smap_psock_sk(sk);
319 if (unlikely(!psock)) {
320 rcu_read_unlock();
John Fastabend99ba2b52018-07-05 08:50:04 -0700321 release_sock(sk);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800322 return sk->sk_prot->close(sk, timeout);
323 }
324
	/* The psock may be destroyed anytime after exiting the RCU critical
	 * section, so by the time we use close_fun the psock may no longer
	 * be valid. However, bpf_tcp_close is called with the sock lock
	 * held so the close hook and sk are still valid.
	 */
330 close_fun = psock->save_close;
331
John Fastabend820ed3f2018-04-02 12:50:46 -0700332 if (psock->cork) {
333 free_start_sg(psock->sock, psock->cork);
334 kfree(psock->cork);
335 psock->cork = NULL;
336 }
337
John Fastabend8934ce22018-03-28 12:49:15 -0700338 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
339 list_del(&md->list);
340 free_start_sg(psock->sock, md);
341 kfree(md);
342 }
343
John Fastabende9db4ef2018-06-30 06:17:47 -0700344 e = psock_map_pop(sk, psock);
345 while (e) {
John Fastabend81110382018-05-14 10:00:17 -0700346 if (e->entry) {
Daniel Borkmann585f5a62018-08-16 21:49:10 +0200347 struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
348
349 raw_spin_lock_bh(&stab->lock);
350 osk = *e->entry;
John Fastabend81110382018-05-14 10:00:17 -0700351 if (osk == sk) {
Daniel Borkmann585f5a62018-08-16 21:49:10 +0200352 *e->entry = NULL;
John Fastabend81110382018-05-14 10:00:17 -0700353 smap_release_sock(psock, sk);
354 }
Daniel Borkmann585f5a62018-08-16 21:49:10 +0200355 raw_spin_unlock_bh(&stab->lock);
John Fastabend81110382018-05-14 10:00:17 -0700356 } else {
John Fastabende9db4ef2018-06-30 06:17:47 -0700357 struct htab_elem *link = rcu_dereference(e->hash_link);
Daniel Borkmann585f5a62018-08-16 21:49:10 +0200358 struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
John Fastabende9db4ef2018-06-30 06:17:47 -0700359 struct hlist_head *head;
360 struct htab_elem *l;
361 struct bucket *b;
362
363 b = __select_bucket(htab, link->hash);
364 head = &b->head;
365 raw_spin_lock_bh(&b->lock);
366 l = lookup_elem_raw(head,
367 link->hash, link->key,
368 htab->map.key_size);
			/* If another thread deleted this object, skip deletion.
			 * The refcnt on psock may or may not be zero.
			 */
372 if (l) {
373 hlist_del_rcu(&link->hash_node);
374 smap_release_sock(psock, link->sk);
375 free_htab_elem(htab, link);
376 }
377 raw_spin_unlock_bh(&b->lock);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800378 }
Daniel Borkmannd40b0112018-08-16 21:49:08 +0200379 kfree(e);
John Fastabende9db4ef2018-06-30 06:17:47 -0700380 e = psock_map_pop(sk, psock);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800381 }
John Fastabend1aa12bd2018-02-05 10:17:49 -0800382 rcu_read_unlock();
John Fastabend99ba2b52018-07-05 08:50:04 -0700383 release_sock(sk);
John Fastabend1aa12bd2018-02-05 10:17:49 -0800384 close_fun(sk, timeout);
385}
386
John Fastabend04686ef2017-10-31 19:17:31 -0700387enum __sk_action {
388 __SK_DROP = 0,
389 __SK_PASS,
390 __SK_REDIRECT,
John Fastabend4f738ad2018-03-18 12:57:10 -0700391 __SK_NONE,
John Fastabend04686ef2017-10-31 19:17:31 -0700392};
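/* The UAPI SK_* return codes from an attached program are translated into
 * these internal __SK_* actions (see smap_verdict_func() and
 * bpf_map_msg_verdict() below). A minimal sk_skb verdict program producing
 * them might look as follows; the map and section name are illustrative,
 * see samples/bpf/sockmap/ for complete programs:
 *
 *	SEC("sk_skb2")
 *	int bpf_prog_verdict(struct __sk_buff *skb)
 *	{
 *		int key = 0;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map, key, 0);
 *	}
 *
 * A successful bpf_sk_redirect_map() yields SK_PASS with a redirect target
 * recorded, which becomes __SK_REDIRECT; plain SK_PASS maps to __SK_PASS
 * and SK_DROP to __SK_DROP.
 */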
393
John Fastabend1aa12bd2018-02-05 10:17:49 -0800394static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
395 .name = "bpf_tcp",
396 .uid = TCP_ULP_BPF,
397 .user_visible = false,
398 .owner = NULL,
399 .init = bpf_tcp_init,
400 .release = bpf_tcp_release,
401};
402
John Fastabend4f738ad2018-03-18 12:57:10 -0700403static int memcopy_from_iter(struct sock *sk,
404 struct sk_msg_buff *md,
405 struct iov_iter *from, int bytes)
406{
407 struct scatterlist *sg = md->sg_data;
408 int i = md->sg_curr, rc = -ENOSPC;
409
410 do {
411 int copy;
412 char *to;
413
414 if (md->sg_copybreak >= sg[i].length) {
415 md->sg_copybreak = 0;
416
417 if (++i == MAX_SKB_FRAGS)
418 i = 0;
419
420 if (i == md->sg_end)
421 break;
422 }
423
424 copy = sg[i].length - md->sg_copybreak;
425 to = sg_virt(&sg[i]) + md->sg_copybreak;
426 md->sg_copybreak += copy;
427
428 if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
429 rc = copy_from_iter_nocache(to, copy, from);
430 else
431 rc = copy_from_iter(to, copy, from);
432
433 if (rc != copy) {
434 rc = -EFAULT;
435 goto out;
436 }
437
438 bytes -= copy;
439 if (!bytes)
440 break;
441
442 md->sg_copybreak = 0;
443 if (++i == MAX_SKB_FRAGS)
444 i = 0;
445 } while (i != md->sg_end);
446out:
447 md->sg_curr = i;
448 return rc;
449}
450
451static int bpf_tcp_push(struct sock *sk, int apply_bytes,
452 struct sk_msg_buff *md,
453 int flags, bool uncharge)
454{
455 bool apply = apply_bytes;
456 struct scatterlist *sg;
457 int offset, ret = 0;
458 struct page *p;
459 size_t size;
460
461 while (1) {
462 sg = md->sg_data + md->sg_start;
463 size = (apply && apply_bytes < sg->length) ?
464 apply_bytes : sg->length;
465 offset = sg->offset;
466
467 tcp_rate_check_app_limited(sk);
468 p = sg_page(sg);
469retry:
470 ret = do_tcp_sendpages(sk, p, offset, size, flags);
471 if (ret != size) {
472 if (ret > 0) {
473 if (apply)
474 apply_bytes -= ret;
John Fastabend3cc9a472018-05-02 13:50:19 -0700475
476 sg->offset += ret;
477 sg->length -= ret;
John Fastabend4f738ad2018-03-18 12:57:10 -0700478 size -= ret;
479 offset += ret;
480 if (uncharge)
481 sk_mem_uncharge(sk, ret);
482 goto retry;
483 }
484
John Fastabend4f738ad2018-03-18 12:57:10 -0700485 return ret;
486 }
487
488 if (apply)
489 apply_bytes -= ret;
490 sg->offset += ret;
491 sg->length -= ret;
492 if (uncharge)
493 sk_mem_uncharge(sk, ret);
494
495 if (!sg->length) {
496 put_page(p);
497 md->sg_start++;
498 if (md->sg_start == MAX_SKB_FRAGS)
499 md->sg_start = 0;
Prashant Bhole6ef6d842018-03-30 09:21:00 +0900500 sg_init_table(sg, 1);
John Fastabend4f738ad2018-03-18 12:57:10 -0700501
502 if (md->sg_start == md->sg_end)
503 break;
504 }
505
506 if (apply && !apply_bytes)
507 break;
508 }
509 return 0;
510}
511
512static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
513{
514 struct scatterlist *sg = md->sg_data + md->sg_start;
515
516 if (md->sg_copy[md->sg_start]) {
517 md->data = md->data_end = 0;
518 } else {
519 md->data = sg_virt(sg);
520 md->data_end = md->data + sg->length;
521 }
522}
523
524static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
525{
526 struct scatterlist *sg = md->sg_data;
527 int i = md->sg_start;
528
529 do {
530 int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
531
532 sk_mem_uncharge(sk, uncharge);
533 bytes -= uncharge;
534 if (!bytes)
535 break;
536 i++;
537 if (i == MAX_SKB_FRAGS)
538 i = 0;
539 } while (i != md->sg_end);
540}
541
John Fastabendabaeb092018-05-02 13:50:29 -0700542static void free_bytes_sg(struct sock *sk, int bytes,
543 struct sk_msg_buff *md, bool charge)
John Fastabend4f738ad2018-03-18 12:57:10 -0700544{
545 struct scatterlist *sg = md->sg_data;
546 int i = md->sg_start, free;
547
548 while (bytes && sg[i].length) {
549 free = sg[i].length;
550 if (bytes < free) {
551 sg[i].length -= bytes;
552 sg[i].offset += bytes;
John Fastabendabaeb092018-05-02 13:50:29 -0700553 if (charge)
554 sk_mem_uncharge(sk, bytes);
John Fastabend4f738ad2018-03-18 12:57:10 -0700555 break;
556 }
557
John Fastabendabaeb092018-05-02 13:50:29 -0700558 if (charge)
559 sk_mem_uncharge(sk, sg[i].length);
John Fastabend4f738ad2018-03-18 12:57:10 -0700560 put_page(sg_page(&sg[i]));
561 bytes -= sg[i].length;
562 sg[i].length = 0;
563 sg[i].page_link = 0;
564 sg[i].offset = 0;
565 i++;
566
567 if (i == MAX_SKB_FRAGS)
568 i = 0;
569 }
John Fastabendabaeb092018-05-02 13:50:29 -0700570 md->sg_start = i;
John Fastabend4f738ad2018-03-18 12:57:10 -0700571}
572
573static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
574{
575 struct scatterlist *sg = md->sg_data;
576 int i = start, free = 0;
577
578 while (sg[i].length) {
579 free += sg[i].length;
580 sk_mem_uncharge(sk, sg[i].length);
John Fastabend7ebc14d2018-07-05 08:50:10 -0700581 if (!md->skb)
582 put_page(sg_page(&sg[i]));
John Fastabend4f738ad2018-03-18 12:57:10 -0700583 sg[i].length = 0;
584 sg[i].page_link = 0;
585 sg[i].offset = 0;
586 i++;
587
588 if (i == MAX_SKB_FRAGS)
589 i = 0;
590 }
John Fastabend7ebc14d2018-07-05 08:50:10 -0700591 if (md->skb)
592 consume_skb(md->skb);
John Fastabend4f738ad2018-03-18 12:57:10 -0700593
594 return free;
595}
596
597static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
598{
599 int free = free_sg(sk, md->sg_start, md);
600
601 md->sg_start = md->sg_end;
602 return free;
603}
604
605static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
606{
607 return free_sg(sk, md->sg_curr, md);
608}
609
610static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
611{
612 return ((_rc == SK_PASS) ?
John Fastabende5cd3ab2018-05-14 10:00:16 -0700613 (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
John Fastabend4f738ad2018-03-18 12:57:10 -0700614 __SK_DROP);
615}
616
617static unsigned int smap_do_tx_msg(struct sock *sk,
618 struct smap_psock *psock,
619 struct sk_msg_buff *md)
620{
621 struct bpf_prog *prog;
622 unsigned int rc, _rc;
623
624 preempt_disable();
625 rcu_read_lock();
626
627 /* If the policy was removed mid-send then default to 'accept' */
628 prog = READ_ONCE(psock->bpf_tx_msg);
629 if (unlikely(!prog)) {
630 _rc = SK_PASS;
631 goto verdict;
632 }
633
634 bpf_compute_data_pointers_sg(md);
John Fastabend303def32018-05-17 14:16:58 -0700635 md->sk = sk;
John Fastabend4f738ad2018-03-18 12:57:10 -0700636 rc = (*prog->bpf_func)(md, prog->insnsi);
637 psock->apply_bytes = md->apply_bytes;
638
639 /* Moving return codes from UAPI namespace into internal namespace */
640 _rc = bpf_map_msg_verdict(rc, md);
641
	/* The psock has a refcount on the sock but not on the map and because
	 * we need to drop the rcu read lock here it's possible the map could be
	 * removed between here and when we need it to execute the sock
	 * redirect. So do the map lookup now for future use.
	 */
647 if (_rc == __SK_REDIRECT) {
648 if (psock->sk_redir)
649 sock_put(psock->sk_redir);
650 psock->sk_redir = do_msg_redirect_map(md);
651 if (!psock->sk_redir) {
652 _rc = __SK_DROP;
653 goto verdict;
654 }
655 sock_hold(psock->sk_redir);
656 }
657verdict:
658 rcu_read_unlock();
659 preempt_enable();
660
661 return _rc;
662}
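/* Sketch of a BPF SK_MSG program whose verdict smap_do_tx_msg() consumes;
 * the map and section name are illustrative:
 *
 *	SEC("sk_msg1")
 *	int bpf_prog_tx_msg(struct sk_msg_md *msg)
 *	{
 *		int key = 1;
 *
 *		return bpf_msg_redirect_map(msg, &sock_map_tx, key, 0);
 *	}
 *
 * On a successful redirect the program is expected to return SK_PASS with a
 * redirect target recorded in the msg metadata, which bpf_map_msg_verdict()
 * turns into __SK_REDIRECT; the target socket is then resolved via
 * do_msg_redirect_map() and pinned with sock_hold() until the apply_bytes
 * budget is consumed (see bpf_md_init() below).
 */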
663
John Fastabend8934ce22018-03-28 12:49:15 -0700664static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
665 struct smap_psock *psock,
666 struct sk_msg_buff *md, int flags)
667{
668 bool apply = apply_bytes;
669 size_t size, copied = 0;
670 struct sk_msg_buff *r;
671 int err = 0, i;
672
673 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
674 if (unlikely(!r))
675 return -ENOMEM;
676
677 lock_sock(sk);
678 r->sg_start = md->sg_start;
679 i = md->sg_start;
680
681 do {
John Fastabend8934ce22018-03-28 12:49:15 -0700682 size = (apply && apply_bytes < md->sg_data[i].length) ?
683 apply_bytes : md->sg_data[i].length;
684
685 if (!sk_wmem_schedule(sk, size)) {
686 if (!copied)
687 err = -ENOMEM;
688 break;
689 }
690
691 sk_mem_charge(sk, size);
John Fastabend4fcfdfb2018-04-23 15:39:33 -0700692 r->sg_data[i] = md->sg_data[i];
John Fastabend8934ce22018-03-28 12:49:15 -0700693 r->sg_data[i].length = size;
694 md->sg_data[i].length -= size;
695 md->sg_data[i].offset += size;
696 copied += size;
697
698 if (md->sg_data[i].length) {
699 get_page(sg_page(&r->sg_data[i]));
700 r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
701 } else {
702 i++;
703 if (i == MAX_SKB_FRAGS)
704 i = 0;
705 r->sg_end = i;
706 }
707
708 if (apply) {
709 apply_bytes -= size;
710 if (!apply_bytes)
711 break;
712 }
713 } while (i != md->sg_end);
714
715 md->sg_start = i;
716
717 if (!err) {
718 list_add_tail(&r->list, &psock->ingress);
719 sk->sk_data_ready(sk);
720 } else {
721 free_start_sg(sk, r);
722 kfree(r);
723 }
724
725 release_sock(sk);
726 return err;
727}
728
John Fastabend4f738ad2018-03-18 12:57:10 -0700729static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
730 struct sk_msg_buff *md,
731 int flags)
732{
John Fastabendabaeb092018-05-02 13:50:29 -0700733 bool ingress = !!(md->flags & BPF_F_INGRESS);
John Fastabend4f738ad2018-03-18 12:57:10 -0700734 struct smap_psock *psock;
John Fastabendabaeb092018-05-02 13:50:29 -0700735 int err = 0;
John Fastabend4f738ad2018-03-18 12:57:10 -0700736
John Fastabend4f738ad2018-03-18 12:57:10 -0700737 rcu_read_lock();
738 psock = smap_psock_sk(sk);
739 if (unlikely(!psock))
740 goto out_rcu;
741
742 if (!refcount_inc_not_zero(&psock->refcnt))
743 goto out_rcu;
744
745 rcu_read_unlock();
John Fastabend8934ce22018-03-28 12:49:15 -0700746
747 if (ingress) {
748 err = bpf_tcp_ingress(sk, send, psock, md, flags);
749 } else {
750 lock_sock(sk);
751 err = bpf_tcp_push(sk, send, md, flags, false);
752 release_sock(sk);
753 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700754 smap_release_sock(psock, sk);
755 if (unlikely(err))
756 goto out;
757 return 0;
758out_rcu:
759 rcu_read_unlock();
760out:
John Fastabendabaeb092018-05-02 13:50:29 -0700761 free_bytes_sg(NULL, send, md, false);
762 return err;
John Fastabend4f738ad2018-03-18 12:57:10 -0700763}
764
765static inline void bpf_md_init(struct smap_psock *psock)
766{
767 if (!psock->apply_bytes) {
768 psock->eval = __SK_NONE;
769 if (psock->sk_redir) {
770 sock_put(psock->sk_redir);
771 psock->sk_redir = NULL;
772 }
773 }
774}
775
776static void apply_bytes_dec(struct smap_psock *psock, int i)
777{
778 if (psock->apply_bytes) {
779 if (psock->apply_bytes < i)
780 psock->apply_bytes = 0;
781 else
782 psock->apply_bytes -= i;
783 }
784}
785
786static int bpf_exec_tx_verdict(struct smap_psock *psock,
787 struct sk_msg_buff *m,
788 struct sock *sk,
789 int *copied, int flags)
790{
791 bool cork = false, enospc = (m->sg_start == m->sg_end);
792 struct sock *redir;
793 int err = 0;
794 int send;
795
796more_data:
797 if (psock->eval == __SK_NONE)
798 psock->eval = smap_do_tx_msg(sk, psock, m);
799
800 if (m->cork_bytes &&
801 m->cork_bytes > psock->sg_size && !enospc) {
802 psock->cork_bytes = m->cork_bytes - psock->sg_size;
803 if (!psock->cork) {
804 psock->cork = kcalloc(1,
805 sizeof(struct sk_msg_buff),
806 GFP_ATOMIC | __GFP_NOWARN);
807
808 if (!psock->cork) {
809 err = -ENOMEM;
810 goto out_err;
811 }
812 }
813 memcpy(psock->cork, m, sizeof(*m));
814 goto out_err;
815 }
816
817 send = psock->sg_size;
818 if (psock->apply_bytes && psock->apply_bytes < send)
819 send = psock->apply_bytes;
820
821 switch (psock->eval) {
822 case __SK_PASS:
823 err = bpf_tcp_push(sk, send, m, flags, true);
824 if (unlikely(err)) {
825 *copied -= free_start_sg(sk, m);
826 break;
827 }
828
829 apply_bytes_dec(psock, send);
830 psock->sg_size -= send;
831 break;
832 case __SK_REDIRECT:
833 redir = psock->sk_redir;
834 apply_bytes_dec(psock, send);
835
836 if (psock->cork) {
837 cork = true;
838 psock->cork = NULL;
839 }
840
841 return_mem_sg(sk, send, m);
842 release_sock(sk);
843
844 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
845 lock_sock(sk);
846
John Fastabendfec51d42018-05-02 13:50:24 -0700847 if (unlikely(err < 0)) {
848 free_start_sg(sk, m);
849 psock->sg_size = 0;
850 if (!cork)
851 *copied -= send;
852 } else {
853 psock->sg_size -= send;
854 }
855
John Fastabend4f738ad2018-03-18 12:57:10 -0700856 if (cork) {
857 free_start_sg(sk, m);
John Fastabendfec51d42018-05-02 13:50:24 -0700858 psock->sg_size = 0;
John Fastabend4f738ad2018-03-18 12:57:10 -0700859 kfree(m);
860 m = NULL;
John Fastabendfec51d42018-05-02 13:50:24 -0700861 err = 0;
John Fastabend4f738ad2018-03-18 12:57:10 -0700862 }
John Fastabend4f738ad2018-03-18 12:57:10 -0700863 break;
864 case __SK_DROP:
865 default:
John Fastabendabaeb092018-05-02 13:50:29 -0700866 free_bytes_sg(sk, send, m, true);
John Fastabend4f738ad2018-03-18 12:57:10 -0700867 apply_bytes_dec(psock, send);
868 *copied -= send;
869 psock->sg_size -= send;
870 err = -EACCES;
871 break;
872 }
873
874 if (likely(!err)) {
875 bpf_md_init(psock);
876 if (m &&
877 m->sg_data[m->sg_start].page_link &&
878 m->sg_data[m->sg_start].length)
879 goto more_data;
880 }
881
882out_err:
883 return err;
884}
885
John Fastabende20f7332018-04-23 15:39:28 -0700886static int bpf_wait_data(struct sock *sk,
887 struct smap_psock *psk, int flags,
888 long timeo, int *err)
889{
890 int rc;
891
892 DEFINE_WAIT_FUNC(wait, woken_wake_function);
893
894 add_wait_queue(sk_sleep(sk), &wait);
895 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
896 rc = sk_wait_event(sk, &timeo,
897 !list_empty(&psk->ingress) ||
898 !skb_queue_empty(&sk->sk_receive_queue),
899 &wait);
900 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
901 remove_wait_queue(sk_sleep(sk), &wait);
902
903 return rc;
904}
905
John Fastabend8934ce22018-03-28 12:49:15 -0700906static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
907 int nonblock, int flags, int *addr_len)
908{
909 struct iov_iter *iter = &msg->msg_iter;
910 struct smap_psock *psock;
911 int copied = 0;
912
913 if (unlikely(flags & MSG_ERRQUEUE))
914 return inet_recv_error(sk, msg, len, addr_len);
915
916 rcu_read_lock();
917 psock = smap_psock_sk(sk);
918 if (unlikely(!psock))
919 goto out;
920
921 if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
922 goto out;
923 rcu_read_unlock();
924
925 if (!skb_queue_empty(&sk->sk_receive_queue))
926 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
927
928 lock_sock(sk);
John Fastabende20f7332018-04-23 15:39:28 -0700929bytes_ready:
John Fastabend8934ce22018-03-28 12:49:15 -0700930 while (copied != len) {
931 struct scatterlist *sg;
932 struct sk_msg_buff *md;
933 int i;
934
935 md = list_first_entry_or_null(&psock->ingress,
936 struct sk_msg_buff, list);
937 if (unlikely(!md))
938 break;
939 i = md->sg_start;
940 do {
941 struct page *page;
942 int n, copy;
943
944 sg = &md->sg_data[i];
945 copy = sg->length;
946 page = sg_page(sg);
947
948 if (copied + copy > len)
949 copy = len - copied;
950
951 n = copy_page_to_iter(page, sg->offset, copy, iter);
952 if (n != copy) {
953 md->sg_start = i;
954 release_sock(sk);
955 smap_release_sock(psock, sk);
956 return -EFAULT;
957 }
958
959 copied += copy;
960 sg->offset += copy;
961 sg->length -= copy;
962 sk_mem_uncharge(sk, copy);
963
964 if (!sg->length) {
965 i++;
966 if (i == MAX_SKB_FRAGS)
967 i = 0;
John Fastabendfa246692018-03-28 12:49:25 -0700968 if (!md->skb)
969 put_page(page);
John Fastabend8934ce22018-03-28 12:49:15 -0700970 }
971 if (copied == len)
972 break;
973 } while (i != md->sg_end);
974 md->sg_start = i;
975
976 if (!sg->length && md->sg_start == md->sg_end) {
977 list_del(&md->list);
John Fastabendfa246692018-03-28 12:49:25 -0700978 if (md->skb)
979 consume_skb(md->skb);
John Fastabend8934ce22018-03-28 12:49:15 -0700980 kfree(md);
981 }
982 }
983
John Fastabende20f7332018-04-23 15:39:28 -0700984 if (!copied) {
985 long timeo;
986 int data;
987 int err = 0;
988
989 timeo = sock_rcvtimeo(sk, nonblock);
990 data = bpf_wait_data(sk, psock, flags, timeo, &err);
991
992 if (data) {
993 if (!skb_queue_empty(&sk->sk_receive_queue)) {
994 release_sock(sk);
995 smap_release_sock(psock, sk);
996 copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
997 return copied;
998 }
999 goto bytes_ready;
1000 }
1001
1002 if (err)
1003 copied = err;
1004 }
1005
John Fastabend8934ce22018-03-28 12:49:15 -07001006 release_sock(sk);
1007 smap_release_sock(psock, sk);
1008 return copied;
1009out:
1010 rcu_read_unlock();
1011 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
1012}
1013
1014
John Fastabend4f738ad2018-03-18 12:57:10 -07001015static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1016{
1017 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
1018 struct sk_msg_buff md = {0};
1019 unsigned int sg_copy = 0;
1020 struct smap_psock *psock;
1021 int copied = 0, err = 0;
1022 struct scatterlist *sg;
1023 long timeo;
1024
	/* It's possible a sock event or user removed the psock _but_ the ops
	 * have not been reprogrammed yet, so we get here. In this case fall
	 * back to tcp_sendmsg. Note this only works because we _only_ ever
	 * allow a single ULP; there is no hierarchy here.
	 */
1030 rcu_read_lock();
1031 psock = smap_psock_sk(sk);
1032 if (unlikely(!psock)) {
1033 rcu_read_unlock();
1034 return tcp_sendmsg(sk, msg, size);
1035 }
1036
	/* Increment the psock refcnt to ensure it's not released while sending
	 * a message. Required because sk lookup and bpf programs are used in
	 * separate rcu critical sections. It's OK if we lose the map entry
	 * but we can't lose the sock reference.
	 */
1042 if (!refcount_inc_not_zero(&psock->refcnt)) {
1043 rcu_read_unlock();
1044 return tcp_sendmsg(sk, msg, size);
1045 }
1046
1047 sg = md.sg_data;
Prashant Bhole6ef6d842018-03-30 09:21:00 +09001048 sg_init_marker(sg, MAX_SKB_FRAGS);
John Fastabend4f738ad2018-03-18 12:57:10 -07001049 rcu_read_unlock();
1050
1051 lock_sock(sk);
1052 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1053
1054 while (msg_data_left(msg)) {
Daniel Borkmann7c81c712018-08-08 19:23:14 +02001055 struct sk_msg_buff *m = NULL;
John Fastabend4f738ad2018-03-18 12:57:10 -07001056 bool enospc = false;
1057 int copy;
1058
1059 if (sk->sk_err) {
Daniel Borkmann51217002018-08-08 19:23:13 +02001060 err = -sk->sk_err;
John Fastabend4f738ad2018-03-18 12:57:10 -07001061 goto out_err;
1062 }
1063
1064 copy = msg_data_left(msg);
1065 if (!sk_stream_memory_free(sk))
1066 goto wait_for_sndbuf;
1067
1068 m = psock->cork_bytes ? psock->cork : &md;
1069 m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
1070 err = sk_alloc_sg(sk, copy, m->sg_data,
1071 m->sg_start, &m->sg_end, &sg_copy,
1072 m->sg_end - 1);
1073 if (err) {
1074 if (err != -ENOSPC)
1075 goto wait_for_memory;
1076 enospc = true;
1077 copy = sg_copy;
1078 }
1079
1080 err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
1081 if (err < 0) {
1082 free_curr_sg(sk, m);
1083 goto out_err;
1084 }
1085
1086 psock->sg_size += copy;
1087 copied += copy;
1088 sg_copy = 0;
1089
		/* When bytes are being corked skip running the BPF program and
		 * applying the verdict unless there is no more buffer space.
		 * In the ENOSPC case simply run the BPF program with the
		 * currently accumulated data. We don't have much choice at
		 * this point; we could try extending the page frags or
		 * chaining complex frags but even in these cases _eventually_
		 * we will hit an OOM scenario. More complex recovery schemes
		 * may be implemented in the future, but BPF programs must
		 * handle the case where apply_cork requests are not honored.
		 * The canonical method to verify this is to check data length.
		 */
1101 if (psock->cork_bytes) {
1102 if (copy > psock->cork_bytes)
1103 psock->cork_bytes = 0;
1104 else
1105 psock->cork_bytes -= copy;
1106
1107 if (psock->cork_bytes && !enospc)
1108 goto out_cork;
1109
			/* All cork bytes accounted for, re-run the filter */
1111 psock->eval = __SK_NONE;
1112 psock->cork_bytes = 0;
1113 }
1114
1115 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1116 if (unlikely(err < 0))
1117 goto out_err;
1118 continue;
1119wait_for_sndbuf:
1120 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1121wait_for_memory:
1122 err = sk_stream_wait_memory(sk, &timeo);
Daniel Borkmann7c81c712018-08-08 19:23:14 +02001123 if (err) {
1124 if (m && m != psock->cork)
1125 free_start_sg(sk, m);
John Fastabend4f738ad2018-03-18 12:57:10 -07001126 goto out_err;
Daniel Borkmann7c81c712018-08-08 19:23:14 +02001127 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001128 }
1129out_err:
1130 if (err < 0)
1131 err = sk_stream_error(sk, msg->msg_flags, err);
1132out_cork:
1133 release_sock(sk);
1134 smap_release_sock(psock, sk);
1135 return copied ? copied : err;
1136}
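/* Sketch of how an SK_MSG program drives the cork/apply logic handled by
 * bpf_tcp_sendmsg() and bpf_exec_tx_verdict() above. The sizes and section
 * name are illustrative: the program asks for at least 512 bytes to be
 * accumulated before it is re-run and for its verdict to cover the next
 * 1024 bytes:
 *
 *	SEC("sk_msg1")
 *	int bpf_prog_cork(struct sk_msg_md *msg)
 *	{
 *		bpf_msg_cork_bytes(msg, 512);
 *		bpf_msg_apply_bytes(msg, 1024);
 *		return SK_PASS;
 *	}
 *
 * As noted above, a cork request may not be honored once the scatterlist
 * runs out of space (ENOSPC), so programs should check the data length
 * (msg->data_end - msg->data) rather than assume the full cork size arrived.
 */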
1137
1138static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
1139 int offset, size_t size, int flags)
1140{
1141 struct sk_msg_buff md = {0}, *m = NULL;
1142 int err = 0, copied = 0;
1143 struct smap_psock *psock;
1144 struct scatterlist *sg;
1145 bool enospc = false;
1146
1147 rcu_read_lock();
1148 psock = smap_psock_sk(sk);
1149 if (unlikely(!psock))
1150 goto accept;
1151
1152 if (!refcount_inc_not_zero(&psock->refcnt))
1153 goto accept;
1154 rcu_read_unlock();
1155
1156 lock_sock(sk);
1157
Prashant Bhole6ef6d842018-03-30 09:21:00 +09001158 if (psock->cork_bytes) {
John Fastabend4f738ad2018-03-18 12:57:10 -07001159 m = psock->cork;
Prashant Bhole6ef6d842018-03-30 09:21:00 +09001160 sg = &m->sg_data[m->sg_end];
1161 } else {
John Fastabend4f738ad2018-03-18 12:57:10 -07001162 m = &md;
Prashant Bhole6ef6d842018-03-30 09:21:00 +09001163 sg = m->sg_data;
1164 sg_init_marker(sg, MAX_SKB_FRAGS);
1165 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001166
1167 /* Catch case where ring is full and sendpage is stalled. */
1168 if (unlikely(m->sg_end == m->sg_start &&
1169 m->sg_data[m->sg_end].length))
1170 goto out_err;
1171
1172 psock->sg_size += size;
John Fastabend4f738ad2018-03-18 12:57:10 -07001173 sg_set_page(sg, page, size, offset);
1174 get_page(page);
1175 m->sg_copy[m->sg_end] = true;
1176 sk_mem_charge(sk, size);
1177 m->sg_end++;
1178 copied = size;
1179
1180 if (m->sg_end == MAX_SKB_FRAGS)
1181 m->sg_end = 0;
1182
1183 if (m->sg_end == m->sg_start)
1184 enospc = true;
1185
1186 if (psock->cork_bytes) {
1187 if (size > psock->cork_bytes)
1188 psock->cork_bytes = 0;
1189 else
1190 psock->cork_bytes -= size;
1191
1192 if (psock->cork_bytes && !enospc)
1193 goto out_err;
1194
		/* All cork bytes accounted for, re-run the filter */
1196 psock->eval = __SK_NONE;
1197 psock->cork_bytes = 0;
1198 }
1199
1200 err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
1201out_err:
1202 release_sock(sk);
1203 smap_release_sock(psock, sk);
1204 return copied ? copied : err;
1205accept:
1206 rcu_read_unlock();
1207 return tcp_sendpage(sk, page, offset, size, flags);
1208}
1209
1210static void bpf_tcp_msg_add(struct smap_psock *psock,
1211 struct sock *sk,
1212 struct bpf_prog *tx_msg)
1213{
1214 struct bpf_prog *orig_tx_msg;
1215
1216 orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
1217 if (orig_tx_msg)
1218 bpf_prog_put(orig_tx_msg);
1219}
1220
John Fastabend1aa12bd2018-02-05 10:17:49 -08001221static int bpf_tcp_ulp_register(void)
1222{
John Fastabend9901c5d2018-06-30 06:17:36 -07001223 build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
	/* Once the BPF TX ULP is registered it is never unregistered. It
	 * will be in the ULP list for the lifetime of the system. Doing
	 * duplicate registrations is not a problem.
	 */
John Fastabend1aa12bd2018-02-05 10:17:49 -08001228 return tcp_register_ulp(&bpf_tcp_ulp_ops);
1229}
1230
John Fastabend174a79f2017-08-15 22:32:47 -07001231static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
1232{
1233 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
1234 int rc;
1235
1236 if (unlikely(!prog))
John Fastabend04686ef2017-10-31 19:17:31 -07001237 return __SK_DROP;
John Fastabend174a79f2017-08-15 22:32:47 -07001238
1239 skb_orphan(skb);
John Fastabend34f795022017-10-18 07:10:36 -07001240 /* We need to ensure that BPF metadata for maps is also cleared
1241 * when we orphan the skb so that we don't have the possibility
1242 * to reference a stale map.
1243 */
John Fastabende5cd3ab2018-05-14 10:00:16 -07001244 TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
John Fastabend174a79f2017-08-15 22:32:47 -07001245 skb->sk = psock->sock;
John Fastabend0ea488f2018-07-05 08:50:15 -07001246 bpf_compute_data_end_sk_skb(skb);
John Fastabend34f795022017-10-18 07:10:36 -07001247 preempt_disable();
John Fastabend174a79f2017-08-15 22:32:47 -07001248 rc = (*prog->bpf_func)(skb, prog->insnsi);
John Fastabend34f795022017-10-18 07:10:36 -07001249 preempt_enable();
John Fastabend174a79f2017-08-15 22:32:47 -07001250 skb->sk = NULL;
1251
John Fastabend04686ef2017-10-31 19:17:31 -07001252 /* Moving return codes from UAPI namespace into internal namespace */
John Fastabendbfa640752017-10-27 09:45:53 -07001253 return rc == SK_PASS ?
John Fastabende5cd3ab2018-05-14 10:00:16 -07001254 (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
John Fastabend04686ef2017-10-31 19:17:31 -07001255 __SK_DROP;
John Fastabend174a79f2017-08-15 22:32:47 -07001256}
1257
John Fastabendfa246692018-03-28 12:49:25 -07001258static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
1259{
1260 struct sock *sk = psock->sock;
1261 int copied = 0, num_sg;
1262 struct sk_msg_buff *r;
1263
1264 r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
1265 if (unlikely(!r))
1266 return -EAGAIN;
1267
1268 if (!sk_rmem_schedule(sk, skb, skb->len)) {
1269 kfree(r);
1270 return -EAGAIN;
1271 }
1272
1273 sg_init_table(r->sg_data, MAX_SKB_FRAGS);
1274 num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
1275 if (unlikely(num_sg < 0)) {
1276 kfree(r);
1277 return num_sg;
1278 }
1279 sk_mem_charge(sk, skb->len);
1280 copied = skb->len;
1281 r->sg_start = 0;
1282 r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
1283 r->skb = skb;
1284 list_add_tail(&r->list, &psock->ingress);
1285 sk->sk_data_ready(sk);
1286 return copied;
1287}
1288
John Fastabend174a79f2017-08-15 22:32:47 -07001289static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
1290{
John Fastabendfa246692018-03-28 12:49:25 -07001291 struct smap_psock *peer;
John Fastabend90a96312017-09-01 11:29:26 -07001292 struct sock *sk;
John Fastabendfa246692018-03-28 12:49:25 -07001293 __u32 in;
John Fastabend174a79f2017-08-15 22:32:47 -07001294 int rc;
1295
John Fastabend174a79f2017-08-15 22:32:47 -07001296 rc = smap_verdict_func(psock, skb);
1297 switch (rc) {
John Fastabend04686ef2017-10-31 19:17:31 -07001298 case __SK_REDIRECT:
John Fastabend34f795022017-10-18 07:10:36 -07001299 sk = do_sk_redirect_map(skb);
John Fastabendfa246692018-03-28 12:49:25 -07001300 if (!sk) {
1301 kfree_skb(skb);
1302 break;
1303 }
John Fastabend174a79f2017-08-15 22:32:47 -07001304
John Fastabendfa246692018-03-28 12:49:25 -07001305 peer = smap_psock_sk(sk);
1306 in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
1307
1308 if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
1309 !test_bit(SMAP_TX_RUNNING, &peer->state))) {
1310 kfree_skb(skb);
1311 break;
1312 }
1313
1314 if (!in && sock_writeable(sk)) {
1315 skb_set_owner_w(skb, sk);
1316 skb_queue_tail(&peer->rxqueue, skb);
1317 schedule_work(&peer->tx_work);
1318 break;
1319 } else if (in &&
1320 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
1321 skb_queue_tail(&peer->rxqueue, skb);
1322 schedule_work(&peer->tx_work);
1323 break;
John Fastabend174a79f2017-08-15 22:32:47 -07001324 }
1325 /* Fall through and free skb otherwise */
John Fastabend04686ef2017-10-31 19:17:31 -07001326 case __SK_DROP:
John Fastabend174a79f2017-08-15 22:32:47 -07001327 default:
John Fastabend174a79f2017-08-15 22:32:47 -07001328 kfree_skb(skb);
1329 }
1330}
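/* Sketch of a verdict program exercising the ingress path handled by
 * smap_do_verdict() above: passing BPF_F_INGRESS queues the skb toward the
 * peer's ingress list (via smap_tx_work()/smap_do_ingress(), drained by
 * bpf_tcp_recvmsg()) instead of its transmit path. Map and section names
 * are illustrative:
 *
 *	SEC("sk_skb2")
 *	int bpf_prog_verdict_ingress(struct __sk_buff *skb)
 *	{
 *		int key = 0;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map_rx, key,
 *					   BPF_F_INGRESS);
 *	}
 */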
1331
1332static void smap_report_sk_error(struct smap_psock *psock, int err)
1333{
1334 struct sock *sk = psock->sock;
1335
1336 sk->sk_err = err;
1337 sk->sk_error_report(sk);
1338}
1339
John Fastabend174a79f2017-08-15 22:32:47 -07001340static void smap_read_sock_strparser(struct strparser *strp,
1341 struct sk_buff *skb)
1342{
1343 struct smap_psock *psock;
1344
1345 rcu_read_lock();
1346 psock = container_of(strp, struct smap_psock, strp);
1347 smap_do_verdict(psock, skb);
1348 rcu_read_unlock();
1349}
1350
1351/* Called with lock held on socket */
1352static void smap_data_ready(struct sock *sk)
1353{
1354 struct smap_psock *psock;
1355
John Fastabendd26e597d2017-08-28 07:10:45 -07001356 rcu_read_lock();
John Fastabend174a79f2017-08-15 22:32:47 -07001357 psock = smap_psock_sk(sk);
John Fastabendd26e597d2017-08-28 07:10:45 -07001358 if (likely(psock)) {
1359 write_lock_bh(&sk->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001360 strp_data_ready(&psock->strp);
John Fastabendd26e597d2017-08-28 07:10:45 -07001361 write_unlock_bh(&sk->sk_callback_lock);
1362 }
1363 rcu_read_unlock();
John Fastabend174a79f2017-08-15 22:32:47 -07001364}
1365
1366static void smap_tx_work(struct work_struct *w)
1367{
1368 struct smap_psock *psock;
1369 struct sk_buff *skb;
1370 int rem, off, n;
1371
1372 psock = container_of(w, struct smap_psock, tx_work);
1373
1374 /* lock sock to avoid losing sk_socket at some point during loop */
1375 lock_sock(psock->sock);
1376 if (psock->save_skb) {
1377 skb = psock->save_skb;
1378 rem = psock->save_rem;
1379 off = psock->save_off;
1380 psock->save_skb = NULL;
1381 goto start;
1382 }
1383
1384 while ((skb = skb_dequeue(&psock->rxqueue))) {
John Fastabendfa246692018-03-28 12:49:25 -07001385 __u32 flags;
1386
John Fastabend174a79f2017-08-15 22:32:47 -07001387 rem = skb->len;
1388 off = 0;
1389start:
John Fastabendfa246692018-03-28 12:49:25 -07001390 flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
John Fastabend174a79f2017-08-15 22:32:47 -07001391 do {
John Fastabendfa246692018-03-28 12:49:25 -07001392 if (likely(psock->sock->sk_socket)) {
1393 if (flags)
1394 n = smap_do_ingress(psock, skb);
1395 else
1396 n = skb_send_sock_locked(psock->sock,
1397 skb, off, rem);
1398 } else {
John Fastabend174a79f2017-08-15 22:32:47 -07001399 n = -EINVAL;
John Fastabendfa246692018-03-28 12:49:25 -07001400 }
1401
John Fastabend174a79f2017-08-15 22:32:47 -07001402 if (n <= 0) {
1403 if (n == -EAGAIN) {
1404 /* Retry when space is available */
1405 psock->save_skb = skb;
1406 psock->save_rem = rem;
1407 psock->save_off = off;
1408 goto out;
1409 }
1410 /* Hard errors break pipe and stop xmit */
1411 smap_report_sk_error(psock, n ? -n : EPIPE);
1412 clear_bit(SMAP_TX_RUNNING, &psock->state);
John Fastabend174a79f2017-08-15 22:32:47 -07001413 kfree_skb(skb);
1414 goto out;
1415 }
1416 rem -= n;
1417 off += n;
1418 } while (rem);
John Fastabendfa246692018-03-28 12:49:25 -07001419
1420 if (!flags)
1421 kfree_skb(skb);
John Fastabend174a79f2017-08-15 22:32:47 -07001422 }
1423out:
1424 release_sock(psock->sock);
1425}
1426
1427static void smap_write_space(struct sock *sk)
1428{
1429 struct smap_psock *psock;
John Fastabend9b2e0382018-08-22 08:37:37 -07001430 void (*write_space)(struct sock *sk);
John Fastabend174a79f2017-08-15 22:32:47 -07001431
1432 rcu_read_lock();
1433 psock = smap_psock_sk(sk);
1434 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1435 schedule_work(&psock->tx_work);
John Fastabend9b2e0382018-08-22 08:37:37 -07001436 write_space = psock->save_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001437 rcu_read_unlock();
John Fastabend9b2e0382018-08-22 08:37:37 -07001438 write_space(sk);
John Fastabend174a79f2017-08-15 22:32:47 -07001439}
1440
1441static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
1442{
John Fastabend174a79f2017-08-15 22:32:47 -07001443 if (!psock->strp_enabled)
John Fastabend2f857d02017-08-28 07:10:25 -07001444 return;
John Fastabend174a79f2017-08-15 22:32:47 -07001445 sk->sk_data_ready = psock->save_data_ready;
1446 sk->sk_write_space = psock->save_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001447 psock->save_data_ready = NULL;
1448 psock->save_write_space = NULL;
John Fastabend174a79f2017-08-15 22:32:47 -07001449 strp_stop(&psock->strp);
1450 psock->strp_enabled = false;
John Fastabend174a79f2017-08-15 22:32:47 -07001451}
1452
1453static void smap_destroy_psock(struct rcu_head *rcu)
1454{
1455 struct smap_psock *psock = container_of(rcu,
1456 struct smap_psock, rcu);
1457
	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we cannot
	 * do it in rcu context.
	 */
1464 schedule_work(&psock->gc_work);
1465}
1466
John Fastabend2f857d02017-08-28 07:10:25 -07001467static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
John Fastabend174a79f2017-08-15 22:32:47 -07001468{
John Fastabendffa35662018-03-18 12:56:54 -07001469 if (refcount_dec_and_test(&psock->refcnt)) {
1470 tcp_cleanup_ulp(sock);
John Fastabende9db4ef2018-06-30 06:17:47 -07001471 write_lock_bh(&sock->sk_callback_lock);
John Fastabendffa35662018-03-18 12:56:54 -07001472 smap_stop_sock(psock, sock);
John Fastabende9db4ef2018-06-30 06:17:47 -07001473 write_unlock_bh(&sock->sk_callback_lock);
John Fastabendffa35662018-03-18 12:56:54 -07001474 clear_bit(SMAP_TX_RUNNING, &psock->state);
1475 rcu_assign_sk_user_data(sock, NULL);
1476 call_rcu_sched(&psock->rcu, smap_destroy_psock);
1477 }
John Fastabend174a79f2017-08-15 22:32:47 -07001478}
1479
1480static int smap_parse_func_strparser(struct strparser *strp,
1481 struct sk_buff *skb)
1482{
1483 struct smap_psock *psock;
1484 struct bpf_prog *prog;
1485 int rc;
1486
1487 rcu_read_lock();
1488 psock = container_of(strp, struct smap_psock, strp);
1489 prog = READ_ONCE(psock->bpf_parse);
1490
1491 if (unlikely(!prog)) {
1492 rcu_read_unlock();
1493 return skb->len;
1494 }
1495
	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
1503 skb->sk = psock->sock;
John Fastabend0ea488f2018-07-05 08:50:15 -07001504 bpf_compute_data_end_sk_skb(skb);
John Fastabend174a79f2017-08-15 22:32:47 -07001505 rc = (*prog->bpf_func)(skb, prog->insnsi);
1506 skb->sk = NULL;
1507 rcu_read_unlock();
1508 return rc;
1509}
1510
John Fastabend174a79f2017-08-15 22:32:47 -07001511static int smap_read_sock_done(struct strparser *strp, int err)
1512{
1513 return err;
1514}
1515
1516static int smap_init_sock(struct smap_psock *psock,
1517 struct sock *sk)
1518{
Eric Biggers3fd87122017-08-24 14:38:51 -07001519 static const struct strp_callbacks cb = {
1520 .rcv_msg = smap_read_sock_strparser,
1521 .parse_msg = smap_parse_func_strparser,
1522 .read_sock_done = smap_read_sock_done,
1523 };
John Fastabend174a79f2017-08-15 22:32:47 -07001524
John Fastabend174a79f2017-08-15 22:32:47 -07001525 return strp_init(&psock->strp, sk, &cb);
1526}
1527
1528static void smap_init_progs(struct smap_psock *psock,
John Fastabend174a79f2017-08-15 22:32:47 -07001529 struct bpf_prog *verdict,
1530 struct bpf_prog *parse)
1531{
1532 struct bpf_prog *orig_parse, *orig_verdict;
1533
1534 orig_parse = xchg(&psock->bpf_parse, parse);
1535 orig_verdict = xchg(&psock->bpf_verdict, verdict);
1536
1537 if (orig_verdict)
1538 bpf_prog_put(orig_verdict);
1539 if (orig_parse)
1540 bpf_prog_put(orig_parse);
1541}
1542
1543static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
1544{
1545 if (sk->sk_data_ready == smap_data_ready)
1546 return;
1547 psock->save_data_ready = sk->sk_data_ready;
1548 psock->save_write_space = sk->sk_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001549 sk->sk_data_ready = smap_data_ready;
1550 sk->sk_write_space = smap_write_space;
John Fastabend174a79f2017-08-15 22:32:47 -07001551 psock->strp_enabled = true;
1552}
1553
1554static void sock_map_remove_complete(struct bpf_stab *stab)
1555{
1556 bpf_map_area_free(stab->sock_map);
1557 kfree(stab);
1558}
1559
1560static void smap_gc_work(struct work_struct *w)
1561{
John Fastabend2f857d02017-08-28 07:10:25 -07001562 struct smap_psock_map_entry *e, *tmp;
John Fastabend8934ce22018-03-28 12:49:15 -07001563 struct sk_msg_buff *md, *mtmp;
John Fastabend174a79f2017-08-15 22:32:47 -07001564 struct smap_psock *psock;
1565
1566 psock = container_of(w, struct smap_psock, gc_work);
1567
1568 /* no callback lock needed because we already detached sockmap ops */
1569 if (psock->strp_enabled)
1570 strp_done(&psock->strp);
1571
1572 cancel_work_sync(&psock->tx_work);
1573 __skb_queue_purge(&psock->rxqueue);
1574
1575 /* At this point all strparser and xmit work must be complete */
1576 if (psock->bpf_parse)
1577 bpf_prog_put(psock->bpf_parse);
1578 if (psock->bpf_verdict)
1579 bpf_prog_put(psock->bpf_verdict);
John Fastabend4f738ad2018-03-18 12:57:10 -07001580 if (psock->bpf_tx_msg)
1581 bpf_prog_put(psock->bpf_tx_msg);
1582
1583 if (psock->cork) {
1584 free_start_sg(psock->sock, psock->cork);
1585 kfree(psock->cork);
1586 }
John Fastabend174a79f2017-08-15 22:32:47 -07001587
John Fastabend8934ce22018-03-28 12:49:15 -07001588 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1589 list_del(&md->list);
1590 free_start_sg(psock->sock, md);
1591 kfree(md);
1592 }
1593
John Fastabend2f857d02017-08-28 07:10:25 -07001594 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
1595 list_del(&e->list);
1596 kfree(e);
1597 }
John Fastabend174a79f2017-08-15 22:32:47 -07001598
John Fastabend4f738ad2018-03-18 12:57:10 -07001599 if (psock->sk_redir)
1600 sock_put(psock->sk_redir);
1601
John Fastabend174a79f2017-08-15 22:32:47 -07001602 sock_put(psock->sock);
1603 kfree(psock);
1604}
1605
John Fastabende5cd3ab2018-05-14 10:00:16 -07001606static struct smap_psock *smap_init_psock(struct sock *sock, int node)
John Fastabend174a79f2017-08-15 22:32:47 -07001607{
1608 struct smap_psock *psock;
1609
Martin KaFai Lau96eabe72017-08-18 11:28:00 -07001610 psock = kzalloc_node(sizeof(struct smap_psock),
1611 GFP_ATOMIC | __GFP_NOWARN,
John Fastabende5cd3ab2018-05-14 10:00:16 -07001612 node);
John Fastabend174a79f2017-08-15 22:32:47 -07001613 if (!psock)
1614 return ERR_PTR(-ENOMEM);
1615
John Fastabend4f738ad2018-03-18 12:57:10 -07001616 psock->eval = __SK_NONE;
John Fastabend174a79f2017-08-15 22:32:47 -07001617 psock->sock = sock;
1618 skb_queue_head_init(&psock->rxqueue);
1619 INIT_WORK(&psock->tx_work, smap_tx_work);
1620 INIT_WORK(&psock->gc_work, smap_gc_work);
John Fastabend2f857d02017-08-28 07:10:25 -07001621 INIT_LIST_HEAD(&psock->maps);
John Fastabend8934ce22018-03-28 12:49:15 -07001622 INIT_LIST_HEAD(&psock->ingress);
John Fastabendffa35662018-03-18 12:56:54 -07001623 refcount_set(&psock->refcnt, 1);
John Fastabende9db4ef2018-06-30 06:17:47 -07001624 spin_lock_init(&psock->maps_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001625
1626 rcu_assign_sk_user_data(sock, psock);
1627 sock_hold(sock);
1628 return psock;
1629}
1630
1631static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
1632{
1633 struct bpf_stab *stab;
John Fastabend174a79f2017-08-15 22:32:47 -07001634 u64 cost;
Eric Dumazet952fad82018-02-13 15:33:52 -08001635 int err;
John Fastabend174a79f2017-08-15 22:32:47 -07001636
John Fastabendfb50df82017-10-18 07:11:22 -07001637 if (!capable(CAP_NET_ADMIN))
1638 return ERR_PTR(-EPERM);
1639
John Fastabend174a79f2017-08-15 22:32:47 -07001640 /* check sanity of attributes */
1641 if (attr->max_entries == 0 || attr->key_size != 4 ||
Chenbo Feng6e71b042017-10-18 13:00:22 -07001642 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
John Fastabend174a79f2017-08-15 22:32:47 -07001643 return ERR_PTR(-EINVAL);
1644
John Fastabend1aa12bd2018-02-05 10:17:49 -08001645 err = bpf_tcp_ulp_register();
1646 if (err && err != -EEXIST)
1647 return ERR_PTR(err);
1648
John Fastabend174a79f2017-08-15 22:32:47 -07001649 stab = kzalloc(sizeof(*stab), GFP_USER);
1650 if (!stab)
1651 return ERR_PTR(-ENOMEM);
1652
Jakub Kicinskibd475642018-01-11 20:29:06 -08001653 bpf_map_init_from_attr(&stab->map, attr);
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001654 raw_spin_lock_init(&stab->lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001655
1656 /* make sure page count doesn't overflow */
1657 cost = (u64) stab->map.max_entries * sizeof(struct sock *);
Eric Dumazet952fad82018-02-13 15:33:52 -08001658 err = -EINVAL;
John Fastabend174a79f2017-08-15 22:32:47 -07001659 if (cost >= U32_MAX - PAGE_SIZE)
1660 goto free_stab;
1661
1662 stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1663
1664 /* if map size is larger than memlock limit, reject it early */
1665 err = bpf_map_precharge_memlock(stab->map.pages);
1666 if (err)
1667 goto free_stab;
1668
Dan Carpenterf740c342017-08-25 23:27:14 +03001669 err = -ENOMEM;
John Fastabend174a79f2017-08-15 22:32:47 -07001670 stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
Martin KaFai Lau96eabe72017-08-18 11:28:00 -07001671 sizeof(struct sock *),
1672 stab->map.numa_node);
John Fastabend174a79f2017-08-15 22:32:47 -07001673 if (!stab->sock_map)
1674 goto free_stab;
1675
John Fastabend174a79f2017-08-15 22:32:47 -07001676 return &stab->map;
1677free_stab:
1678 kfree(stab);
1679 return ERR_PTR(err);
1680}
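/* Worked example of the sizing above: a sockmap created with
 * max_entries = 1024 reserves
 *
 *	cost      = 1024 * sizeof(struct sock *) = 8192 bytes (64-bit)
 *	map.pages = round_up(8192, PAGE_SIZE) >> PAGE_SHIFT = 2 pages
 *
 * and bpf_map_precharge_memlock() checks those pages against the caller's
 * RLIMIT_MEMLOCK before the sock_map array itself is allocated.
 */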
1681
John Fastabend54fedb42018-06-30 06:17:41 -07001682static void smap_list_map_remove(struct smap_psock *psock,
1683 struct sock **entry)
John Fastabend2f857d02017-08-28 07:10:25 -07001684{
1685 struct smap_psock_map_entry *e, *tmp;
1686
John Fastabende9db4ef2018-06-30 06:17:47 -07001687 spin_lock_bh(&psock->maps_lock);
John Fastabend2f857d02017-08-28 07:10:25 -07001688 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
Daniel Borkmannd40b0112018-08-16 21:49:08 +02001689 if (e->entry == entry) {
John Fastabend2f857d02017-08-28 07:10:25 -07001690 list_del(&e->list);
Daniel Borkmannd40b0112018-08-16 21:49:08 +02001691 kfree(e);
1692 }
John Fastabend54fedb42018-06-30 06:17:41 -07001693 }
John Fastabende9db4ef2018-06-30 06:17:47 -07001694 spin_unlock_bh(&psock->maps_lock);
John Fastabend54fedb42018-06-30 06:17:41 -07001695}
1696
1697static void smap_list_hash_remove(struct smap_psock *psock,
1698 struct htab_elem *hash_link)
1699{
1700 struct smap_psock_map_entry *e, *tmp;
1701
John Fastabende9db4ef2018-06-30 06:17:47 -07001702 spin_lock_bh(&psock->maps_lock);
John Fastabend54fedb42018-06-30 06:17:41 -07001703 list_for_each_entry_safe(e, tmp, &psock->maps, list) {
John Fastabende9db4ef2018-06-30 06:17:47 -07001704 struct htab_elem *c = rcu_dereference(e->hash_link);
John Fastabend54fedb42018-06-30 06:17:41 -07001705
Daniel Borkmannd40b0112018-08-16 21:49:08 +02001706 if (c == hash_link) {
John Fastabend54fedb42018-06-30 06:17:41 -07001707 list_del(&e->list);
Daniel Borkmannd40b0112018-08-16 21:49:08 +02001708 kfree(e);
1709 }
John Fastabend2f857d02017-08-28 07:10:25 -07001710 }
John Fastabende9db4ef2018-06-30 06:17:47 -07001711 spin_unlock_bh(&psock->maps_lock);
John Fastabend2f857d02017-08-28 07:10:25 -07001712}
1713
John Fastabend174a79f2017-08-15 22:32:47 -07001714static void sock_map_free(struct bpf_map *map)
1715{
1716 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1717 int i;
1718
1719 synchronize_rcu();
1720
1721 /* At this point no update, lookup or delete operations can happen.
 1722 * However, be aware that we can still get socket state event updates
 1723 * and data ready callbacks that reference the psock from sk_user_data.
 1724 * Also, psock worker threads are still in-flight. So smap_release_sock
 1725 * will only free the psock after cancel_sync on the worker threads
 1726 * and a grace period expires, to ensure the psock is really safe to remove.
1727 */
1728 rcu_read_lock();
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001729 raw_spin_lock_bh(&stab->lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001730 for (i = 0; i < stab->map.max_entries; i++) {
John Fastabend2f857d02017-08-28 07:10:25 -07001731 struct smap_psock *psock;
John Fastabend174a79f2017-08-15 22:32:47 -07001732 struct sock *sock;
1733
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001734 sock = stab->sock_map[i];
John Fastabend174a79f2017-08-15 22:32:47 -07001735 if (!sock)
1736 continue;
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001737 stab->sock_map[i] = NULL;
John Fastabend2f857d02017-08-28 07:10:25 -07001738 psock = smap_psock_sk(sock);
John Fastabend5731a872018-01-04 20:02:09 -08001739 /* This check handles a racing sock event that can get the
 1740		 * sk_callback_lock before this case but after xchg happens,
 1741		 * causing the refcnt to hit zero and the sock user data (psock)
 1742		 * to be NULL and queued for garbage collection.
1743 */
1744 if (likely(psock)) {
John Fastabend54fedb42018-06-30 06:17:41 -07001745 smap_list_map_remove(psock, &stab->sock_map[i]);
John Fastabend5731a872018-01-04 20:02:09 -08001746 smap_release_sock(psock, sock);
1747 }
John Fastabend174a79f2017-08-15 22:32:47 -07001748 }
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001749 raw_spin_unlock_bh(&stab->lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001750 rcu_read_unlock();
1751
John Fastabend2f857d02017-08-28 07:10:25 -07001752 sock_map_remove_complete(stab);
John Fastabend174a79f2017-08-15 22:32:47 -07001753}
1754
1755static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
1756{
1757 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1758 u32 i = key ? *(u32 *)key : U32_MAX;
1759 u32 *next = (u32 *)next_key;
1760
1761 if (i >= stab->map.max_entries) {
1762 *next = 0;
1763 return 0;
1764 }
1765
1766 if (i == stab->map.max_entries - 1)
1767 return -ENOENT;
1768
1769 *next = i + 1;
1770 return 0;
1771}
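
/* Illustrative sketch (not part of this file): walking all keys of a sockmap
 * from user space with libbpf's bpf_map_get_next_key(). As implemented above,
 * a missing or out-of-range key restarts the walk at index 0; map_fd is a
 * placeholder.
 *
 *	__u32 key, next;
 *	int err;
 *
 *	err = bpf_map_get_next_key(map_fd, NULL, &next);
 *	while (!err) {
 *		key = next;
 *		// use key here, e.g. bpf_map_delete_elem(map_fd, &key)
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */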
1772
1773struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
1774{
1775 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
1776
1777 if (key >= map->max_entries)
1778 return NULL;
1779
1780 return READ_ONCE(stab->sock_map[key]);
1781}
1782
1783static int sock_map_delete_elem(struct bpf_map *map, void *key)
1784{
1785 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
John Fastabend2f857d02017-08-28 07:10:25 -07001786 struct smap_psock *psock;
John Fastabend174a79f2017-08-15 22:32:47 -07001787 int k = *(u32 *)key;
1788 struct sock *sock;
1789
1790 if (k >= map->max_entries)
1791 return -EINVAL;
1792
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001793 raw_spin_lock_bh(&stab->lock);
1794 sock = stab->sock_map[k];
1795 stab->sock_map[k] = NULL;
1796 raw_spin_unlock_bh(&stab->lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001797 if (!sock)
1798 return -EINVAL;
1799
John Fastabend2f857d02017-08-28 07:10:25 -07001800 psock = smap_psock_sk(sock);
1801 if (!psock)
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001802 return 0;
Daniel Borkmann166ab6f2018-08-16 21:49:09 +02001803 if (psock->bpf_parse) {
1804 write_lock_bh(&sock->sk_callback_lock);
John Fastabend2f857d02017-08-28 07:10:25 -07001805 smap_stop_sock(psock, sock);
Daniel Borkmann166ab6f2018-08-16 21:49:09 +02001806 write_unlock_bh(&sock->sk_callback_lock);
1807 }
John Fastabend54fedb42018-06-30 06:17:41 -07001808 smap_list_map_remove(psock, &stab->sock_map[k]);
John Fastabend2f857d02017-08-28 07:10:25 -07001809 smap_release_sock(psock, sock);
John Fastabend174a79f2017-08-15 22:32:47 -07001810 return 0;
1811}
1812
1813/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
1814 * done inside rcu critical sections. This ensures on updates that the psock
1815 * will not be released via smap_release_sock() until concurrent updates/deletes
 1816 * complete. Updates and deletes of sock_map entries are serialized with
 1817 * stab->lock so we do not get stale references, and any read of the map
 1818 * must be done with READ_ONCE() because it can race with an update.
1819 *
 1820 * A psock is destroyed via call_rcu, and only after any worker threads are
 1821 * cancelled and synced, so we are certain that all references from the
 1822 * update/lookup/delete operations and from the data path are no longer in use.
1823 *
John Fastabend2f857d02017-08-28 07:10:25 -07001824 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 1825 * programs may be inherited from the maps it belongs to. A reference count
 1826 * tracks the total number of references to the psock from all maps; the
 1827 * psock will not be released until this count reaches zero. The psock and the
 1828 * sock user data use the sk_callback_lock to protect critical data structures
 1829 * from concurrent access. This prevents two updates from modifying the user
 1830 * data in the sock at the same time, and since the lock is required anyway
 1831 * for modifying the callbacks, we simply increase its scope slightly.
John Fastabend174a79f2017-08-15 22:32:47 -07001832 *
John Fastabend2f857d02017-08-28 07:10:25 -07001833 * Rules to follow:
 1834 * - psock must always be read inside an RCU critical section
 1835 * - sk_user_data must only be modified inside sk_callback_lock and read
 1836 *   inside an RCU critical section.
 1837 * - psock->maps list must only be read & modified under psock->maps_lock
 1838 * - sock_map writes are serialized with stab->lock; reads must use READ_ONCE
 1839 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
John Fastabend174a79f2017-08-15 22:32:47 -07001840 */
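
/* Illustrative sketch (an assumption, not used anywhere in this file): the
 * rules above applied to a hypothetical reader that wants to keep a psock
 * beyond the RCU critical section. The helper name is made up; a caller that
 * got a non-NULL psock back would later drop the reference via
 * smap_release_sock().
 *
 *	static struct smap_psock *psock_get_example(struct sock *sk)
 *	{
 *		struct smap_psock *psock;
 *
 *		rcu_read_lock();
 *		psock = smap_psock_sk(sk);
 *		if (psock && !refcount_inc_not_zero(&psock->refcnt))
 *			psock = NULL;
 *		rcu_read_unlock();
 *		return psock;
 *	}
 */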
John Fastabende5cd3ab2018-05-14 10:00:16 -07001841
1842static int __sock_map_ctx_update_elem(struct bpf_map *map,
1843 struct bpf_sock_progs *progs,
1844 struct sock *sock,
John Fastabende5cd3ab2018-05-14 10:00:16 -07001845 void *key)
John Fastabend174a79f2017-08-15 22:32:47 -07001846{
John Fastabend4f738ad2018-03-18 12:57:10 -07001847 struct bpf_prog *verdict, *parse, *tx_msg;
John Fastabend2f857d02017-08-28 07:10:25 -07001848 struct smap_psock *psock;
John Fastabend4f738ad2018-03-18 12:57:10 -07001849 bool new = false;
Gustavo A. R. Silva0e436452018-05-17 09:08:43 -05001850 int err = 0;
John Fastabend174a79f2017-08-15 22:32:47 -07001851
John Fastabend2f857d02017-08-28 07:10:25 -07001852	/* 1. If the sock map has BPF programs, those will be inherited by the
 1853	 * sock being added. If the sock is already attached to BPF programs,
 1854	 * this results in an error.
 1855	 */
John Fastabende5cd3ab2018-05-14 10:00:16 -07001856 verdict = READ_ONCE(progs->bpf_verdict);
1857 parse = READ_ONCE(progs->bpf_parse);
1858 tx_msg = READ_ONCE(progs->bpf_tx_msg);
John Fastabend174a79f2017-08-15 22:32:47 -07001859
John Fastabend2f857d02017-08-28 07:10:25 -07001860 if (parse && verdict) {
John Fastabend174a79f2017-08-15 22:32:47 -07001861 /* bpf prog refcnt may be zero if a concurrent attach operation
1862 * removes the program after the above READ_ONCE() but before
1863 * we increment the refcnt. If this is the case abort with an
1864 * error.
1865 */
John Fastabend96174562018-05-17 14:06:40 -07001866 verdict = bpf_prog_inc_not_zero(verdict);
John Fastabend174a79f2017-08-15 22:32:47 -07001867 if (IS_ERR(verdict))
1868 return PTR_ERR(verdict);
1869
John Fastabend96174562018-05-17 14:06:40 -07001870 parse = bpf_prog_inc_not_zero(parse);
John Fastabend174a79f2017-08-15 22:32:47 -07001871 if (IS_ERR(parse)) {
1872 bpf_prog_put(verdict);
1873 return PTR_ERR(parse);
1874 }
1875 }
1876
John Fastabend4f738ad2018-03-18 12:57:10 -07001877 if (tx_msg) {
John Fastabend96174562018-05-17 14:06:40 -07001878 tx_msg = bpf_prog_inc_not_zero(tx_msg);
John Fastabend4f738ad2018-03-18 12:57:10 -07001879 if (IS_ERR(tx_msg)) {
John Fastabenda593f702018-05-17 14:06:35 -07001880 if (parse && verdict) {
John Fastabend4f738ad2018-03-18 12:57:10 -07001881 bpf_prog_put(parse);
John Fastabenda593f702018-05-17 14:06:35 -07001882 bpf_prog_put(verdict);
1883 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001884 return PTR_ERR(tx_msg);
1885 }
1886 }
1887
John Fastabend2f857d02017-08-28 07:10:25 -07001888 psock = smap_psock_sk(sock);
1889
1890 /* 2. Do not allow inheriting programs if psock exists and has
1891 * already inherited programs. This would create confusion on
1892 * which parser/verdict program is running. If no psock exists
1893 * create one. Inside sk_callback_lock to ensure concurrent create
1894 * doesn't update user data.
1895 */
1896 if (psock) {
1897 if (READ_ONCE(psock->bpf_parse) && parse) {
1898 err = -EBUSY;
1899 goto out_progs;
1900 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001901 if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
1902 err = -EBUSY;
1903 goto out_progs;
1904 }
1905 if (!refcount_inc_not_zero(&psock->refcnt)) {
1906 err = -EAGAIN;
1907 goto out_progs;
1908 }
John Fastabend2f857d02017-08-28 07:10:25 -07001909 } else {
John Fastabende5cd3ab2018-05-14 10:00:16 -07001910 psock = smap_init_psock(sock, map->numa_node);
John Fastabend174a79f2017-08-15 22:32:47 -07001911 if (IS_ERR(psock)) {
John Fastabend2f857d02017-08-28 07:10:25 -07001912 err = PTR_ERR(psock);
1913 goto out_progs;
John Fastabend174a79f2017-08-15 22:32:47 -07001914 }
John Fastabend2f857d02017-08-28 07:10:25 -07001915
John Fastabend174a79f2017-08-15 22:32:47 -07001916 set_bit(SMAP_TX_RUNNING, &psock->state);
John Fastabend4f738ad2018-03-18 12:57:10 -07001917 new = true;
John Fastabend174a79f2017-08-15 22:32:47 -07001918 }
1919
John Fastabend2f857d02017-08-28 07:10:25 -07001920 /* 3. At this point we have a reference to a valid psock that is
1921 * running. Attach any BPF programs needed.
1922 */
John Fastabend4f738ad2018-03-18 12:57:10 -07001923 if (tx_msg)
1924 bpf_tcp_msg_add(psock, sock, tx_msg);
1925 if (new) {
1926 err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
1927 if (err)
1928 goto out_free;
1929 }
1930
John Fastabend2f857d02017-08-28 07:10:25 -07001931 if (parse && verdict && !psock->strp_enabled) {
John Fastabend174a79f2017-08-15 22:32:47 -07001932 err = smap_init_sock(psock, sock);
1933 if (err)
John Fastabend2f857d02017-08-28 07:10:25 -07001934 goto out_free;
John Fastabende5cd3ab2018-05-14 10:00:16 -07001935 smap_init_progs(psock, verdict, parse);
John Fastabende9db4ef2018-06-30 06:17:47 -07001936 write_lock_bh(&sock->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001937 smap_start_sock(psock, sock);
John Fastabende9db4ef2018-06-30 06:17:47 -07001938 write_unlock_bh(&sock->sk_callback_lock);
John Fastabend174a79f2017-08-15 22:32:47 -07001939 }
1940
John Fastabende5cd3ab2018-05-14 10:00:16 -07001941 return err;
John Fastabend2f857d02017-08-28 07:10:25 -07001942out_free:
1943 smap_release_sock(psock, sock);
1944out_progs:
John Fastabenda593f702018-05-17 14:06:35 -07001945 if (parse && verdict) {
John Fastabend2f857d02017-08-28 07:10:25 -07001946 bpf_prog_put(parse);
John Fastabenda593f702018-05-17 14:06:35 -07001947 bpf_prog_put(verdict);
1948 }
John Fastabend4f738ad2018-03-18 12:57:10 -07001949 if (tx_msg)
1950 bpf_prog_put(tx_msg);
John Fastabend174a79f2017-08-15 22:32:47 -07001951 return err;
1952}
1953
John Fastabende5cd3ab2018-05-14 10:00:16 -07001954static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1955 struct bpf_map *map,
1956 void *key, u64 flags)
John Fastabend174a79f2017-08-15 22:32:47 -07001957{
1958 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
John Fastabende5cd3ab2018-05-14 10:00:16 -07001959 struct bpf_sock_progs *progs = &stab->progs;
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001960 struct sock *osock, *sock = skops->sk;
1961 struct smap_psock_map_entry *e;
1962 struct smap_psock *psock;
John Fastabende5cd3ab2018-05-14 10:00:16 -07001963 u32 i = *(u32 *)key;
1964 int err;
1965
1966 if (unlikely(flags > BPF_EXIST))
1967 return -EINVAL;
John Fastabende5cd3ab2018-05-14 10:00:16 -07001968 if (unlikely(i >= stab->map.max_entries))
1969 return -E2BIG;
1970
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001971 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
1972 if (!e)
1973 return -ENOMEM;
John Fastabende5cd3ab2018-05-14 10:00:16 -07001974
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001975 err = __sock_map_ctx_update_elem(map, progs, sock, key);
John Fastabende5cd3ab2018-05-14 10:00:16 -07001976 if (err)
1977 goto out;
1978
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001979 /* psock guaranteed to be present. */
1980 psock = smap_psock_sk(sock);
1981 raw_spin_lock_bh(&stab->lock);
1982 osock = stab->sock_map[i];
1983 if (osock && flags == BPF_NOEXIST) {
1984 err = -EEXIST;
1985 goto out_unlock;
John Fastabende5cd3ab2018-05-14 10:00:16 -07001986 }
Daniel Borkmann585f5a62018-08-16 21:49:10 +02001987 if (!osock && flags == BPF_EXIST) {
1988 err = -ENOENT;
1989 goto out_unlock;
1990 }
1991
1992 e->entry = &stab->sock_map[i];
1993 e->map = map;
1994 spin_lock_bh(&psock->maps_lock);
1995 list_add_tail(&e->list, &psock->maps);
1996 spin_unlock_bh(&psock->maps_lock);
1997
1998 stab->sock_map[i] = sock;
1999 if (osock) {
2000 psock = smap_psock_sk(osock);
2001 smap_list_map_remove(psock, &stab->sock_map[i]);
2002 smap_release_sock(psock, osock);
2003 }
2004 raw_spin_unlock_bh(&stab->lock);
2005 return 0;
2006out_unlock:
2007 smap_release_sock(psock, sock);
2008 raw_spin_unlock_bh(&stab->lock);
John Fastabende5cd3ab2018-05-14 10:00:16 -07002009out:
Daniel Borkmann585f5a62018-08-16 21:49:10 +02002010 kfree(e);
John Fastabende23afe52018-05-16 16:38:14 -07002011 return err;
John Fastabende5cd3ab2018-05-14 10:00:16 -07002012}
2013
2014int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
2015{
2016 struct bpf_sock_progs *progs;
John Fastabend464bc0f2017-08-28 07:10:04 -07002017 struct bpf_prog *orig;
John Fastabend174a79f2017-08-15 22:32:47 -07002018
John Fastabende5cd3ab2018-05-14 10:00:16 -07002019 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
2020 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
2021
2022 progs = &stab->progs;
John Fastabend81110382018-05-14 10:00:17 -07002023 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
2024 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2025
2026 progs = &htab->progs;
John Fastabende5cd3ab2018-05-14 10:00:16 -07002027 } else {
John Fastabend81374aa2017-08-28 07:11:43 -07002028 return -EINVAL;
John Fastabende5cd3ab2018-05-14 10:00:16 -07002029 }
John Fastabend81374aa2017-08-28 07:11:43 -07002030
John Fastabend464bc0f2017-08-28 07:10:04 -07002031 switch (type) {
John Fastabend4f738ad2018-03-18 12:57:10 -07002032 case BPF_SK_MSG_VERDICT:
John Fastabende5cd3ab2018-05-14 10:00:16 -07002033 orig = xchg(&progs->bpf_tx_msg, prog);
John Fastabend4f738ad2018-03-18 12:57:10 -07002034 break;
John Fastabend464bc0f2017-08-28 07:10:04 -07002035 case BPF_SK_SKB_STREAM_PARSER:
John Fastabende5cd3ab2018-05-14 10:00:16 -07002036 orig = xchg(&progs->bpf_parse, prog);
John Fastabend464bc0f2017-08-28 07:10:04 -07002037 break;
2038 case BPF_SK_SKB_STREAM_VERDICT:
John Fastabende5cd3ab2018-05-14 10:00:16 -07002039 orig = xchg(&progs->bpf_verdict, prog);
John Fastabend464bc0f2017-08-28 07:10:04 -07002040 break;
2041 default:
2042 return -EOPNOTSUPP;
2043 }
John Fastabend174a79f2017-08-15 22:32:47 -07002044
John Fastabend464bc0f2017-08-28 07:10:04 -07002045 if (orig)
2046 bpf_prog_put(orig);
John Fastabend174a79f2017-08-15 22:32:47 -07002047
2048 return 0;
2049}
2050
Sean Youngfdb5c452018-06-19 00:04:24 +01002051int sockmap_get_from_fd(const union bpf_attr *attr, int type,
2052 struct bpf_prog *prog)
2053{
2054 int ufd = attr->target_fd;
2055 struct bpf_map *map;
2056 struct fd f;
2057 int err;
2058
2059 f = fdget(ufd);
2060 map = __bpf_map_get(f);
2061 if (IS_ERR(map))
2062 return PTR_ERR(map);
2063
2064 err = sock_map_prog(map, prog, attr->attach_type);
2065 fdput(f);
2066 return err;
2067}
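
/* Illustrative sketch (not part of this file): attaching the three program
 * slots to a sockmap from user space. BPF_PROG_ATTACH with a map fd as the
 * target ends up in sockmap_get_from_fd() above. Assumes libbpf's
 * bpf_prog_attach(); the fd variables are placeholders.
 *
 *	bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *	bpf_prog_attach(msg_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
 */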
2068
John Fastabend174a79f2017-08-15 22:32:47 -07002069static void *sock_map_lookup(struct bpf_map *map, void *key)
2070{
2071 return NULL;
2072}
2073
2074static int sock_map_update_elem(struct bpf_map *map,
2075 void *key, void *value, u64 flags)
2076{
2077 struct bpf_sock_ops_kern skops;
2078 u32 fd = *(u32 *)value;
2079 struct socket *socket;
2080 int err;
2081
2082 socket = sockfd_lookup(fd, &err);
2083 if (!socket)
2084 return err;
2085
2086 skops.sk = socket->sk;
2087 if (!skops.sk) {
2088 fput(socket->file);
2089 return -EINVAL;
2090 }
2091
John Fastabend435bf0d2017-10-18 07:10:15 -07002092 if (skops.sk->sk_type != SOCK_STREAM ||
2093 skops.sk->sk_protocol != IPPROTO_TCP) {
2094 fput(socket->file);
2095 return -EOPNOTSUPP;
2096 }
2097
John Fastabend99ba2b52018-07-05 08:50:04 -07002098 lock_sock(skops.sk);
2099 preempt_disable();
2100 rcu_read_lock();
John Fastabend2f857d02017-08-28 07:10:25 -07002101 err = sock_map_ctx_update_elem(&skops, map, key, flags);
John Fastabend99ba2b52018-07-05 08:50:04 -07002102 rcu_read_unlock();
2103 preempt_enable();
2104 release_sock(skops.sk);
John Fastabend174a79f2017-08-15 22:32:47 -07002105 fput(socket->file);
2106 return err;
2107}
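
/* Illustrative sketch (not part of this file): the user-space side of the
 * update above. The 4-byte value written into the map is a socket file
 * descriptor, which the kernel resolves to the underlying struct sock; the
 * socket must be TCP (SOCK_STREAM/IPPROTO_TCP). Assumes libbpf's
 * bpf_map_update_elem(); map_fd and sock_fd are placeholders.
 *
 *	__u32 key = 0, value = sock_fd;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
 *		perror("bpf_map_update_elem");
 */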
2108
John Fastabendba6b8de2018-04-23 15:39:23 -07002109static void sock_map_release(struct bpf_map *map)
John Fastabend3d9e9522018-02-05 10:17:54 -08002110{
John Fastabende5cd3ab2018-05-14 10:00:16 -07002111 struct bpf_sock_progs *progs;
John Fastabend3d9e9522018-02-05 10:17:54 -08002112 struct bpf_prog *orig;
2113
John Fastabend81110382018-05-14 10:00:17 -07002114 if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
2115 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
2116
2117 progs = &stab->progs;
2118 } else {
2119 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2120
2121 progs = &htab->progs;
2122 }
2123
John Fastabende5cd3ab2018-05-14 10:00:16 -07002124 orig = xchg(&progs->bpf_parse, NULL);
John Fastabend3d9e9522018-02-05 10:17:54 -08002125 if (orig)
2126 bpf_prog_put(orig);
John Fastabende5cd3ab2018-05-14 10:00:16 -07002127 orig = xchg(&progs->bpf_verdict, NULL);
John Fastabend3d9e9522018-02-05 10:17:54 -08002128 if (orig)
2129 bpf_prog_put(orig);
John Fastabend4f738ad2018-03-18 12:57:10 -07002130
John Fastabende5cd3ab2018-05-14 10:00:16 -07002131 orig = xchg(&progs->bpf_tx_msg, NULL);
John Fastabend4f738ad2018-03-18 12:57:10 -07002132 if (orig)
2133 bpf_prog_put(orig);
John Fastabend3d9e9522018-02-05 10:17:54 -08002134}
2135
John Fastabend81110382018-05-14 10:00:17 -07002136static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
2137{
2138 struct bpf_htab *htab;
2139 int i, err;
2140 u64 cost;
2141
2142 if (!capable(CAP_NET_ADMIN))
2143 return ERR_PTR(-EPERM);
2144
2145 /* check sanity of attributes */
Daniel Borkmannb845c892018-08-21 15:55:00 +02002146 if (attr->max_entries == 0 ||
2147 attr->key_size == 0 ||
2148 attr->value_size != 4 ||
John Fastabend81110382018-05-14 10:00:17 -07002149 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
2150 return ERR_PTR(-EINVAL);
2151
Yonghong Song683d2ac2018-05-16 14:06:26 -07002152 if (attr->key_size > MAX_BPF_STACK)
2153 /* eBPF programs initialize keys on stack, so they cannot be
2154 * larger than max stack size
2155 */
2156 return ERR_PTR(-E2BIG);
2157
John Fastabend81110382018-05-14 10:00:17 -07002158 err = bpf_tcp_ulp_register();
2159 if (err && err != -EEXIST)
2160 return ERR_PTR(err);
2161
2162 htab = kzalloc(sizeof(*htab), GFP_USER);
2163 if (!htab)
2164 return ERR_PTR(-ENOMEM);
2165
2166 bpf_map_init_from_attr(&htab->map, attr);
2167
2168 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
2169 htab->elem_size = sizeof(struct htab_elem) +
2170 round_up(htab->map.key_size, 8);
2171 err = -EINVAL;
2172 if (htab->n_buckets == 0 ||
2173 htab->n_buckets > U32_MAX / sizeof(struct bucket))
2174 goto free_htab;
2175
2176 cost = (u64) htab->n_buckets * sizeof(struct bucket) +
2177 (u64) htab->elem_size * htab->map.max_entries;
2178
2179 if (cost >= U32_MAX - PAGE_SIZE)
2180 goto free_htab;
2181
2182 htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
2183 err = bpf_map_precharge_memlock(htab->map.pages);
2184 if (err)
2185 goto free_htab;
2186
2187 err = -ENOMEM;
2188 htab->buckets = bpf_map_area_alloc(
2189 htab->n_buckets * sizeof(struct bucket),
2190 htab->map.numa_node);
2191 if (!htab->buckets)
2192 goto free_htab;
2193
2194 for (i = 0; i < htab->n_buckets; i++) {
2195 INIT_HLIST_HEAD(&htab->buckets[i].head);
2196 raw_spin_lock_init(&htab->buckets[i].lock);
2197 }
2198
2199 return &htab->map;
2200free_htab:
2201 kfree(htab);
2202 return ERR_PTR(err);
2203}
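
/* Illustrative sketch (not part of this file): a sockhash is created like a
 * sockmap except that the key may be an arbitrary blob (non-zero, at most
 * MAX_BPF_STACK bytes) while the value is still 4 bytes. The key layout below
 * is only an example. Assumes libbpf's bpf_create_map().
 *
 *	struct sock_key {
 *		__be32 daddr;
 *		__be16 dport;
 *	};
 *
 *	int hash_fd = bpf_create_map(BPF_MAP_TYPE_SOCKHASH,
 *				     sizeof(struct sock_key), sizeof(__u32),
 *				     1024, 0);
 */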
2204
John Fastabende9db4ef2018-06-30 06:17:47 -07002205static void __bpf_htab_free(struct rcu_head *rcu)
John Fastabend81110382018-05-14 10:00:17 -07002206{
John Fastabende9db4ef2018-06-30 06:17:47 -07002207 struct bpf_htab *htab;
John Fastabend81110382018-05-14 10:00:17 -07002208
John Fastabende9db4ef2018-06-30 06:17:47 -07002209 htab = container_of(rcu, struct bpf_htab, rcu);
2210 bpf_map_area_free(htab->buckets);
2211 kfree(htab);
John Fastabend81110382018-05-14 10:00:17 -07002212}
2213
2214static void sock_hash_free(struct bpf_map *map)
2215{
2216 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2217 int i;
2218
2219 synchronize_rcu();
2220
2221 /* At this point no update, lookup or delete operations can happen.
 2222 * However, be aware that we can still get socket state event updates
 2223 * and data ready callbacks that reference the psock from sk_user_data.
 2224 * Also, psock worker threads are still in-flight. So smap_release_sock
 2225 * will only free the psock after cancel_sync on the worker threads
 2226 * and a grace period expires, to ensure the psock is really safe to remove.
2227 */
2228 rcu_read_lock();
2229 for (i = 0; i < htab->n_buckets; i++) {
John Fastabende9db4ef2018-06-30 06:17:47 -07002230 struct bucket *b = __select_bucket(htab, i);
2231 struct hlist_head *head;
John Fastabend81110382018-05-14 10:00:17 -07002232 struct hlist_node *n;
2233 struct htab_elem *l;
2234
John Fastabende9db4ef2018-06-30 06:17:47 -07002235 raw_spin_lock_bh(&b->lock);
2236 head = &b->head;
John Fastabend81110382018-05-14 10:00:17 -07002237 hlist_for_each_entry_safe(l, n, head, hash_node) {
2238 struct sock *sock = l->sk;
2239 struct smap_psock *psock;
2240
2241 hlist_del_rcu(&l->hash_node);
John Fastabend81110382018-05-14 10:00:17 -07002242 psock = smap_psock_sk(sock);
2243 /* This check handles a racing sock event that can get
 2244		 * the sk_callback_lock before this case but after xchg happens,
 2245		 * causing the refcnt to hit zero and the sock user data
 2246		 * (psock) to be NULL and queued for garbage collection.
2247 */
2248 if (likely(psock)) {
John Fastabend54fedb42018-06-30 06:17:41 -07002249 smap_list_hash_remove(psock, l);
John Fastabend81110382018-05-14 10:00:17 -07002250 smap_release_sock(psock, sock);
2251 }
John Fastabende9db4ef2018-06-30 06:17:47 -07002252 free_htab_elem(htab, l);
John Fastabend81110382018-05-14 10:00:17 -07002253 }
John Fastabende9db4ef2018-06-30 06:17:47 -07002254 raw_spin_unlock_bh(&b->lock);
John Fastabend81110382018-05-14 10:00:17 -07002255 }
2256 rcu_read_unlock();
John Fastabende9db4ef2018-06-30 06:17:47 -07002257 call_rcu(&htab->rcu, __bpf_htab_free);
John Fastabend81110382018-05-14 10:00:17 -07002258}
2259
2260static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
2261 void *key, u32 key_size, u32 hash,
2262 struct sock *sk,
2263 struct htab_elem *old_elem)
2264{
2265 struct htab_elem *l_new;
2266
2267 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
2268 if (!old_elem) {
2269 atomic_dec(&htab->count);
2270 return ERR_PTR(-E2BIG);
2271 }
2272 }
2273 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
2274 htab->map.numa_node);
Daniel Borkmanneb294292018-08-22 18:09:17 +02002275 if (!l_new) {
2276 atomic_dec(&htab->count);
John Fastabend81110382018-05-14 10:00:17 -07002277 return ERR_PTR(-ENOMEM);
Daniel Borkmanneb294292018-08-22 18:09:17 +02002278 }
John Fastabend81110382018-05-14 10:00:17 -07002279
2280 memcpy(l_new->key, key, key_size);
2281 l_new->sk = sk;
2282 l_new->hash = hash;
2283 return l_new;
2284}
2285
John Fastabend81110382018-05-14 10:00:17 -07002286static inline u32 htab_map_hash(const void *key, u32 key_len)
2287{
2288 return jhash(key, key_len, 0);
2289}
2290
2291static int sock_hash_get_next_key(struct bpf_map *map,
2292 void *key, void *next_key)
2293{
2294 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2295 struct htab_elem *l, *next_l;
2296 struct hlist_head *h;
2297 u32 hash, key_size;
2298 int i = 0;
2299
2300 WARN_ON_ONCE(!rcu_read_lock_held());
2301
2302 key_size = map->key_size;
2303 if (!key)
2304 goto find_first_elem;
2305 hash = htab_map_hash(key, key_size);
2306 h = select_bucket(htab, hash);
2307
2308 l = lookup_elem_raw(h, hash, key, key_size);
2309 if (!l)
2310 goto find_first_elem;
2311 next_l = hlist_entry_safe(
2312 rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
2313 struct htab_elem, hash_node);
2314 if (next_l) {
2315 memcpy(next_key, next_l->key, key_size);
2316 return 0;
2317 }
2318
2319 /* no more elements in this hash list, go to the next bucket */
2320 i = hash & (htab->n_buckets - 1);
2321 i++;
2322
2323find_first_elem:
2324 /* iterate over buckets */
2325 for (; i < htab->n_buckets; i++) {
2326 h = select_bucket(htab, i);
2327
2328 /* pick first element in the bucket */
2329 next_l = hlist_entry_safe(
2330 rcu_dereference_raw(hlist_first_rcu(h)),
2331 struct htab_elem, hash_node);
2332 if (next_l) {
2333 /* if it's not empty, just return it */
2334 memcpy(next_key, next_l->key, key_size);
2335 return 0;
2336 }
2337 }
2338
2339 /* iterated over all buckets and all elements */
2340 return -ENOENT;
2341}
2342
2343static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
2344 struct bpf_map *map,
2345 void *key, u64 map_flags)
2346{
2347 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2348 struct bpf_sock_progs *progs = &htab->progs;
2349 struct htab_elem *l_new = NULL, *l_old;
2350 struct smap_psock_map_entry *e = NULL;
2351 struct hlist_head *head;
2352 struct smap_psock *psock;
2353 u32 key_size, hash;
2354 struct sock *sock;
2355 struct bucket *b;
2356 int err;
2357
2358 sock = skops->sk;
2359
2360 if (sock->sk_type != SOCK_STREAM ||
2361 sock->sk_protocol != IPPROTO_TCP)
2362 return -EOPNOTSUPP;
2363
2364 if (unlikely(map_flags > BPF_EXIST))
2365 return -EINVAL;
2366
2367 e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
2368 if (!e)
2369 return -ENOMEM;
2370
2371 WARN_ON_ONCE(!rcu_read_lock_held());
2372 key_size = map->key_size;
2373 hash = htab_map_hash(key, key_size);
2374 b = __select_bucket(htab, hash);
2375 head = &b->head;
2376
Daniel Borkmann585f5a62018-08-16 21:49:10 +02002377 err = __sock_map_ctx_update_elem(map, progs, sock, key);
John Fastabend81110382018-05-14 10:00:17 -07002378 if (err)
2379 goto err;
2380
John Fastabend547b3aa2018-07-05 08:05:56 -07002381	/* psock is valid here because otherwise the *ctx_update_elem call above
 2382	 * would have returned an error, so it is safe to skip the error check.
2383 */
2384 psock = smap_psock_sk(sock);
John Fastabend81110382018-05-14 10:00:17 -07002385 raw_spin_lock_bh(&b->lock);
2386 l_old = lookup_elem_raw(head, hash, key, key_size);
2387 if (l_old && map_flags == BPF_NOEXIST) {
2388 err = -EEXIST;
2389 goto bucket_err;
2390 }
2391 if (!l_old && map_flags == BPF_EXIST) {
2392 err = -ENOENT;
2393 goto bucket_err;
2394 }
2395
2396 l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
2397 if (IS_ERR(l_new)) {
2398 err = PTR_ERR(l_new);
2399 goto bucket_err;
2400 }
2401
John Fastabende9db4ef2018-06-30 06:17:47 -07002402 rcu_assign_pointer(e->hash_link, l_new);
Daniel Borkmann585f5a62018-08-16 21:49:10 +02002403 e->map = map;
John Fastabende9db4ef2018-06-30 06:17:47 -07002404 spin_lock_bh(&psock->maps_lock);
John Fastabend81110382018-05-14 10:00:17 -07002405 list_add_tail(&e->list, &psock->maps);
John Fastabende9db4ef2018-06-30 06:17:47 -07002406 spin_unlock_bh(&psock->maps_lock);
John Fastabend81110382018-05-14 10:00:17 -07002407
2408 /* add new element to the head of the list, so that
2409 * concurrent search will find it before old elem
2410 */
2411 hlist_add_head_rcu(&l_new->hash_node, head);
2412 if (l_old) {
2413 psock = smap_psock_sk(l_old->sk);
2414
2415 hlist_del_rcu(&l_old->hash_node);
John Fastabend54fedb42018-06-30 06:17:41 -07002416 smap_list_hash_remove(psock, l_old);
John Fastabend81110382018-05-14 10:00:17 -07002417 smap_release_sock(psock, l_old->sk);
2418 free_htab_elem(htab, l_old);
2419 }
2420 raw_spin_unlock_bh(&b->lock);
2421 return 0;
2422bucket_err:
John Fastabend547b3aa2018-07-05 08:05:56 -07002423 smap_release_sock(psock, sock);
John Fastabend81110382018-05-14 10:00:17 -07002424 raw_spin_unlock_bh(&b->lock);
2425err:
2426 kfree(e);
John Fastabend81110382018-05-14 10:00:17 -07002427 return err;
2428}
2429
2430static int sock_hash_update_elem(struct bpf_map *map,
2431 void *key, void *value, u64 flags)
2432{
2433 struct bpf_sock_ops_kern skops;
2434 u32 fd = *(u32 *)value;
2435 struct socket *socket;
2436 int err;
2437
2438 socket = sockfd_lookup(fd, &err);
2439 if (!socket)
2440 return err;
2441
2442 skops.sk = socket->sk;
2443 if (!skops.sk) {
2444 fput(socket->file);
2445 return -EINVAL;
2446 }
2447
John Fastabend99ba2b52018-07-05 08:50:04 -07002448 lock_sock(skops.sk);
2449 preempt_disable();
2450 rcu_read_lock();
John Fastabend81110382018-05-14 10:00:17 -07002451 err = sock_hash_ctx_update_elem(&skops, map, key, flags);
John Fastabend99ba2b52018-07-05 08:50:04 -07002452 rcu_read_unlock();
2453 preempt_enable();
2454 release_sock(skops.sk);
John Fastabend81110382018-05-14 10:00:17 -07002455 fput(socket->file);
2456 return err;
2457}
2458
2459static int sock_hash_delete_elem(struct bpf_map *map, void *key)
2460{
2461 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2462 struct hlist_head *head;
2463 struct bucket *b;
2464 struct htab_elem *l;
2465 u32 hash, key_size;
2466 int ret = -ENOENT;
2467
2468 key_size = map->key_size;
2469 hash = htab_map_hash(key, key_size);
2470 b = __select_bucket(htab, hash);
2471 head = &b->head;
2472
2473 raw_spin_lock_bh(&b->lock);
2474 l = lookup_elem_raw(head, hash, key, key_size);
2475 if (l) {
2476 struct sock *sock = l->sk;
2477 struct smap_psock *psock;
2478
2479 hlist_del_rcu(&l->hash_node);
John Fastabend81110382018-05-14 10:00:17 -07002480 psock = smap_psock_sk(sock);
2481 /* This check handles a racing sock event that can get the
 2482		 * sk_callback_lock before this case but after xchg happens,
 2483		 * causing the refcnt to hit zero and the sock user data (psock)
 2484		 * to be NULL and queued for garbage collection.
2485 */
2486 if (likely(psock)) {
John Fastabend54fedb42018-06-30 06:17:41 -07002487 smap_list_hash_remove(psock, l);
John Fastabend81110382018-05-14 10:00:17 -07002488 smap_release_sock(psock, sock);
2489 }
John Fastabend81110382018-05-14 10:00:17 -07002490 free_htab_elem(htab, l);
2491 ret = 0;
2492 }
2493 raw_spin_unlock_bh(&b->lock);
2494 return ret;
2495}
2496
2497struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
2498{
2499 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2500 struct hlist_head *head;
2501 struct htab_elem *l;
2502 u32 key_size, hash;
2503 struct bucket *b;
2504 struct sock *sk;
2505
2506 key_size = map->key_size;
2507 hash = htab_map_hash(key, key_size);
2508 b = __select_bucket(htab, hash);
2509 head = &b->head;
2510
John Fastabend81110382018-05-14 10:00:17 -07002511 l = lookup_elem_raw(head, hash, key, key_size);
2512 sk = l ? l->sk : NULL;
John Fastabend81110382018-05-14 10:00:17 -07002513 return sk;
2514}
2515
John Fastabend174a79f2017-08-15 22:32:47 -07002516const struct bpf_map_ops sock_map_ops = {
2517 .map_alloc = sock_map_alloc,
2518 .map_free = sock_map_free,
2519 .map_lookup_elem = sock_map_lookup,
2520 .map_get_next_key = sock_map_get_next_key,
2521 .map_update_elem = sock_map_update_elem,
2522 .map_delete_elem = sock_map_delete_elem,
John Fastabendba6b8de2018-04-23 15:39:23 -07002523 .map_release_uref = sock_map_release,
Daniel Borkmanne8d2bec2018-08-12 01:59:17 +02002524 .map_check_btf = map_check_no_btf,
John Fastabend174a79f2017-08-15 22:32:47 -07002525};
2526
John Fastabend81110382018-05-14 10:00:17 -07002527const struct bpf_map_ops sock_hash_ops = {
2528 .map_alloc = sock_hash_alloc,
2529 .map_free = sock_hash_free,
2530 .map_lookup_elem = sock_map_lookup,
2531 .map_get_next_key = sock_hash_get_next_key,
2532 .map_update_elem = sock_hash_update_elem,
2533 .map_delete_elem = sock_hash_delete_elem,
John Fastabendcaac76a2018-06-30 06:17:52 -07002534 .map_release_uref = sock_map_release,
Daniel Borkmanne8d2bec2018-08-12 01:59:17 +02002535 .map_check_btf = map_check_no_btf,
John Fastabend81110382018-05-14 10:00:17 -07002536};
2537
John Fastabend2f857d02017-08-28 07:10:25 -07002538BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
2539 struct bpf_map *, map, void *, key, u64, flags)
John Fastabend174a79f2017-08-15 22:32:47 -07002540{
2541 WARN_ON_ONCE(!rcu_read_lock_held());
John Fastabend2f857d02017-08-28 07:10:25 -07002542 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
John Fastabend174a79f2017-08-15 22:32:47 -07002543}
2544
2545const struct bpf_func_proto bpf_sock_map_update_proto = {
2546 .func = bpf_sock_map_update,
2547 .gpl_only = false,
2548 .pkt_access = true,
2549 .ret_type = RET_INTEGER,
2550 .arg1_type = ARG_PTR_TO_CTX,
2551 .arg2_type = ARG_CONST_MAP_PTR,
2552 .arg3_type = ARG_PTR_TO_MAP_KEY,
2553 .arg4_type = ARG_ANYTHING,
John Fastabend174a79f2017-08-15 22:32:47 -07002554};
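
/* Illustrative sketch (not part of this file): how a sockops BPF program might
 * use this helper to add the current socket at index 0 once the connection is
 * established, in the spirit of samples/bpf/sockmap/. The section name, map
 * name and key are assumptions; "sock_map" must be declared elsewhere in that
 * BPF program.
 *
 *	SEC("sockops")
 *	int bpf_sockops(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
 *		    skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *		return 0;
 *	}
 */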
John Fastabend81110382018-05-14 10:00:17 -07002555
2556BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
2557 struct bpf_map *, map, void *, key, u64, flags)
2558{
2559 WARN_ON_ONCE(!rcu_read_lock_held());
2560 return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
2561}
2562
2563const struct bpf_func_proto bpf_sock_hash_update_proto = {
2564 .func = bpf_sock_hash_update,
2565 .gpl_only = false,
2566 .pkt_access = true,
2567 .ret_type = RET_INTEGER,
2568 .arg1_type = ARG_PTR_TO_CTX,
2569 .arg2_type = ARG_CONST_MAP_PTR,
2570 .arg3_type = ARG_PTR_TO_MAP_KEY,
2571 .arg4_type = ARG_ANYTHING,
2572};