// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

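/* Typical calling pattern for the timer helpers above (illustrative
 * sketch; mirrors bpf_test_run() and the other test-run handlers below):
 *
 *	struct bpf_test_timer t = { NO_MIGRATE };
 *	u32 duration;
 *	int err;
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		... run the program once ...
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * The 'iterations' argument lets a caller credit several runs per loop
 * iteration; the live XDP mode below uses it to account a whole batch.
 */
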
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	struct xdp_frame frm;
	u8 data[];
};

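/* Rough layout of each pool page (sketch): the xdp_page_head above sits at
 * the start of the page; data[] then holds XDP_PACKET_HEADROOM bytes of
 * headroom (shrunk by the metadata length), followed by the metadata and
 * packet payload copied from the original context by
 * xdp_test_run_init_page(). TEST_XDP_FRAME_SIZE below is the space left
 * for data[].
 */
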
struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct xdp_mem_info mem = {};
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
		head->orig_ctx.data_meta != head->ctx.data_meta ||
		head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}

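/* Hand frames that returned XDP_PASS to the local stack: bulk-allocate the
 * skb heads, build an skb around each frame and feed the batch to
 * netif_receive_skb_list(), roughly what a driver's receive path would do.
 */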
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

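	/* 'repeat' counts program runs and each batch runs the program once
	 * per allocated page, so credit xdp.frame_cnt runs per round; the
	 * loop then stops once 'repeat' runs have been accounted.
	 */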
	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_member {
	u64 c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	/* randomly return NULL */
	if (get_jiffies_64() % 2)
		return NULL;
	return &prog_test_struct;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
}

noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_check_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
BTF_ID(func, bpf_kfunc_call_test_fail1)
BTF_ID(func, bpf_kfunc_call_test_fail2)
BTF_ID(func, bpf_kfunc_call_test_fail3)
BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_SET_END(test_sk_check_kfunc_ids)

BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_acquire_kfunc_ids)

BTF_SET_START(test_sk_release_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_SET_END(test_sk_release_kfunc_ids)

BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_ret_null_kfunc_ids)

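/* The BTF ID sets above describe the test kfuncs; bpf_prog_test_run_init()
 * at the bottom of this file registers them for BPF_PROG_TYPE_SCHED_CLS.
 */
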
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

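	/* Fold both results into one retval: whether the traced function's
	 * side effect was observed goes in the upper 16 bits, its return
	 * value in the lower 16 bits.
	 */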
	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there are no non-zero bytes
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

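	/* Input larger than the linear area: build a multi-buffer xdp_buff
	 * by copying the remainder into page fragments in the shared info,
	 * one page at a time.
	 */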
	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner        = THIS_MODULE,
	.check_set    = &test_sk_check_kfunc_ids,
	.acquire_set  = &test_sk_acquire_kfunc_ids,
	.release_set  = &test_sk_release_kfunc_ids,
	.ret_null_set = &test_sk_ret_null_kfunc_ids,
};

static int __init bpf_prog_test_run_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
}
late_initcall(bpf_prog_test_run_init);