// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can properly
 * check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods, therefore eBPF programs must run under the RCU read lock when
 * they are allowed to access maps; hence the rcu_read_lock_held() check in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
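
/* Illustrative sketch, not part of this file: from the program side this
 * helper is reached through the uapi wrapper of the same name, and the
 * verifier enforces the NULL check implied by RET_PTR_TO_MAP_VALUE_OR_NULL
 * before the returned pointer may be dereferenced, e.g.:
 *
 *	u64 *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */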

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
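
/* Illustrative sketch, not part of this file: the flags argument of the
 * update helper selects the update policy, e.g.:
 *
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);      (create or update)
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_NOEXIST);  (create only)
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_EXIST);    (update only)
 */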

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
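
/* Note: push/pop/peek take no key because they are implemented by map types
 * with an intrinsic ordering, such as BPF_MAP_TYPE_QUEUE (FIFO) and
 * BPF_MAP_TYPE_STACK (LIFO); pop and peek write the element into the
 * uninitialized value buffer supplied by the caller.
 */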

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
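
/* Note on the clock choices above: bpf_ktime_get_ns reads CLOCK_MONOTONIC
 * through the NMI-safe fast accessor, bpf_ktime_get_boot_ns additionally
 * counts time spent in suspend, and bpf_ktime_get_coarse_ns returns the
 * cheaper tick-granularity timestamp without touching the clocksource.
 */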

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
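
/* Illustrative sketch, not part of this file: callers split the returned
 * value as
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;    (what userspace calls the PID)
 *	u32 pid = (u32)id;      (what userspace calls the TID)
 */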

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
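
/* Illustrative sketch, not part of this file: on the program side the
 * destination is typically sized to TASK_COMM_LEN (16), e.g.:
 *
 *	char comm[16];
 *	bpf_get_current_comm(comm, sizeof(comm));
 */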

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif
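
/* The fallback above is a plain test-and-set spinlock: spin with relaxed
 * reads until the word reads zero, then try to take it with an atomic
 * exchange; a non-zero return from atomic_xchg() means another CPU won the
 * race, so spin again.
 */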

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
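
/* Illustrative sketch, not part of this file: a program serializes access to
 * a map value by embedding the lock in the value type, e.g.:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 cnt;
 *	};
 *	...
 *	bpf_spin_lock(&v->lock);
 *	v->cnt++;
 *	bpf_spin_unlock(&v->lock);
 */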

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now but provides the ability to
	 * extend the API; the verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
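
/* Illustrative sketch, not part of this file: a sysctl program might parse a
 * decimal value written by userspace as
 *
 *	long val;
 *	if (bpf_strtol(buf, buf_len, 0, &val) < 0)
 *		return 0;
 *
 * where flags carries the numeric base (0 for auto-detection, as with
 * kstrtol) and a positive return value is the number of bytes consumed.
 */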

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
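
/* Illustrative sketch, not part of this file: userspace typically obtains dev
 * and ino by stat()ing /proc/self/ns/pid and hands them to the program, so
 * the helper can report pid/tgid as seen from that namespace:
 *
 *	struct bpf_pidns_info nsdata;
 *	bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(nsdata));
 */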

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
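
/* Note: copy_from_user() may fault and sleep, so bpf_copy_from_user is meant
 * for sleepable BPF programs; on failure the destination is zeroed so the
 * program never observes uninitialized memory.
 */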

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}
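
/* try_get_fmt_tmp_buf() leaves preemption disabled on success so the per-cpu
 * buffer stays owned by this context; bpf_bprintf_cleanup() drops the nest
 * level and re-enables preemption, which is why every successful
 * bpf_bprintf_prepare() call must be paired with a cleanup.
 */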

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest
			 * way to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}
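
/* The buffer handed back through bin_args is essentially the vbin_printf()
 * style format that bstr_printf() consumes: numeric arguments stored
 * u32-aligned at their natural size, with strings (including the
 * pre-formatted IPs above) copied inline, nul-terminated.
 */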

#define MAX_SNPRINTF_VARARGS	12

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
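
/* Illustrative sketch, not part of this file: the program side packs the
 * variadic arguments into an array of u64, e.g.:
 *
 *	char out[64];
 *	u64 args[] = { (u64)pid, (u64)addr };
 *	bpf_snprintf(out, sizeof(out), "pid %d at %pK", args, sizeof(args));
 *
 * The return value counts the terminating NUL, mirroring err + 1 above.
 */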

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

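/* bpf_base_func_proto() exposes helpers in three tiers: a base set available
 * to any program type that calls it, a set additionally gated on
 * bpf_capable() (locking, per-cpu pointers, jiffies), and a tracing-flavoured
 * set gated on perfmon_capable(), parts of which are further restricted by
 * the kernel lockdown LSM.
 */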
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	default:
		return NULL;
	}
}