/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function,
 * it should return &bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods; eBPF programs must therefore run under the RCU read lock
 * whenever they are allowed to access maps, so rcu_read_lock_held() is
 * checked in all three functions.
 */
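
/* Illustrative sketch (not part of this file): a subsystem's
 * get_func_proto() callback could wire these protos up roughly as
 * follows. The function name is hypothetical and the exact callback
 * signature depends on the kernel version.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */
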
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
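
/* Usage sketch from the eBPF program side (illustrative; "my_map" is a
 * hypothetical map defined by the program). The verifier enforces the
 * NULL check on the returned value pointer before any dereference:
 *
 *	u32 key = 0;
 *	u64 *value = bpf_map_lookup_elem(&my_map, &key);
 *	if (value)
 *		__sync_fetch_and_add(value, 1);
 */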

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};
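
/* The flags argument (ARG_ANYTHING) selects the update semantics; the
 * values below come from the uapi header (sketch, "my_map" again
 * hypothetical):
 *
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);     // create or update
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_NOEXIST); // create only
 *	bpf_map_update_elem(&my_map, &key, &val, BPF_EXIST);   // update only
 */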

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
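
/* Sketch of a common pattern: use the CPU id as the key into an array
 * map to keep per-CPU statistics ("cpu_stats" is a hypothetical map):
 *
 *	u32 cpu = bpf_get_smp_processor_id();
 *	u64 *cnt = bpf_map_lookup_elem(&cpu_stats, &cpu);
 *	if (cnt)
 *		(*cnt)++;
 */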

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
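
/* Sketch: timestamps from bpf_ktime_get_ns() are typically paired to
 * measure a latency, e.g. taken once at entry and once at exit:
 *
 *	u64 start = bpf_ktime_get_ns();
 *	...
 *	u64 delta_ns = bpf_ktime_get_ns() - start;
 */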

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
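
/* The caller splits the packed return value; the upper 32 bits carry
 * the tgid (the user-space "PID") and the lower 32 bits the pid (the
 * user-space "TID"). Sketch:
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;
 *	u32 pid  = (u32)pid_tgid;
 */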

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
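
/* Same packing convention: gid in the upper 32 bits, uid in the lower.
 * Sketch:
 *
 *	u64 uid_gid = bpf_get_current_uid_gid();
 *	u32 gid = uid_gid >> 32;
 *	u32 uid = (u32)uid_gid;
 */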

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
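
/* Usage sketch: the destination buffer must have a size known to the
 * verifier (ARG_CONST_SIZE); TASK_COMM_LEN (16 bytes) is the natural
 * choice:
 *
 *	char comm[TASK_COMM_LEN];
 *	bpf_get_current_comm(comm, sizeof(comm));
 */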