// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
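
/* A minimal user-space sketch (illustrative sizes; error handling elided)
 * of a map creation that satisfies array_map_alloc_check(), issued via
 * the raw bpf(2) syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	// must be exactly 4 (u32 index)
 *		.value_size  = 64,	// > 0 and <= KMALLOC_MAX_SIZE
 *		.max_entries = 256,	// must be non-zero
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */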

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;
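	/* e.g. attr->max_entries == 200: fls_long(199) == 8, so mask64
	 * ends up as (1ULL << 8) - 1 == 255 and, for unprivileged users,
	 * max_entries is rounded up to 256 below.
	 */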

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
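
/* Program-side sketch of the lookup above (map name and value layout are
 * illustrative; the classic bpf_map_def/SEC("maps") definition style is
 * assumed):
 *
 *	struct bpf_map_def SEC("maps") my_array = {
 *		.type        = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(u64),
 *		.max_entries = 256,
 *	};
 *
 *	u32 key = 0;
 *	u64 *val = bpf_map_lookup_elem(&my_array, &key);
 *	if (val)	// NULL check is required by the verifier
 *		__sync_fetch_and_add(val, 1);
 */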

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
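
/* Roughly, the sequence emitted above corresponds to this C sketch, with
 * the unprivileged mask baked in at generation time instead of being
 * tested at runtime:
 *
 *	elem = NULL;
 *	index = *(u32 *)key;
 *	if (index < map->max_entries) {
 *		index &= array->index_mask;	// unpriv only: bounds the
 *						// speculative access
 *		elem = array->value + index * elem_size;
 *	}
 *	return elem;
 */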

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
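
/* User-space sketch of the contract above: a BPF_MAP_LOOKUP_ELEM on a
 * percpu array must supply round_up(value_size, 8) bytes per possible
 * CPU (NCPUS below is an illustrative upper bound, e.g. derived from
 * /sys/devices/system/cpu/possible):
 *
 *	u64 values[NCPUS];
 *	u32 key = 0;
 *	bpf_map_lookup_elem(map_fd, &key, values);	// libbpf wrapper
 *	// values[cpu] now holds the element as seen by each possible CPU
 */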

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
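
/* User-space iteration sketch: a NULL key (or any out-of-range index,
 * which the function above treats like U32_MAX) restarts the walk at
 * element 0:
 *
 *	u32 key, next_key;
 *	void *prev = NULL;
 *	while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
 *		// ... look up / process next_key ...
 *		key = next_key;
 *		prev = &key;
 *	}
 */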

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
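
/* Sketch of a BPF_F_LOCK update from user space (value layout
 * illustrative): the map's value type must embed a struct bpf_spin_lock,
 * which is what map_value_has_spin_lock() verifies above:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	} v = { .counter = 42 };
 *	u32 key = 0;
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
 */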

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros which were zero-filled by
	 * percpu_alloc, so no kernel data leaks are possible.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so all programs that used this map (there can be more than one)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and then free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = bpf_fd_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};
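
/* Usage sketch for program arrays, which back bpf_tail_call() (names
 * illustrative). User space populates a slot with a program fd; note
 * that bpf_fd_array_map_update_elem() accepts only BPF_ANY:
 *
 *	u32 idx = 1;
 *	bpf_map_update_elem(prog_array_fd, &idx, &prog_fd, BPF_ANY);
 *
 * and the running program jumps through that slot:
 *
 *	bpf_tail_call(ctx, &prog_array_map, 1);
 *	// execution continues here only if slot 1 is empty or the
 *	// tail call fails
 */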

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
};
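
/* Usage sketch for perf event arrays (names illustrative): user space
 * stores one perf event fd per CPU via bpf_map_update_elem(), and the
 * program pushes samples out through the slot for the current CPU:
 *
 *	struct event { u32 pid; u64 ts; } e = { .pid = 1, .ts = 2 };
 *	bpf_perf_event_output(ctx, &perf_map, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */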

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by the syscall path,
	 * which is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
};
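
/* Map-in-map usage sketch (names illustrative): user space creates the
 * outer array with attr.inner_map_fd naming a template map and stores
 * inner map fds as values; programs then do a two-level lookup:
 *
 *	struct bpf_map *inner = bpf_map_lookup_elem(&outer_map, &okey);
 *	if (inner) {
 *		void *val = bpf_map_lookup_elem(inner, &ikey);
 *		// ...
 *	}
 */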