// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static DEFINE_PER_CPU(int, bpf_task_storage_busy);

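/* A per-cpu counter tracks whether the current CPU is already inside a
 * task-storage operation. bpf_task_storage_lock() takes it unconditionally,
 * while bpf_task_storage_trylock() backs off when the counter shows the CPU
 * is already busy, letting the recursion-aware helpers below bail out
 * instead of deadlocking on the underlying storage locks. Migration is
 * disabled while the counter is held so the increment and the matching
 * decrement hit the same per-cpu variable.
 */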
static void bpf_task_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	this_cpu_dec(bpf_task_storage_busy);
	migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}

static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage =
		rcu_dereference_check(task->bpf_storage, bpf_rcu_lock_held());
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage *local_storage;

	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	bpf_task_storage_lock();
	bpf_local_storage_destroy(local_storage);
	bpf_task_storage_unlock();
	rcu_read_unlock();
}

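/* Syscall-side map operations: the key of a task storage map is a pidfd,
 * which is resolved to a task via pidfd_get_pid(). A minimal sketch of the
 * expected user-space usage, assuming "map_fd" already refers to a
 * BPF_MAP_TYPE_TASK_STORAGE map whose value type is __u64 (the names below
 * are illustrative, not part of this file):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	__u64 value;
 *
 *	if (pidfd >= 0 && !bpf_map_lookup_elem(map_fd, &pidfd, &value))
 *		printf("stored value: %llu\n", (unsigned long long)value);
 */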
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read-side critical section; it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}

static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section; it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags,
		GFP_ATOMIC);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}

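/* Common delete path. @nobusy tells whether the caller managed to take the
 * per-cpu busy counter: the lookup itself is done locklessly, but the
 * element is only unlinked when the caller holds the counter; otherwise
 * -EBUSY is returned so a recursing program does not race with the
 * in-flight operation it interrupted.
 */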
static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
			       bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	if (!nobusy)
		return -EBUSY;

	bpf_selem_unlink(SELEM(sdata), false);

	return 0;
}

static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read-side critical section; it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}

/* Called by bpf_task_storage_get*() helpers */
static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
				    u64 flags, gfp_t gfp_flags, bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;

	/* only allocate new storage when the task is refcounted */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		return IS_ERR(sdata) ? NULL : sdata->data;
	}

	return NULL;
}

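/* Two flavors of the get/delete helpers are exposed. The *_recur variants
 * only trylock the per-cpu busy counter: when the CPU is already inside a
 * task-storage operation they fall back to a lockless lookup and refuse to
 * create new storage or unlink an existing element (delete reports -EBUSY
 * in that case). The plain variants take the lock unconditionally. Which
 * proto a program is given is decided at verification time, depending on
 * whether the program can recurse into these helpers.
 */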
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	bool nobusy;
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return (unsigned long)data;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	bpf_task_storage_lock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, true);
	bpf_task_storage_unlock();
	return (unsigned long)data;
}
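
/* A minimal sketch (not part of this file) of how a BPF program is expected
 * to use these helpers; the map, section and program names are illustrative:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} task_map SEC(".maps");
 *
 *	SEC("lsm/task_alloc")
 *	int BPF_PROG(task_alloc, struct task_struct *task, u64 clone_flags)
 *	{
 *		__u64 *cnt;
 *
 *		cnt = bpf_task_storage_get(&task_map, task, 0,
 *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 0;
 *	}
 */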

BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
	   task)
{
	bool nobusy;
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	nobusy = bpf_task_storage_trylock();
	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed, either by being refcounted or by being protected
	 * by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return ret;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!task)
		return -EINVAL;

	bpf_task_storage_lock();
	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed, either by being refcounted or by being protected
	 * by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map, true);
	bpf_task_storage_unlock();
	return ret;
}

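/* Task storage maps cannot be iterated from user space, so get_next_key is
 * simply reported as unsupported.
 */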
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &task_cache, true);
}

static void task_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
}

BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_mem_usage = bpf_local_storage_map_mem_usage,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_owner_storage_ptr = task_storage_ptr,
};

const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
	.func = bpf_task_storage_get_recur,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
	.func = bpf_task_storage_delete_recur,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};