/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>  // for size_t
#include <linux/bpf.h>

#include "libbpf_common.h"
#include "libbpf_legacy.h"

#ifdef __cplusplus
extern "C" {
#endif

LIBBPF_API __u32 libbpf_major_version(void);
LIBBPF_API __u32 libbpf_minor_version(void);
LIBBPF_API const char *libbpf_version_string(void);

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
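
/*
 * Usage sketch (illustrative only; the buffer size is arbitrary):
 *
 *	char buf[128];
 *
 *	if (!libbpf_strerror(-LIBBPF_ERRNO__VERIFY, buf, sizeof(buf)))
 *		fprintf(stderr, "load failed: %s\n", buf);
 */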

/**
 * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
 * value into a textual representation.
 * @param t The attach type.
 * @return Pointer to a static string identifying the attach type. NULL is
 * returned for unknown **bpf_attach_type** values.
 */
LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);

/**
 * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
 * into a textual representation.
 * @param t The link type.
 * @return Pointer to a static string identifying the link type. NULL is
 * returned for unknown **bpf_link_type** values.
 */
LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);

/**
 * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
 * into a textual representation.
 * @param t The map type.
 * @return Pointer to a static string identifying the map type. NULL is
 * returned for unknown **bpf_map_type** values.
 */
LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);

/**
 * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
 * value into a textual representation.
 * @param t The program type.
 * @return Pointer to a static string identifying the program type. NULL is
 * returned for unknown **bpf_prog_type** values.
 */
LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
				 const char *, va_list ap);

/**
 * @brief **libbpf_set_print()** sets user-provided log callback function to
 * be used for libbpf warnings and informational messages.
 * @param fn The log print function. If NULL, libbpf won't print anything.
 * @return Pointer to old print function.
 *
 * This function is thread-safe.
 */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
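
/*
 * A minimal sketch of a custom print callback that suppresses debug-level
 * output ("my_print" is a hypothetical name):
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */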

/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* object name override, if provided:
	 * - for object open from file, this will override setting object
	 *   name from file path's base name;
	 * - for object open from memory buffer, this will specify an object
	 *   name and will override default "<addr>-<buf-size>" name;
	 */
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;

	__u32 :32; /* stub out now removed attach_prog_fd */

	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
	const char *kconfig;
	/* Path to the custom BTF to be used for BPF CO-RE relocations.
	 * This custom BTF completely replaces the use of vmlinux BTF
	 * for the purpose of CO-RE relocations.
	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
	 */
	const char *btf_custom_path;
	/* Pointer to a buffer for storing kernel logs for applicable BPF
	 * commands. A valid kernel_log_size has to be specified as well and
	 * both are passed through to the bpf() syscall. Keep in mind that the
	 * kernel might fail the operation with -ENOSPC if the provided buffer
	 * is too small to contain the entire log output.
	 * See the comment below for kernel_log_level for interaction between
	 * log_buf and log_level settings.
	 *
	 * If specified, this log buffer will be passed for:
	 *   - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
	 *     with bpf_program__set_log_buf() on per-program level, to get
	 *     BPF verifier log output.
	 *   - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
	 *     BTF sanity checking log.
	 *
	 * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
	 * previous contents, so if you need more fine-grained control, set
	 * per-program buffer with bpf_program__set_log_buf() to preserve each
	 * individual program's verification log. Keep using kernel_log_buf
	 * for BTF verification log, if necessary.
	 */
	char *kernel_log_buf;
	size_t kernel_log_size;
	/*
	 * Log level can be set independently from log buffer. log_level=0
	 * means that libbpf will attempt loading BTF or program without any
	 * logging requested, but will retry with either its own or custom log
	 * buffer, if provided, and log_level=1 on any error.
	 * And vice versa, setting log_level>0 will request BTF or prog
	 * loading with verbose log from the first attempt (and as such also
	 * for successfully loaded BTF or program), and the actual log buffer
	 * could be either libbpf's own auto-allocated log buffer, if
	 * kernel_log_buf is NULL, or user-provided custom kernel_log_buf.
	 * If user didn't provide custom log buffer, libbpf will emit captured
	 * logs through its print callback.
	 */
	__u32 kernel_log_level;

	size_t :0;
};
#define bpf_object_open_opts__last_field kernel_log_level
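
/*
 * Opts are conventionally declared with the LIBBPF_OPTS() helper from
 * libbpf_common.h, which zero-initializes the struct and fills in .sz
 * (a sketch; object and file names are hypothetical):
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.kernel_log_level = 1,
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 */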

/**
 * @brief **bpf_object__open()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path.
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);

/**
 * @brief **bpf_object__open_file()** creates a bpf_object by opening
 * the BPF ELF object file pointed to by the passed path and loading it
 * into memory.
 * @param path BPF object file path
 * @param opts options for how to load the bpf object; this parameter is
 * optional and can be set to NULL
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__open_mem()** creates a bpf_object by reading
 * the BPF object's raw bytes from a memory buffer containing a valid
 * BPF ELF object file.
 * @param obj_buf pointer to the buffer containing ELF file bytes
 * @param obj_buf_sz number of bytes in the buffer
 * @param opts options for how to load the bpf object
 * @return pointer to the new bpf_object; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);

/**
 * @brief **bpf_object__load()** loads BPF object into kernel.
 * @param obj Pointer to a valid BPF object instance returned by
 * **bpf_object__open*()** APIs
 * @return 0, on success; negative error code, otherwise, error code is
 * stored in errno
 */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
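
/*
 * Typical open/load lifecycle (a sketch with abbreviated error handling;
 * "prog.bpf.o" is a hypothetical path):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *	if (!obj)
 *		return -errno;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -errno;
 *	}
 */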

/**
 * @brief **bpf_object__close()** closes a BPF object and releases all
 * resources.
 * @param obj Pointer to a valid BPF object
 */
LIBBPF_API void bpf_object__close(struct bpf_object *obj);

/**
 * @brief **bpf_object__pin_maps()** pins each map contained within
 * the BPF object at the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where maps should be pinned.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL, `bpf_map__pin` (which is used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);

/**
 * @brief **bpf_object__unpin_maps()** unpins each map contained within
 * the BPF object found in the passed directory.
 * @param obj Pointer to a valid BPF object
 * @param path A directory where pinned maps should be searched for.
 * @return 0, on success; negative error code, otherwise
 *
 * If `path` is NULL, `bpf_map__unpin` (which is used on each map)
 * will use the pin_path attribute of each map. In this case, maps that
 * don't have a pin_path set will be ignored.
 */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
				      const char *path);
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
					const char *path);
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					   const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
					  enum bpf_attach_type *attach_type);
LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
					  enum bpf_attach_type attach_type);

/* Accessors of bpf_program */
struct bpf_program;

LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj)			\
	for ((pos) = bpf_object__next_program((obj), NULL);	\
	     (pos) != NULL;					\
	     (pos) = bpf_object__next_program((obj), (pos)))
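
/*
 * Iteration sketch: enable autoload only for one program by name
 * ("my_prog" is hypothetical; bpf_program__name() and
 * bpf_program__set_autoload() are declared further below):
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj) {
 *		bool load = !strcmp(bpf_program__name(prog), "my_prog");
 *
 *		bpf_program__set_autoload(prog, load);
 *	}
 */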

LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);

struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprograms instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);

/**
 * @brief **bpf_program__set_insns()** can set BPF program's underlying
 * BPF instructions.
 *
 * WARNING: This is a very advanced libbpf API and users need to know
 * what they are doing. This should be used from prog_prepare_load_fn
 * callback only.
 *
 * @param prog BPF program to set instructions for
 * @param new_insns a pointer to an array of BPF instructions
 * @param new_insn_cnt number of `struct bpf_insn`'s that form
 * specified BPF program
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
				      struct bpf_insn *new_insns, size_t new_insn_cnt);

/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
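
/*
 * A read-only inspection sketch, valid at any point of the object's
 * lifetime (the returned snapshot may differ before and after load):
 *
 *	const struct bpf_insn *insns = bpf_program__insns(prog);
 *	size_t i, cnt = bpf_program__insn_cnt(prog);
 *
 *	for (i = 0; i < cnt; i++)
 *		printf("insn %zu: code 0x%02x\n", i, insns[i].code);
 */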

LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);

/**
 * @brief **bpf_program__pin()** pins the BPF program to a file
 * in the BPF FS specified by a path. This increments the program's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param prog BPF program to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);

/**
 * @brief **bpf_program__unpin()** unpins the BPF program from a file
 * in the BPFFS specified by a path. This decrements the program's
 * reference count.
 *
 * The file pinning the BPF program can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param prog BPF program to unpin
 * @param path file path to the pin in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

struct bpf_link;

LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
/**
 * @brief **bpf_link__pin()** pins the BPF link to a file
 * in the BPF FS specified by a path. This increments the link's
 * reference count, allowing it to stay loaded after the process
 * which loaded it has exited.
 *
 * @param link BPF link to pin, must already be loaded
 * @param path file path in a BPF file system
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);

/**
 * @brief **bpf_link__unpin()** unpins the BPF link from the file
 * in the BPFFS it was pinned to. This decrements the link's
 * reference count.
 *
 * The file pinning the BPF link can also be unlinked by a different
 * process in which case this function will return an error.
 *
 * @param link BPF link to unpin
 * @return 0, on success; negative error code, otherwise
 */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

/**
 * @brief **bpf_program__attach()** is a generic function for attaching
 * a BPF program based on auto-detection of program type, attach type,
 * and extra parameters, where applicable.
 *
 * @param prog BPF program to attach
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 *
 * This is supported for:
 *   - kprobe/kretprobe (depends on SEC() definition)
 *   - uprobe/uretprobe (depends on SEC() definition)
 *   - tracepoint
 *   - raw tracepoint
 *   - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
 */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);
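
/*
 * Auto-attachment sketch: relies on the program's SEC() annotation
 * carrying the target, e.g. SEC("kprobe/do_unlinkat") (the function
 * name is an example):
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */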

struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* don't use BPF link when attaching the BPF program */
	bool force_ioctl_attach;
	size_t :0;
};
#define bpf_perf_event_opts__last_field force_ioctl_attach

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);

LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
				    const struct bpf_perf_event_opts *opts);

/**
 * enum probe_attach_mode - the mode to attach kprobe/uprobe
 *
 * Forces libbpf to attach a kprobe/uprobe in a specific mode; -ENOTSUP will
 * be returned if the mode is not supported by the kernel.
 */
enum probe_attach_mode {
	/* attach probe in latest supported mode by kernel */
	PROBE_ATTACH_MODE_DEFAULT = 0,
	/* attach probe in legacy mode, using debugfs/tracefs */
	PROBE_ATTACH_MODE_LEGACY,
	/* create perf event with perf_event_open() syscall */
	PROBE_ATTACH_MODE_PERF,
	/* attach probe with BPF link */
	PROBE_ATTACH_MODE_LINK,
};

struct bpf_kprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* function's offset to install kprobe to */
	size_t offset;
	/* kprobe is return probe */
	bool retprobe;
	/* kprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_kprobe_opts__last_field attach_mode

LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);
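
/*
 * Return-probe attachment with a BPF cookie (a sketch; the function
 * name and cookie value are examples):
 *
 *	LIBBPF_OPTS(bpf_kprobe_opts, opts,
 *		.retprobe = true,
 *		.bpf_cookie = 0x1234,
 *	);
 *	struct bpf_link *link =
 *		bpf_program__attach_kprobe_opts(prog, "do_unlinkat", &opts);
 */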
| 509 | |
Jiri Olsa | ddc6b04 | 2022-03-16 13:24:15 +0100 | [diff] [blame] | 510 | struct bpf_kprobe_multi_opts { |
| 511 | /* size of this struct, for forward/backward compatibility */ |
| 512 | size_t sz; |
| 513 | /* array of function symbols to attach */ |
| 514 | const char **syms; |
| 515 | /* array of function addresses to attach */ |
| 516 | const unsigned long *addrs; |
| 517 | /* array of user-provided values fetchable through bpf_get_attach_cookie */ |
| 518 | const __u64 *cookies; |
| 519 | /* number of elements in syms/addrs/cookies arrays */ |
| 520 | size_t cnt; |
| 521 | /* create return kprobes */ |
| 522 | bool retprobe; |
| 523 | size_t :0; |
| 524 | }; |
| 525 | |
| 526 | #define bpf_kprobe_multi_opts__last_field retprobe |
| 527 | |
| 528 | LIBBPF_API struct bpf_link * |
| 529 | bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, |
| 530 | const char *pattern, |
| 531 | const struct bpf_kprobe_multi_opts *opts); |
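
/*
 * Pattern-based multi-attach sketch (the glob is an example; when a
 * pattern is used, syms/addrs in opts must be left unset):
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_kprobe_multi_opts(prog, "tcp_send*", NULL);
 */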

struct bpf_uprobe_multi_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* array of function symbols to attach to */
	const char **syms;
	/* array of function offsets to attach to */
	const unsigned long *offsets;
	/* optional, array of associated ref counter offsets */
	const unsigned long *ref_ctr_offsets;
	/* optional, array of associated BPF cookies */
	const __u64 *cookies;
	/* number of elements in syms/offsets/cookies arrays */
	size_t cnt;
	/* create return uprobes */
	bool retprobe;
	size_t :0;
};

#define bpf_uprobe_multi_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
 * to multiple uprobes with uprobe_multi link.
 *
 * User can specify two mutually exclusive sets of inputs:
 *
 * 1) use only path/func_pattern/pid arguments
 *
 * 2) use path/pid with allowed combinations of
 *    syms/offsets/ref_ctr_offsets/cookies/cnt
 *
 *    - syms and offsets are mutually exclusive
 *    - ref_ctr_offsets and cookies are optional
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary
 * @param func_pattern Regular expression to specify functions to attach
 * BPF program to
 * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
				 pid_t pid,
				 const char *binary_path,
				 const char *func_pattern,
				 const struct bpf_uprobe_multi_opts *opts);

struct bpf_ksyscall_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* attach as return probe? */
	bool retprobe;
	size_t :0;
};
#define bpf_ksyscall_opts__last_field retprobe

/**
 * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
 * to the kernel syscall handler of a specified syscall. Optionally it's
 * possible to request to install a retprobe that will be triggered at
 * syscall exit. It's also possible to associate a BPF cookie (through
 * options).
 *
 * Libbpf will automatically determine the correct full kernel function
 * name, which depending on system architecture and kernel version/configuration
 * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
 * attach specified program using kprobe/kretprobe mechanism.
 *
 * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
 * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
 *
 * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
 * not handle all the calling convention quirks for mmap(), clone() and compat
 * syscalls. It also only attaches to "native" syscall interfaces. If host
 * system supports compat syscalls or defines 32-bit syscalls in 64-bit
 * kernel, such syscall interfaces won't be attached to by libbpf.
 *
 * These limitations may or may not change in the future. Therefore it is
 * recommended to use SEC("kprobe") for these syscalls or if working with
 * compat and 32-bit interfaces is required.
 *
 * @param prog BPF program to attach
 * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
 * @param opts Additional options (see **struct bpf_ksyscall_opts**)
 * @return Reference to the newly created BPF link; or NULL is returned on
 * error, error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_ksyscall(const struct bpf_program *prog,
			     const char *syscall_name,
			     const struct bpf_ksyscall_opts *opts);
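
/*
 * Sketch: attach to the kernel handler for the "unlinkat" syscall entry
 * (the syscall name is an example; opts may be NULL for defaults):
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_ksyscall(prog, "unlinkat", NULL);
 */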

struct bpf_uprobe_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* offset of kernel reference counted USDT semaphore, added in
	 * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	size_t ref_ctr_offset;
	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
	__u64 bpf_cookie;
	/* uprobe is return probe, invoked at function return time */
	bool retprobe;
	/* Function name to attach to. Could be an unqualified ("abc") or library-qualified
	 * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
	 * func_offset argument to bpf_program__attach_uprobe_opts() should be 0. To trace an
	 * offset within a function, specify func_name and use func_offset argument to specify
	 * offset within the function. Shared library functions must specify the shared library
	 * binary_path.
	 */
	const char *func_name;
	/* uprobe attach mode */
	enum probe_attach_mode attach_mode;
	size_t :0;
};
#define bpf_uprobe_opts__last_field attach_mode

/**
 * @brief **bpf_program__attach_uprobe()** attaches a BPF program
 * to the userspace function which is found by binary path and
 * offset. You can optionally specify a particular process to attach
 * to. You can also optionally attach the program to the function
 * exit instead of entry.
 *
 * @param prog BPF program to attach
 * @param retprobe Attach to function exit
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);

/**
 * @brief **bpf_program__attach_uprobe_opts()** is just like
 * bpf_program__attach_uprobe() except with an options struct
 * for various configurations.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains the function symbol
 * @param func_offset Offset within the binary of the function symbol
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts);
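
/*
 * Name-based uprobe attachment sketch ("malloc" is resolved inside the
 * given library; pid -1 means all processes, and func_offset stays 0
 * when func_name is used):
 *
 *	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "malloc");
 *	struct bpf_link *link =
 *		bpf_program__attach_uprobe_opts(prog, -1, "libc.so.6",
 *						0, &opts);
 */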

struct bpf_usdt_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* custom user-provided value accessible through usdt_cookie() */
	__u64 usdt_cookie;
	size_t :0;
};
#define bpf_usdt_opts__last_field usdt_cookie

/**
 * @brief **bpf_program__attach_usdt()** is just like
 * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
 * Statically Defined Tracepoint) attachment, instead of attaching to
 * user-space function entry or exit.
 *
 * @param prog BPF program to attach
 * @param pid Process ID to attach the uprobe to, 0 for self (own process),
 * -1 for all processes
 * @param binary_path Path to binary that contains provided USDT probe
 * @param usdt_provider USDT provider name
 * @param usdt_name USDT probe name
 * @param opts Options for altering program attachment
 * @return Reference to the newly created BPF link; or NULL is returned on error,
 * error code is stored in errno
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_usdt(const struct bpf_program *prog,
			 pid_t pid, const char *binary_path,
			 const char *usdt_provider, const char *usdt_name,
			 const struct bpf_usdt_opts *opts);
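
/*
 * USDT attachment sketch (the provider/probe names and binary path are
 * illustrative; available probes depend on the target binary):
 *
 *	struct bpf_link *link =
 *		bpf_program__attach_usdt(prog, -1, "/usr/sbin/mysqld",
 *					 "mysql", "query__start", NULL);
 */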
| 725 | |
Andrii Nakryiko | 47faff3 | 2021-08-15 00:06:04 -0700 | [diff] [blame] | 726 | struct bpf_tracepoint_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 727 | /* size of this struct, for forward/backward compatibility */ |
Andrii Nakryiko | 47faff3 | 2021-08-15 00:06:04 -0700 | [diff] [blame] | 728 | size_t sz; |
| 729 | /* custom user-provided value fetchable through bpf_get_attach_cookie() */ |
| 730 | __u64 bpf_cookie; |
| 731 | }; |
| 732 | #define bpf_tracepoint_opts__last_field bpf_cookie |
| 733 | |
| 734 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 735 | bpf_program__attach_tracepoint(const struct bpf_program *prog, |
Andrii Nakryiko | f6de59c | 2019-07-01 16:58:59 -0700 | [diff] [blame] | 736 | const char *tp_category, |
| 737 | const char *tp_name); |
Andrii Nakryiko | 84bf5e1 | 2019-07-01 16:59:00 -0700 | [diff] [blame] | 738 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 739 | bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, |
Andrii Nakryiko | 47faff3 | 2021-08-15 00:06:04 -0700 | [diff] [blame] | 740 | const char *tp_category, |
| 741 | const char *tp_name, |
| 742 | const struct bpf_tracepoint_opts *opts); |
| 743 | |
| 744 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 745 | bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, |
Andrii Nakryiko | 84bf5e1 | 2019-07-01 16:59:00 -0700 | [diff] [blame] | 746 | const char *tp_name); |
Kui-Feng Lee | 129b9c5 | 2022-05-10 13:59:22 -0700 | [diff] [blame] | 747 | |
| 748 | struct bpf_trace_opts { |
| 749 | /* size of this struct, for forward/backward compatibility */ |
| 750 | size_t sz; |
| 751 | /* custom user-provided value fetchable through bpf_get_attach_cookie() */ |
| 752 | __u64 cookie; |
| 753 | }; |
| 754 | #define bpf_trace_opts__last_field cookie |
| 755 | |
Alexei Starovoitov | b8c54ea | 2019-11-14 10:57:06 -0800 | [diff] [blame] | 756 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 757 | bpf_program__attach_trace(const struct bpf_program *prog); |
KP Singh | 1e092a03 | 2020-03-29 01:43:54 +0100 | [diff] [blame] | 758 | LIBBPF_API struct bpf_link * |
Kui-Feng Lee | 129b9c5 | 2022-05-10 13:59:22 -0700 | [diff] [blame] | 759 | bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts); |
| 760 | |
| 761 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 762 | bpf_program__attach_lsm(const struct bpf_program *prog); |
Andrii Nakryiko | cc4f864 | 2020-03-29 20:00:00 -0700 | [diff] [blame] | 763 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 764 | bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd); |
Jakub Sitnicki | d60d81a | 2020-05-31 10:28:40 +0200 | [diff] [blame] | 765 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 766 | bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd); |
Andrii Nakryiko | dc8698c | 2020-07-21 23:46:00 -0700 | [diff] [blame] | 767 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 768 | bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex); |
Toke Høiland-Jørgensen | a535909 | 2020-09-29 14:45:53 +0200 | [diff] [blame] | 769 | LIBBPF_API struct bpf_link * |
Andrii Nakryiko | 942025c | 2021-09-15 18:58:36 -0700 | [diff] [blame] | 770 | bpf_program__attach_freplace(const struct bpf_program *prog, |
Toke Høiland-Jørgensen | a535909 | 2020-09-29 14:45:53 +0200 | [diff] [blame] | 771 | int target_fd, const char *attach_func_name); |
Andrii Nakryiko | cc4f864 | 2020-03-29 20:00:00 -0700 | [diff] [blame] | 772 | |
struct bpf_netfilter_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;

	__u32 pf;
	__u32 hooknum;
	__s32 priority;
	__u32 flags;
};
#define bpf_netfilter_opts__last_field flags

LIBBPF_API struct bpf_link *
bpf_program__attach_netfilter(const struct bpf_program *prog,
			      const struct bpf_netfilter_opts *opts);

struct bpf_tcx_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_tcx_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts);

struct bpf_netkit_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	__u32 flags;
	__u32 relative_fd;
	__u32 relative_id;
	__u64 expected_revision;
	size_t :0;
};
#define bpf_netkit_opts__last_field expected_revision

LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
			   const struct bpf_netkit_opts *opts);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);

| 836 | /** |
| 837 | * @brief **bpf_program__set_type()** sets the program |
| 838 | * type of the passed BPF program. |
| 839 | * @param prog BPF program to set the program type for |
| 840 | * @param type program type to set the BPF map to have |
| 841 | * @return error code; or 0 if no error. An error occurs |
| 842 | * if the object is already loaded. |
| 843 | * |
| 844 | * This must be called before the BPF object is loaded, |
| 845 | * otherwise it has no effect and an error is returned. |
| 846 | */ |
Grant Seltzer | 93442f1 | 2022-04-20 12:12:24 -0400 | [diff] [blame] | 847 | LIBBPF_API int bpf_program__set_type(struct bpf_program *prog, |
| 848 | enum bpf_prog_type type); |
Andrii Nakryiko | f1eead9 | 2019-10-20 20:38:57 -0700 | [diff] [blame] | 849 | |
| 850 | LIBBPF_API enum bpf_attach_type |
Andrii Nakryiko | 20eccf2 | 2022-01-24 11:42:48 -0800 | [diff] [blame] | 851 | bpf_program__expected_attach_type(const struct bpf_program *prog); |
Grant Seltzer | a66ab9a | 2022-04-20 12:12:26 -0400 | [diff] [blame] | 852 | |
| 853 | /** |
| 854 | * @brief **bpf_program__set_expected_attach_type()** sets the |
| 855 | * attach type of the passed BPF program. This is used for |
| 856 | * auto-detection of attachment when programs are loaded. |
| 857 | * @param prog BPF program to set the attach type for |
| 858 | * @param type attach type to set the BPF map to have |
| 859 | * @return error code; or 0 if no error. An error occurs |
| 860 | * if the object is already loaded. |
| 861 | * |
| 862 | * This must be called before the BPF object is loaded, |
| 863 | * otherwise it has no effect and an error is returned. |
| 864 | */ |
Grant Seltzer | 93442f1 | 2022-04-20 12:12:24 -0400 | [diff] [blame] | 865 | LIBBPF_API int |
Andrey Ignatov | ab9e084 | 2018-10-15 22:50:34 -0700 | [diff] [blame] | 866 | bpf_program__set_expected_attach_type(struct bpf_program *prog, |
| 867 | enum bpf_attach_type type); |
Wang Nan | 5f44e4c8 | 2016-07-13 10:44:01 +0000 | [diff] [blame] | 868 | |
Andrii Nakryiko | a6ca715 | 2021-11-10 21:17:57 -0800 | [diff] [blame] | 869 | LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog); |
Florent Revest | 8cccee9 | 2021-11-19 19:00:35 +0100 | [diff] [blame] | 870 | LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags); |
Andrii Nakryiko | b3ce907 | 2021-12-09 11:38:35 -0800 | [diff] [blame] | 871 | |
| 872 | /* Per-program log level and log buffer getters/setters. |
| 873 | * See bpf_object_open_opts comments regarding log_level and log_buf |
| 874 | * interactions. |
| 875 | */ |
Andrii Nakryiko | dbdd2c7 | 2021-12-01 15:28:17 -0800 | [diff] [blame] | 876 | LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog); |
| 877 | LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level); |
Andrii Nakryiko | b3ce907 | 2021-12-09 11:38:35 -0800 | [diff] [blame] | 878 | LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); |
| 879 | LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); |
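
/*
 * Example (an illustrative sketch): capturing the verifier log of a
 * single program into a caller-provided buffer; the buffer name is made
 * up. If bpf_object__load() subsequently fails, log_buf contains the
 * verifier output for this program.
 *
 *    static char log_buf[1024 * 1024];
 *
 *    bpf_program__set_log_level(prog, 1);
 *    bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
 */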
Andrii Nakryiko | a6ca715 | 2021-11-10 21:17:57 -0800 | [diff] [blame] | 880 | |
Grant Seltzer | a66ab9a | 2022-04-20 12:12:26 -0400 | [diff] [blame] | 881 | /** |
| 882 | * @brief **bpf_program__set_attach_target()** sets BTF-based attach target |
| 883 | * for supported BPF program types: |
| 884 | * - BTF-aware raw tracepoints (tp_btf); |
| 885 | * - fentry/fexit/fmod_ret; |
| 886 | * - lsm; |
| 887 | * - freplace. |
 * @param prog BPF program to set the attach target for
 * @param attach_prog_fd fd of the target BPF program (for freplace
 * programs), or zero to attach to a kernel function by name
 * @param attach_func_name name of the BTF function to attach to
 * @return error code; or 0 if no error occurred.
| 891 | */ |
Eelco Chaudron | ff26ce5 | 2020-02-20 13:26:35 +0000 | [diff] [blame] | 892 | LIBBPF_API int |
| 893 | bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, |
| 894 | const char *attach_func_name); |
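
/*
 * Example (an illustrative sketch): pointing an fentry program at a
 * kernel function before load; the function name is illustrative.
 * Passing zero as *attach_prog_fd* targets a kernel function by name,
 * while a valid BPF program fd would target that program (freplace).
 *
 *    int err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *
 *    if (err) {
 *        // error handling
 *    }
 */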
| 895 | |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 896 | /** |
 * @brief **bpf_object__find_map_by_name()** returns the BPF map of
 * the given name, if it exists within the passed BPF object
| 899 | * @param obj BPF object |
| 900 | * @param name name of the BPF map |
| 901 | * @return BPF map instance, if such map exists within the BPF object; |
| 902 | * or NULL otherwise. |
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 903 | */ |
Andrey Ignatov | ab9e084 | 2018-10-15 22:50:34 -0700 | [diff] [blame] | 904 | LIBBPF_API struct bpf_map * |
Andrii Nakryiko | a324aae | 2019-06-17 15:48:58 -0700 | [diff] [blame] | 905 | bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name); |
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 906 | |
Maciej Fijalkowski | f3cea32 | 2019-02-01 22:42:23 +0100 | [diff] [blame] | 907 | LIBBPF_API int |
Andrii Nakryiko | a324aae | 2019-06-17 15:48:58 -0700 | [diff] [blame] | 908 | bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name); |
Maciej Fijalkowski | f3cea32 | 2019-02-01 22:42:23 +0100 | [diff] [blame] | 909 | |
Andrey Ignatov | ab9e084 | 2018-10-15 22:50:34 -0700 | [diff] [blame] | 910 | LIBBPF_API struct bpf_map * |
Hengqi Chen | 2088a3a | 2021-10-04 00:58:43 +0800 | [diff] [blame] | 911 | bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map); |
| 912 | |
Jakub Kicinski | f74a53d9 | 2019-02-27 19:04:12 -0800 | [diff] [blame] | 913 | #define bpf_object__for_each_map(pos, obj) \ |
Hengqi Chen | 2088a3a | 2021-10-04 00:58:43 +0800 | [diff] [blame] | 914 | for ((pos) = bpf_object__next_map((obj), NULL); \ |
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 915 | (pos) != NULL; \ |
Hengqi Chen | 2088a3a | 2021-10-04 00:58:43 +0800 | [diff] [blame] | 916 | (pos) = bpf_object__next_map((obj), (pos))) |
Jakub Kicinski | f74a53d9 | 2019-02-27 19:04:12 -0800 | [diff] [blame] | 917 | #define bpf_map__for_each bpf_object__for_each_map |
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 918 | |
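/*
 * Example (an illustrative sketch): iterating over all maps of an opened
 * bpf_object and printing their names.
 *
 *    struct bpf_map *map;
 *
 *    bpf_object__for_each_map(map, obj)
 *        printf("map: %s\n", bpf_map__name(map));
 */
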
Stanislav Fomichev | 0c19a9f | 2018-11-09 08:21:41 -0800 | [diff] [blame] | 919 | LIBBPF_API struct bpf_map * |
Hengqi Chen | 2088a3a | 2021-10-04 00:58:43 +0800 | [diff] [blame] | 920 | bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); |
Stanislav Fomichev | 0c19a9f | 2018-11-09 08:21:41 -0800 | [diff] [blame] | 921 | |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 922 | /** |
 * @brief **bpf_map__set_autocreate()** sets whether libbpf should auto-create
 * the BPF map during the BPF object load phase.
 * @param map the BPF map instance
 * @param autocreate whether to create the BPF map during BPF object load
 * @return 0 on success; -EBUSY if the BPF object was already loaded
 *
 * **bpf_map__set_autocreate()** allows opting out of libbpf auto-creating
 * a BPF map. By default, libbpf will attempt to create every single BPF map
 * defined in the BPF object file using the BPF_MAP_CREATE command of the
 * bpf() syscall and fill in the map FD in BPF instructions.
 *
 * This API allows opting out of this process for a specific map instance.
 * This can be useful if the host kernel doesn't support such a BPF map type
 * or the used combination of flags, and the user application wants to avoid
 * creating such a map in the first place. The user is still responsible for
 * making sure that their BPF-side code that expects to use such a missing
 * BPF map is recognized by the BPF verifier as dead code, otherwise the
 * verifier will reject the BPF program.
| 940 | */ |
| 941 | LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate); |
| 942 | LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map); |
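
/*
 * Example (an illustrative sketch): opting out of creating a map the host
 * kernel might not support; the map name is made up. The BPF-side code
 * referencing "optional_map" must be dead code from the verifier's point
 * of view for the load to succeed.
 *
 *    struct bpf_map *map = bpf_object__find_map_by_name(obj, "optional_map");
 *
 *    if (map)
 *        bpf_map__set_autocreate(map, false);
 */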
| 943 | |
| 944 | /** |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 945 | * @brief **bpf_map__fd()** gets the file descriptor of the passed |
| 946 | * BPF map |
| 947 | * @param map the BPF map instance |
| 948 | * @return the file descriptor; or -EINVAL in case of an error |
| 949 | */ |
Andrii Nakryiko | a324aae | 2019-06-17 15:48:58 -0700 | [diff] [blame] | 950 | LIBBPF_API int bpf_map__fd(const struct bpf_map *map); |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 951 | LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 952 | /* get map name */ |
Andrii Nakryiko | a324aae | 2019-06-17 15:48:58 -0700 | [diff] [blame] | 953 | LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 954 | /* get/set map type */ |
| 955 | LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); |
| 956 | LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); |
| 957 | /* get/set map size (max_entries) */ |
| 958 | LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); |
| 959 | LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 960 | /* get/set map flags */ |
| 961 | LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); |
| 962 | LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); |
| 963 | /* get/set map NUMA node */ |
| 964 | LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); |
| 965 | LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); |
| 966 | /* get/set map key size */ |
| 967 | LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); |
| 968 | LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); |
JP Kobryn | 9d0a233 | 2023-05-23 17:45:36 -0700 | [diff] [blame] | 969 | /* get map value size */ |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 970 | LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); |
JP Kobryn | 9d0a233 | 2023-05-23 17:45:36 -0700 | [diff] [blame] | 971 | /** |
| 972 | * @brief **bpf_map__set_value_size()** sets map value size. |
 * @param map the BPF map instance
 * @param size the new value size to set, in bytes
 * @return 0, on success; negative error, otherwise
| 975 | * |
| 976 | * There is a special case for maps with associated memory-mapped regions, like |
| 977 | * the global data section maps (bss, data, rodata). When this function is used |
| 978 | * on such a map, the mapped region is resized. Afterward, an attempt is made to |
| 979 | * adjust the corresponding BTF info. This attempt is best-effort and can only |
| 980 | * succeed if the last variable of the data section map is an array. The array |
| 981 | * BTF type is replaced by a new BTF array type with a different length. |
| 982 | * Any previously existing pointers returned from bpf_map__initial_value() or |
| 983 | * corresponding data section skeleton pointer must be reinitialized. |
| 984 | */ |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 985 | LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); |
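
/*
 * Example (an illustrative sketch, assuming the data section's last
 * variable is an array): doubling the size of a global data section map,
 * then re-fetching the mapped region, since the old pointer is
 * invalidated by the resize.
 *
 *    struct bpf_map *bss = bpf_object__find_map_by_name(obj, ".bss");
 *    size_t sz;
 *    void *data;
 *    int err;
 *
 *    err = bpf_map__set_value_size(bss, 2 * bpf_map__value_size(bss));
 *    if (!err)
 *        data = bpf_map__initial_value(bss, &sz);
 */
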
| 986 | /* get map key/value BTF type IDs */ |
Andrey Ignatov | ab9e084 | 2018-10-15 22:50:34 -0700 | [diff] [blame] | 987 | LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); |
| 988 | LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); |
Andrii Nakryiko | 1bdb6c9 | 2020-06-20 23:21:12 -0700 | [diff] [blame] | 989 | /* get/set map if_index */ |
| 990 | LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); |
| 991 | LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); |
Joanne Koong | 4751210 | 2021-10-27 16:45:01 -0700 | [diff] [blame] | 992 | /* get/set map map_extra flags */ |
| 993 | LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); |
| 994 | LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); |
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 995 | |
Toke Høiland-Jørgensen | e2842be | 2020-03-29 15:22:52 +0200 | [diff] [blame] | 996 | LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, |
| 997 | const void *data, size_t size); |
JP Kobryn | 9d0a233 | 2023-05-23 17:45:36 -0700 | [diff] [blame] | 998 | LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize); |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 999 | |
| 1000 | /** |
| 1001 | * @brief **bpf_map__is_internal()** tells the caller whether or not the |
| 1002 | * passed map is a special map created by libbpf automatically for things like |
 * global variables, __ksym externs, Kconfig values, etc.
| 1004 | * @param map the bpf_map |
| 1005 | * @return true, if the map is an internal map; false, otherwise |
| 1006 | */ |
Andrii Nakryiko | a324aae | 2019-06-17 15:48:58 -0700 | [diff] [blame] | 1007 | LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); |
Grant Seltzer | e4ce876 | 2023-01-25 21:42:25 -0500 | [diff] [blame] | 1008 | |
| 1009 | /** |
| 1010 | * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the |
| 1011 | * BPF map should be pinned. This does not actually create the 'pin'. |
| 1012 | * @param map The bpf_map |
| 1013 | * @param path The path |
| 1014 | * @return 0, on success; negative error, otherwise |
| 1015 | */ |
Toke Høiland-Jørgensen | 4580b25 | 2019-11-02 12:09:38 +0100 | [diff] [blame] | 1016 | LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); |
Grant Seltzer | e4ce876 | 2023-01-25 21:42:25 -0500 | [diff] [blame] | 1017 | |
| 1018 | /** |
| 1019 | * @brief **bpf_map__pin_path()** gets the path attribute that tells where the |
| 1020 | * BPF map should be pinned. |
| 1021 | * @param map The bpf_map |
 * @return The path string, which can be NULL
| 1023 | */ |
Evgeniy Litvinenko | e244d34 | 2021-07-23 15:15:11 -0700 | [diff] [blame] | 1024 | LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map); |
Grant Seltzer | e4ce876 | 2023-01-25 21:42:25 -0500 | [diff] [blame] | 1025 | |
| 1026 | /** |
| 1027 | * @brief **bpf_map__is_pinned()** tells the caller whether or not the |
| 1028 | * passed map has been pinned via a 'pin' file. |
| 1029 | * @param map The bpf_map |
| 1030 | * @return true, if the map is pinned; false, otherwise |
| 1031 | */ |
Toke Høiland-Jørgensen | 4580b25 | 2019-11-02 12:09:38 +0100 | [diff] [blame] | 1032 | LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); |
Grant Seltzer | e4ce876 | 2023-01-25 21:42:25 -0500 | [diff] [blame] | 1033 | |
| 1034 | /** |
| 1035 | * @brief **bpf_map__pin()** creates a file that serves as a 'pin' |
| 1036 | * for the BPF map. This increments the reference count on the |
| 1037 | * BPF map which will keep the BPF map loaded even after the |
| 1038 | * userspace process which loaded it has exited. |
| 1039 | * @param map The bpf_map to pin |
| 1040 | * @param path A file path for the 'pin' |
| 1041 | * @return 0, on success; negative error, otherwise |
| 1042 | * |
 * If `path` is NULL the map's `pin_path` attribute will be used. If this is
| 1044 | * also NULL, an error will be returned and the map will not be pinned. |
| 1045 | */ |
Andrey Ignatov | ab9e084 | 2018-10-15 22:50:34 -0700 | [diff] [blame] | 1046 | LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); |
Grant Seltzer | e4ce876 | 2023-01-25 21:42:25 -0500 | [diff] [blame] | 1047 | |
| 1048 | /** |
| 1049 | * @brief **bpf_map__unpin()** removes the file that serves as a |
| 1050 | * 'pin' for the BPF map. |
| 1051 | * @param map The bpf_map to unpin |
| 1052 | * @param path A file path for the 'pin' |
| 1053 | * @return 0, on success; negative error, otherwise |
| 1054 | * |
 * The `path` parameter can be NULL, in which case the path stored in
 * the map's `pin_path` attribute is used instead. If both the `path`
 * parameter and the `pin_path` attribute are set, they must be equal.
| 1058 | */ |
Stanislav Fomichev | 0c19a9f | 2018-11-09 08:21:41 -0800 | [diff] [blame] | 1059 | LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); |
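
/*
 * Example (an illustrative sketch): pinning a map in bpffs so it survives
 * process exit, and removing the pin later; the path is made up.
 *
 *    int err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *
 *    if (!err) {
 *        // ... some time later ...
 *        err = bpf_map__unpin(map, "/sys/fs/bpf/my_map");
 *    }
 */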
Wang Nan | 9d759a9 | 2015-11-27 08:47:35 +0000 | [diff] [blame] | 1060 | |
Nikita V. Shirokov | addb9fc | 2018-11-20 20:55:56 -0800 | [diff] [blame] | 1061 | LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); |
Andrii Nakryiko | b327809 | 2021-04-08 09:13:08 +0300 | [diff] [blame] | 1062 | LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map); |
Nikita V. Shirokov | addb9fc | 2018-11-20 20:55:56 -0800 | [diff] [blame] | 1063 | |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 1064 | /** |
 * @brief **bpf_map__lookup_elem()** looks up the BPF map value
 * corresponding to the provided key.
| 1067 | * @param map BPF map to lookup element in |
| 1068 | * @param key pointer to memory containing bytes of the key used for lookup |
| 1069 | * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** |
| 1070 | * @param value pointer to memory in which looked up value will be stored |
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps the value size has to be
 * a product of the BPF map value size and the number of possible CPUs in the
 * system (which can be fetched with **libbpf_num_possible_cpus()**). Note
 * also that for per-CPU values the value size has to be aligned up to the
 * closest 8 bytes, so the expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
| 1079 | * @return 0, on success; negative error, otherwise |
| 1080 | * |
| 1081 | * **bpf_map__lookup_elem()** is high-level equivalent of |
| 1082 | * **bpf_map_lookup_elem()** API with added check for key and value size. |
| 1083 | */ |
| 1084 | LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map, |
| 1085 | const void *key, size_t key_sz, |
| 1086 | void *value, size_t value_sz, __u64 flags); |
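
/*
 * Example (an illustrative sketch): reading a per-CPU counter map with
 * __u32 keys and __u64 values; since the value is already 8-byte sized,
 * no extra rounding is needed.
 *
 *    int ncpus = libbpf_num_possible_cpus();
 *    __u32 key = 0;
 *    __u64 *values = calloc(ncpus, sizeof(__u64));
 *    int err;
 *
 *    err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *                               values, ncpus * sizeof(__u64), 0);
 */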
| 1087 | |
| 1088 | /** |
 * @brief **bpf_map__update_elem()** inserts or updates the value in the BPF
 * map that corresponds to the provided key.
| 1091 | * @param map BPF map to insert to or update element in |
| 1092 | * @param key pointer to memory containing bytes of the key |
| 1093 | * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** |
| 1094 | * @param value pointer to memory containing bytes of the value |
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps the value size has to be
 * a product of the BPF map value size and the number of possible CPUs in the
 * system (which can be fetched with **libbpf_num_possible_cpus()**). Note
 * also that for per-CPU values the value size has to be aligned up to the
 * closest 8 bytes, so the expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
| 1103 | * @return 0, on success; negative error, otherwise |
| 1104 | * |
| 1105 | * **bpf_map__update_elem()** is high-level equivalent of |
| 1106 | * **bpf_map_update_elem()** API with added check for key and value size. |
| 1107 | */ |
| 1108 | LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map, |
| 1109 | const void *key, size_t key_sz, |
| 1110 | const void *value, size_t value_sz, __u64 flags); |
| 1111 | |
| 1112 | /** |
 * @brief **bpf_map__delete_elem()** deletes the element in the BPF map that
 * corresponds to the provided key.
| 1115 | * @param map BPF map to delete element from |
| 1116 | * @param key pointer to memory containing bytes of the key |
| 1117 | * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** |
 * @param flags extra flags passed to kernel for this operation
| 1119 | * @return 0, on success; negative error, otherwise |
| 1120 | * |
| 1121 | * **bpf_map__delete_elem()** is high-level equivalent of |
| 1122 | * **bpf_map_delete_elem()** API with added check for key size. |
| 1123 | */ |
| 1124 | LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map, |
| 1125 | const void *key, size_t key_sz, __u64 flags); |
| 1126 | |
| 1127 | /** |
 * @brief **bpf_map__lookup_and_delete_elem()** looks up the BPF map value
 * corresponding to the provided key and atomically deletes it afterwards.
| 1130 | * @param map BPF map to lookup element in |
| 1131 | * @param key pointer to memory containing bytes of the key used for lookup |
| 1132 | * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** |
| 1133 | * @param value pointer to memory in which looked up value will be stored |
 * @param value_sz size in bytes of value data memory; it has to match BPF map
 * definition's **value_size**. For per-CPU BPF maps the value size has to be
 * a product of the BPF map value size and the number of possible CPUs in the
 * system (which can be fetched with **libbpf_num_possible_cpus()**). Note
 * also that for per-CPU values the value size has to be aligned up to the
 * closest 8 bytes, so the expected size is: `round_up(value_size, 8)
 * * libbpf_num_possible_cpus()`.
 * @param flags extra flags passed to kernel for this operation
| 1142 | * @return 0, on success; negative error, otherwise |
| 1143 | * |
| 1144 | * **bpf_map__lookup_and_delete_elem()** is high-level equivalent of |
| 1145 | * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size. |
| 1146 | */ |
| 1147 | LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, |
| 1148 | const void *key, size_t key_sz, |
| 1149 | void *value, size_t value_sz, __u64 flags); |
| 1150 | |
| 1151 | /** |
 * @brief **bpf_map__get_next_key()** allows iterating BPF map keys by
 * fetching the next key that follows the current key.
| 1154 | * @param map BPF map to fetch next key from |
| 1155 | * @param cur_key pointer to memory containing bytes of current key or NULL to |
| 1156 | * fetch the first key |
| 1157 | * @param next_key pointer to memory to write next key into |
| 1158 | * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** |
| 1159 | * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map; |
| 1160 | * negative error, otherwise |
| 1161 | * |
| 1162 | * **bpf_map__get_next_key()** is high-level equivalent of |
| 1163 | * **bpf_map_get_next_key()** API with added check for key size. |
| 1164 | */ |
| 1165 | LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map, |
| 1166 | const void *cur_key, void *next_key, size_t key_sz); |
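
/*
 * Example (an illustrative sketch): iterating all keys of a map with
 * 4-byte keys; the loop terminates when -ENOENT is returned after the
 * last key.
 *
 *    __u32 cur, next;
 *    int err;
 *
 *    for (err = bpf_map__get_next_key(map, NULL, &next, sizeof(next));
 *         !err;
 *         err = bpf_map__get_next_key(map, &cur, &next, sizeof(next))) {
 *        cur = next;
 *        // use cur ...
 *    }
 */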
| 1167 | |
Toke Høiland-Jørgensen | bd5ca3e | 2020-03-25 18:23:28 +0100 | [diff] [blame] | 1168 | struct bpf_xdp_set_link_opts { |
| 1169 | size_t sz; |
Toke Høiland-Jørgensen | 49b452c | 2020-04-14 16:50:24 +0200 | [diff] [blame] | 1170 | int old_fd; |
Andrii Nakryiko | dde7b3f | 2021-03-13 13:09:17 -0800 | [diff] [blame] | 1171 | size_t :0; |
Toke Høiland-Jørgensen | bd5ca3e | 2020-03-25 18:23:28 +0100 | [diff] [blame] | 1172 | }; |
| 1173 | #define bpf_xdp_set_link_opts__last_field old_fd |
| 1174 | |
Andrii Nakryiko | c359821 | 2022-01-19 22:14:19 -0800 | [diff] [blame] | 1175 | struct bpf_xdp_attach_opts { |
| 1176 | size_t sz; |
| 1177 | int old_prog_fd; |
| 1178 | size_t :0; |
| 1179 | }; |
| 1180 | #define bpf_xdp_attach_opts__last_field old_prog_fd |
| 1181 | |
| 1182 | struct bpf_xdp_query_opts { |
| 1183 | size_t sz; |
| 1184 | __u32 prog_id; /* output */ |
| 1185 | __u32 drv_prog_id; /* output */ |
| 1186 | __u32 hw_prog_id; /* output */ |
| 1187 | __u32 skb_prog_id; /* output */ |
| 1188 | __u8 attach_mode; /* output */ |
Lorenzo Bianconi | 04d58f1b | 2023-02-01 11:24:21 +0100 | [diff] [blame] | 1189 | __u64 feature_flags; /* output */ |
Maciej Fijalkowski | 13ce2da | 2023-07-19 15:24:07 +0200 | [diff] [blame] | 1190 | __u32 xdp_zc_max_segs; /* output */ |
Andrii Nakryiko | c359821 | 2022-01-19 22:14:19 -0800 | [diff] [blame] | 1191 | size_t :0; |
| 1192 | }; |
Maciej Fijalkowski | 13ce2da | 2023-07-19 15:24:07 +0200 | [diff] [blame] | 1193 | #define bpf_xdp_query_opts__last_field xdp_zc_max_segs |
Andrii Nakryiko | c359821 | 2022-01-19 22:14:19 -0800 | [diff] [blame] | 1194 | |
| 1195 | LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, |
| 1196 | const struct bpf_xdp_attach_opts *opts); |
| 1197 | LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags, |
| 1198 | const struct bpf_xdp_attach_opts *opts); |
| 1199 | LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts); |
| 1200 | LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id); |
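
/*
 * Example (an illustrative sketch): attaching a loaded XDP program to
 * ifindex 2 in driver mode and querying the attached program ID back.
 * XDP_FLAGS_DRV_MODE is defined in linux/if_link.h, which is assumed to
 * be included.
 *
 *    __u32 prog_id;
 *    int err;
 *
 *    err = bpf_xdp_attach(2, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
 *    if (!err)
 *        err = bpf_xdp_query_id(2, XDP_FLAGS_DRV_MODE, &prog_id);
 */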
| 1201 | |
Kumar Kartikeya Dwivedi | 715c5ce | 2021-05-13 01:41:22 +0200 | [diff] [blame] | 1202 | /* TC related API */ |
| 1203 | enum bpf_tc_attach_point { |
| 1204 | BPF_TC_INGRESS = 1 << 0, |
| 1205 | BPF_TC_EGRESS = 1 << 1, |
| 1206 | BPF_TC_CUSTOM = 1 << 2, |
| 1207 | }; |
| 1208 | |
| 1209 | #define BPF_TC_PARENT(a, b) \ |
| 1210 | ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU)) |
| 1211 | |
| 1212 | enum bpf_tc_flags { |
| 1213 | BPF_TC_F_REPLACE = 1 << 0, |
| 1214 | }; |
| 1215 | |
| 1216 | struct bpf_tc_hook { |
| 1217 | size_t sz; |
| 1218 | int ifindex; |
| 1219 | enum bpf_tc_attach_point attach_point; |
| 1220 | __u32 parent; |
| 1221 | size_t :0; |
| 1222 | }; |
| 1223 | #define bpf_tc_hook__last_field parent |
| 1224 | |
| 1225 | struct bpf_tc_opts { |
| 1226 | size_t sz; |
| 1227 | int prog_fd; |
| 1228 | __u32 flags; |
| 1229 | __u32 prog_id; |
| 1230 | __u32 handle; |
| 1231 | __u32 priority; |
| 1232 | size_t :0; |
| 1233 | }; |
| 1234 | #define bpf_tc_opts__last_field priority |
| 1235 | |
| 1236 | LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook); |
| 1237 | LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook); |
| 1238 | LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook, |
| 1239 | struct bpf_tc_opts *opts); |
| 1240 | LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook, |
| 1241 | const struct bpf_tc_opts *opts); |
| 1242 | LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook, |
| 1243 | struct bpf_tc_opts *opts); |
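
/*
 * Example (an illustrative sketch): attaching a program on the ingress
 * hook of ifindex 2; LIBBPF_OPTS() comes from libbpf_common.h. Note that
 * bpf_tc_hook_create() returns -EEXIST if the qdisc is already set up,
 * which is fine for attaching.
 *
 *    LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = 2,
 *                .attach_point = BPF_TC_INGRESS);
 *    LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
 *    int err;
 *
 *    err = bpf_tc_hook_create(&hook);
 *    if (!err || err == -EEXIST)
 *        err = bpf_tc_attach(&hook, &opts);
 */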
| 1244 | |
Andrii Nakryiko | bf99c93 | 2020-05-29 00:54:21 -0700 | [diff] [blame] | 1245 | /* Ring buffer APIs */ |
| 1246 | struct ring_buffer; |
Martin Kelly | 1c97f6a | 2023-09-25 14:50:34 -0700 | [diff] [blame] | 1247 | struct ring; |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1248 | struct user_ring_buffer; |
Andrii Nakryiko | bf99c93 | 2020-05-29 00:54:21 -0700 | [diff] [blame] | 1249 | |
| 1250 | typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size); |
| 1251 | |
| 1252 | struct ring_buffer_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 1253 | size_t sz; /* size of this struct, for forward/backward compatibility */ |
Andrii Nakryiko | bf99c93 | 2020-05-29 00:54:21 -0700 | [diff] [blame] | 1254 | }; |
| 1255 | |
| 1256 | #define ring_buffer_opts__last_field sz |
| 1257 | |
| 1258 | LIBBPF_API struct ring_buffer * |
| 1259 | ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, |
| 1260 | const struct ring_buffer_opts *opts); |
| 1261 | LIBBPF_API void ring_buffer__free(struct ring_buffer *rb); |
| 1262 | LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd, |
| 1263 | ring_buffer_sample_fn sample_cb, void *ctx); |
| 1264 | LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms); |
| 1265 | LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb); |
Brendan Jackman | a4d2a7a | 2020-12-14 11:38:12 +0000 | [diff] [blame] | 1266 | LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb); |
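
/*
 * Example (an illustrative sketch): consuming records from a
 * BPF_MAP_TYPE_RINGBUF map; the callback name is made up.
 *
 *    static int handle_event(void *ctx, void *data, size_t size)
 *    {
 *        // process one record; return 0 to keep consuming
 *        return 0;
 *    }
 *
 *    struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
 *
 *    while (ring_buffer__poll(rb, 100) >= 0) {
 *        // records are delivered to handle_event()
 *    }
 *    ring_buffer__free(rb);
 */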
Andrii Nakryiko | bf99c93 | 2020-05-29 00:54:21 -0700 | [diff] [blame] | 1267 | |
Martin Kelly | 1c97f6a | 2023-09-25 14:50:34 -0700 | [diff] [blame] | 1268 | /** |
| 1269 | * @brief **ring_buffer__ring()** returns the ringbuffer object inside a given |
| 1270 | * ringbuffer manager representing a single BPF_MAP_TYPE_RINGBUF map instance. |
| 1271 | * |
| 1272 | * @param rb A ringbuffer manager object. |
| 1273 | * @param idx An index into the ringbuffers contained within the ringbuffer |
| 1274 | * manager object. The index is 0-based and corresponds to the order in which |
| 1275 | * ring_buffer__add was called. |
| 1276 | * @return A ringbuffer object on success; NULL and errno set if the index is |
| 1277 | * invalid. |
| 1278 | */ |
| 1279 | LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb, |
| 1280 | unsigned int idx); |
| 1281 | |
Martin Kelly | 059a8c0 | 2023-09-25 14:50:36 -0700 | [diff] [blame] | 1282 | /** |
| 1283 | * @brief **ring__consumer_pos()** returns the current consumer position in the |
| 1284 | * given ringbuffer. |
| 1285 | * |
| 1286 | * @param r A ringbuffer object. |
| 1287 | * @return The current consumer position. |
| 1288 | */ |
| 1289 | LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r); |
| 1290 | |
| 1291 | /** |
| 1292 | * @brief **ring__producer_pos()** returns the current producer position in the |
| 1293 | * given ringbuffer. |
| 1294 | * |
| 1295 | * @param r A ringbuffer object. |
| 1296 | * @return The current producer position. |
| 1297 | */ |
| 1298 | LIBBPF_API unsigned long ring__producer_pos(const struct ring *r); |
| 1299 | |
Martin Kelly | 3b34d29 | 2023-09-25 14:50:38 -0700 | [diff] [blame] | 1300 | /** |
| 1301 | * @brief **ring__avail_data_size()** returns the number of bytes in the |
| 1302 | * ringbuffer not yet consumed. This has no locking associated with it, so it |
| 1303 | * can be inaccurate if operations are ongoing while this is called. However, it |
| 1304 | * should still show the correct trend over the long-term. |
| 1305 | * |
| 1306 | * @param r A ringbuffer object. |
| 1307 | * @return The number of bytes not yet consumed. |
| 1308 | */ |
| 1309 | LIBBPF_API size_t ring__avail_data_size(const struct ring *r); |
| 1310 | |
Martin Kelly | e79abf7 | 2023-09-25 14:50:40 -0700 | [diff] [blame] | 1311 | /** |
| 1312 | * @brief **ring__size()** returns the total size of the ringbuffer's map data |
 * area (excluding special producer/consumer pages). Effectively this gives
 * the number of usable bytes of data inside the ringbuffer.
| 1315 | * |
| 1316 | * @param r A ringbuffer object. |
| 1317 | * @return The total size of the ringbuffer map data area. |
| 1318 | */ |
| 1319 | LIBBPF_API size_t ring__size(const struct ring *r); |
| 1320 | |
Martin Kelly | ae76939 | 2023-09-25 14:50:42 -0700 | [diff] [blame] | 1321 | /** |
| 1322 | * @brief **ring__map_fd()** returns the file descriptor underlying the given |
| 1323 | * ringbuffer. |
| 1324 | * |
| 1325 | * @param r A ringbuffer object. |
| 1326 | * @return The underlying ringbuffer file descriptor |
| 1327 | */ |
| 1328 | LIBBPF_API int ring__map_fd(const struct ring *r); |
| 1329 | |
Martin Kelly | 16058ff | 2023-09-25 14:50:44 -0700 | [diff] [blame] | 1330 | /** |
| 1331 | * @brief **ring__consume()** consumes available ringbuffer data without event |
| 1332 | * polling. |
| 1333 | * |
| 1334 | * @param r A ringbuffer object. |
| 1335 | * @return The number of records consumed (or INT_MAX, whichever is less), or |
| 1336 | * a negative number if any of the callbacks return an error. |
| 1337 | */ |
| 1338 | LIBBPF_API int ring__consume(struct ring *r); |
| 1339 | |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1340 | struct user_ring_buffer_opts { |
| 1341 | size_t sz; /* size of this struct, for forward/backward compatibility */ |
| 1342 | }; |
| 1343 | |
| 1344 | #define user_ring_buffer_opts__last_field sz |
| 1345 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1346 | /** |
| 1347 | * @brief **user_ring_buffer__new()** creates a new instance of a user ring |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1348 | * buffer. |
| 1349 | * |
| 1350 | * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map. |
| 1351 | * @param opts Options for how the ring buffer should be created. |
 * @return A user ring buffer on success; NULL, with errno set, on
 * failure.
| 1354 | */ |
| 1355 | LIBBPF_API struct user_ring_buffer * |
| 1356 | user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts); |
| 1357 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1358 | /** |
| 1359 | * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1360 | * user ring buffer. |
| 1361 | * @param rb A pointer to a user ring buffer. |
| 1362 | * @param size The size of the sample, in bytes. |
| 1363 | * @return A pointer to an 8-byte aligned reserved region of the user ring |
 * buffer; NULL, with errno set, if a sample could not be reserved.
| 1365 | * |
| 1366 | * This function is *not* thread safe, and callers must synchronize accessing |
| 1367 | * this function if there are multiple producers. If a size is requested that |
| 1368 | * is larger than the size of the entire ring buffer, errno will be set to |
| 1369 | * E2BIG and NULL is returned. If the ring buffer could accommodate the size, |
| 1370 | * but currently does not have enough space, errno is set to ENOSPC and NULL is |
| 1371 | * returned. |
| 1372 | * |
| 1373 | * After initializing the sample, callers must invoke |
| 1374 | * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise, |
| 1375 | * the sample must be freed with **user_ring_buffer__discard()**. |
| 1376 | */ |
| 1377 | LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size); |
| 1378 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1379 | /** |
| 1380 | * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1381 | * ring buffer, possibly blocking for up to @timeout_ms until a sample becomes |
| 1382 | * available. |
| 1383 | * @param rb The user ring buffer. |
| 1384 | * @param size The size of the sample, in bytes. |
| 1385 | * @param timeout_ms The amount of time, in milliseconds, for which the caller |
| 1386 | * should block when waiting for a sample. -1 causes the caller to block |
| 1387 | * indefinitely. |
| 1388 | * @return A pointer to an 8-byte aligned reserved region of the user ring |
 * buffer; NULL, with errno set, if a sample could not be reserved.
| 1390 | * |
| 1391 | * This function is *not* thread safe, and callers must synchronize |
 * accessing this function if there are multiple producers.
| 1393 | * |
| 1394 | * If **timeout_ms** is -1, the function will block indefinitely until a sample |
| 1395 | * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno |
| 1396 | * is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking |
| 1397 | * will occur and the function will return immediately after attempting to |
| 1398 | * reserve a sample. |
| 1399 | * |
| 1400 | * If **size** is larger than the size of the entire ring buffer, errno is set |
| 1401 | * to E2BIG and NULL is returned. If the ring buffer could accommodate |
| 1402 | * **size**, but currently does not have enough space, the caller will block |
| 1403 | * until at most **timeout_ms** has elapsed. If insufficient space is available |
| 1404 | * at that time, errno is set to ENOSPC, and NULL is returned. |
| 1405 | * |
| 1406 | * The kernel guarantees that it will wake up this thread to check if |
| 1407 | * sufficient space is available in the ring buffer at least once per |
| 1408 | * invocation of the **bpf_ringbuf_drain()** helper function, provided that at |
| 1409 | * least one sample is consumed, and the BPF program did not invoke the |
| 1410 | * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the |
| 1411 | * kernel does not guarantee this. If the helper function is invoked with |
| 1412 | * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is |
| 1413 | * consumed. |
| 1414 | * |
| 1415 | * When a sample of size **size** is found within **timeout_ms**, a pointer to |
| 1416 | * the sample is returned. After initializing the sample, callers must invoke |
| 1417 | * **user_ring_buffer__submit()** to post the sample to the ring buffer. |
| 1418 | * Otherwise, the sample must be freed with **user_ring_buffer__discard()**. |
| 1419 | */ |
| 1420 | LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, |
| 1421 | __u32 size, |
| 1422 | int timeout_ms); |
| 1423 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1424 | /** |
| 1425 | * @brief **user_ring_buffer__submit()** submits a previously reserved sample |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1426 | * into the ring buffer. |
| 1427 | * @param rb The user ring buffer. |
| 1428 | * @param sample A reserved sample. |
| 1429 | * |
| 1430 | * It is not necessary to synchronize amongst multiple producers when invoking |
| 1431 | * this function. |
| 1432 | */ |
| 1433 | LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample); |
| 1434 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1435 | /** |
| 1436 | * @brief **user_ring_buffer__discard()** discards a previously reserved sample. |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1437 | * @param rb The user ring buffer. |
| 1438 | * @param sample A reserved sample. |
| 1439 | * |
| 1440 | * It is not necessary to synchronize amongst multiple producers when invoking |
| 1441 | * this function. |
| 1442 | */ |
| 1443 | LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample); |
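
/*
 * Example (an illustrative sketch): submitting one sample to a
 * BPF_MAP_TYPE_USER_RINGBUF map; struct my_sample and 'urb' are made up.
 *
 *    struct my_sample *s = user_ring_buffer__reserve(urb, sizeof(*s));
 *
 *    if (!s) {
 *        // errno is E2BIG or ENOSPC
 *    } else {
 *        s->value = 42;
 *        user_ring_buffer__submit(urb, s);
 *        // or user_ring_buffer__discard(urb, s) to drop it
 *    }
 */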
| 1444 | |
Grant Seltzer | 139df64 | 2023-01-25 21:47:49 -0500 | [diff] [blame] | 1445 | /** |
| 1446 | * @brief **user_ring_buffer__free()** frees a ring buffer that was previously |
David Vernet | b66ccae | 2022-09-19 19:00:59 -0500 | [diff] [blame] | 1447 | * created with **user_ring_buffer__new()**. |
| 1448 | * @param rb The user ring buffer being freed. |
| 1449 | */ |
| 1450 | LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb); |
| 1451 | |
Andrii Nakryiko | bf99c93 | 2020-05-29 00:54:21 -0700 | [diff] [blame] | 1452 | /* Perf buffer APIs */ |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1453 | struct perf_buffer; |
| 1454 | |
| 1455 | typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu, |
| 1456 | void *data, __u32 size); |
| 1457 | typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt); |
| 1458 | |
| 1459 | /* common use perf buffer options */ |
| 1460 | struct perf_buffer_opts { |
Andrii Nakryiko | 22dd7a5 | 2022-06-27 14:15:18 -0700 | [diff] [blame] | 1461 | size_t sz; |
Jon Doron | ab8684b | 2023-02-07 10:19:16 +0200 | [diff] [blame] | 1462 | __u32 sample_period; |
| 1463 | size_t :0; |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1464 | }; |
Jon Doron | ab8684b | 2023-02-07 10:19:16 +0200 | [diff] [blame] | 1465 | #define perf_buffer_opts__last_field sample_period |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1466 | |
Andrii Nakryiko | 4178893 | 2021-11-10 21:36:20 -0800 | [diff] [blame] | 1467 | /** |
| 1468 | * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified |
| 1469 | * BPF_PERF_EVENT_ARRAY map |
| 1470 | * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF |
| 1471 | * code to send data over to user-space |
| 1472 | * @param page_cnt number of memory pages allocated for each per-CPU buffer |
| 1473 | * @param sample_cb function called on each received data record |
| 1474 | * @param lost_cb function called when record loss has occurred |
| 1475 | * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb* |
| 1476 | * @return a new instance of struct perf_buffer on success, NULL on error with |
| 1477 | * *errno* containing an error code |
| 1478 | */ |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1479 | LIBBPF_API struct perf_buffer * |
| 1480 | perf_buffer__new(int map_fd, size_t page_cnt, |
Andrii Nakryiko | 4178893 | 2021-11-10 21:36:20 -0800 | [diff] [blame] | 1481 | perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1482 | const struct perf_buffer_opts *opts); |
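
/*
 * Example (an illustrative sketch): 8 pages per CPU buffer; the callback
 * names are made up.
 *
 *    static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *    {
 *        // process one sample
 *    }
 *
 *    static void on_lost(void *ctx, int cpu, __u64 cnt)
 *    {
 *        // cnt samples were lost on this cpu
 *    }
 *
 *    struct perf_buffer *pb = perf_buffer__new(map_fd, 8, on_sample,
 *                                              on_lost, NULL, NULL);
 *
 *    while (perf_buffer__poll(pb, 100) >= 0) {
 *        // samples are delivered to on_sample()
 *    }
 *    perf_buffer__free(pb);
 */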
| 1483 | |
Jakub Kicinski | d0cabbb | 2018-05-10 10:24:40 -0700 | [diff] [blame] | 1484 | enum bpf_perf_event_ret { |
| 1485 | LIBBPF_PERF_EVENT_DONE = 0, |
| 1486 | LIBBPF_PERF_EVENT_ERROR = -1, |
| 1487 | LIBBPF_PERF_EVENT_CONT = -2, |
| 1488 | }; |
| 1489 | |
Daniel Borkmann | 3dca211 | 2018-10-21 02:09:28 +0200 | [diff] [blame] | 1490 | struct perf_event_header; |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1491 | |
| 1492 | typedef enum bpf_perf_event_ret |
| 1493 | (*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event); |
| 1494 | |
| 1495 | /* raw perf buffer options, giving most power and control */ |
| 1496 | struct perf_buffer_raw_opts { |
Andrii Nakryiko | 22dd7a5 | 2022-06-27 14:15:18 -0700 | [diff] [blame] | 1497 | size_t sz; |
| 1498 | long :0; |
| 1499 | long :0; |
	/* if cpu_cnt == 0, open ring buffers on all possible CPUs (up to the
	 * number of max_entries of the given PERF_EVENT_ARRAY map)
| 1502 | */ |
| 1503 | int cpu_cnt; |
| 1504 | /* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */ |
| 1505 | int *cpus; |
| 1506 | /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */ |
| 1507 | int *map_keys; |
| 1508 | }; |
Andrii Nakryiko | 4178893 | 2021-11-10 21:36:20 -0800 | [diff] [blame] | 1509 | #define perf_buffer_raw_opts__last_field map_keys |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1510 | |
Andrii Nakryiko | 22dd7a5 | 2022-06-27 14:15:18 -0700 | [diff] [blame] | 1511 | struct perf_event_attr; |
| 1512 | |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1513 | LIBBPF_API struct perf_buffer * |
Andrii Nakryiko | 4178893 | 2021-11-10 21:36:20 -0800 | [diff] [blame] | 1514 | perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr, |
| 1515 | perf_buffer_event_fn event_cb, void *ctx, |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1516 | const struct perf_buffer_raw_opts *opts); |
| 1517 | |
| 1518 | LIBBPF_API void perf_buffer__free(struct perf_buffer *pb); |
Andrii Nakryiko | dca5612 | 2020-08-21 09:59:27 -0700 | [diff] [blame] | 1519 | LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb); |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1520 | LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms); |
Eelco Chaudron | 272d51a | 2020-05-26 11:21:42 +0200 | [diff] [blame] | 1521 | LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb); |
Andrii Nakryiko | dca5612 | 2020-08-21 09:59:27 -0700 | [diff] [blame] | 1522 | LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx); |
| 1523 | LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb); |
| 1524 | LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx); |
Jon Doron | 9ff5efd | 2022-07-15 21:11:22 +0300 | [diff] [blame] | 1525 | /** |
| 1526 | * @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying |
| 1527 | * memory region of the ring buffer. |
| 1528 | * This ring buffer can be used to implement a custom events consumer. |
| 1529 | * The ring buffer starts with the *struct perf_event_mmap_page*, which |
 * holds the ring buffer management fields. When accessing the header
 * structure it's important to be SMP aware.
 * You can refer to *perf_event_read_simple* for a simple example.
 * @param pb the perf buffer structure
 * @param buf_idx the buffer index to retrieve
| 1535 | * @param buf (out) gets the base pointer of the mmap()'ed memory |
| 1536 | * @param buf_size (out) gets the size of the mmap()'ed region |
| 1537 | * @return 0 on success, negative error code for failure |
| 1538 | */ |
| 1539 | LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, |
| 1540 | size_t *buf_size); |
Andrii Nakryiko | fb84b82 | 2019-07-06 11:06:24 -0700 | [diff] [blame] | 1541 | |
Martin KaFai Lau | b053b43 | 2018-12-07 16:42:32 -0800 | [diff] [blame] | 1542 | struct bpf_prog_linfo; |
| 1543 | struct bpf_prog_info; |
| 1544 | |
| 1545 | LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo); |
| 1546 | LIBBPF_API struct bpf_prog_linfo * |
| 1547 | bpf_prog_linfo__new(const struct bpf_prog_info *info); |
| 1548 | LIBBPF_API const struct bpf_line_info * |
| 1549 | bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, |
| 1550 | __u64 addr, __u32 func_idx, __u32 nr_skip); |
| 1551 | LIBBPF_API const struct bpf_line_info * |
| 1552 | bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, |
| 1553 | __u32 insn_off, __u32 nr_skip); |
| 1554 | |
Quentin Monnet | 1bf4b05 | 2019-01-17 15:27:53 +0000 | [diff] [blame] | 1555 | /* |
| 1556 | * Probe for supported system features |
| 1557 | * |
| 1558 | * Note that running many of these probes in a short amount of time can cause |
| 1559 | * the kernel to reach the maximal size of lockable memory allowed for the |
| 1560 | * user, causing subsequent probes to fail. In this case, the caller may want |
| 1561 | * to adjust that limit with setrlimit(). |
| 1562 | */ |
Quentin Monnet | 1bf4b05 | 2019-01-17 15:27:53 +0000 | [diff] [blame] | 1563 | |
Andrii Nakryiko | 878d8de | 2021-12-17 09:12:00 -0800 | [diff] [blame] | 1564 | /** |
| 1565 | * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports |
| 1566 | * BPF programs of a given type. |
| 1567 | * @param prog_type BPF program type to detect kernel support for |
| 1568 | * @param opts reserved for future extensibility, should be NULL |
| 1569 | * @return 1, if given program type is supported; 0, if given program type is |
| 1570 | * not supported; negative error code if feature detection failed or can't be |
| 1571 | * performed |
| 1572 | * |
| 1573 | * Make sure the process has required set of CAP_* permissions (or runs as |
| 1574 | * root) when performing feature checking. |
| 1575 | */ |
| 1576 | LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts); |
| 1577 | /** |
| 1578 | * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports |
| 1579 | * BPF maps of a given type. |
| 1580 | * @param map_type BPF map type to detect kernel support for |
| 1581 | * @param opts reserved for future extensibility, should be NULL |
| 1582 | * @return 1, if given map type is supported; 0, if given map type is |
| 1583 | * not supported; negative error code if feature detection failed or can't be |
| 1584 | * performed |
| 1585 | * |
| 1586 | * Make sure the process has required set of CAP_* permissions (or runs as |
| 1587 | * root) when performing feature checking. |
| 1588 | */ |
| 1589 | LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts); |
| 1590 | /** |
| 1591 | * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the |
| 1592 | * use of a given BPF helper from specified BPF program type. |
| 1593 | * @param prog_type BPF program type used to check the support of BPF helper |
| 1594 | * @param helper_id BPF helper ID (enum bpf_func_id) to check support for |
| 1595 | * @param opts reserved for future extensibility, should be NULL |
| 1596 | * @return 1, if given combination of program type and helper is supported; 0, |
| 1597 | * if the combination is not supported; negative error code if feature |
| 1598 | * detection for provided input arguments failed or can't be performed |
| 1599 | * |
| 1600 | * Make sure the process has required set of CAP_* permissions (or runs as |
| 1601 | * root) when performing feature checking. |
| 1602 | */ |
| 1603 | LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, |
| 1604 | enum bpf_func_id helper_id, const void *opts); |
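
/*
 * Example (an illustrative sketch): checking that the host kernel
 * supports BPF_MAP_TYPE_RINGBUF and the bpf_ringbuf_output() helper
 * before relying on them.
 *
 *    if (libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1 &&
 *        libbpf_probe_bpf_helper(BPF_PROG_TYPE_TRACEPOINT,
 *                                BPF_FUNC_ringbuf_output, NULL) == 1) {
 *        // safe to use the ring buffer
 *    }
 */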
| 1605 | |
Grant Seltzer | 97c140d | 2021-09-17 23:14:58 -0400 | [diff] [blame] | 1606 | /** |
| 1607 | * @brief **libbpf_num_possible_cpus()** is a helper function to get the |
| 1608 | * number of possible CPUs that the host kernel supports and expects. |
| 1609 | * @return number of possible CPUs; or error code on failure |
Hechao Li | 6446b31 | 2019-06-10 17:56:50 -0700 | [diff] [blame] | 1610 | * |
| 1611 | * Example usage: |
| 1612 | * |
| 1613 | * int ncpus = libbpf_num_possible_cpus(); |
| 1614 | * if (ncpus < 0) { |
| 1615 | * // error handling |
| 1616 | * } |
| 1617 | * long values[ncpus]; |
| 1618 | * bpf_map_lookup_elem(per_cpu_map_fd, key, values); |
Hechao Li | 6446b31 | 2019-06-10 17:56:50 -0700 | [diff] [blame] | 1619 | */ |
| 1620 | LIBBPF_API int libbpf_num_possible_cpus(void); |
| 1621 | |
Andrii Nakryiko | d66562f | 2019-12-13 17:43:36 -0800 | [diff] [blame] | 1622 | struct bpf_map_skeleton { |
| 1623 | const char *name; |
| 1624 | struct bpf_map **map; |
| 1625 | void **mmaped; |
| 1626 | }; |
| 1627 | |
| 1628 | struct bpf_prog_skeleton { |
| 1629 | const char *name; |
| 1630 | struct bpf_program **prog; |
| 1631 | struct bpf_link **link; |
| 1632 | }; |
| 1633 | |
| 1634 | struct bpf_object_skeleton { |
| 1635 | size_t sz; /* size of this struct, for forward/backward compatibility */ |
| 1636 | |
| 1637 | const char *name; |
Matt Smith | 08a6f22 | 2021-09-01 12:44:37 -0700 | [diff] [blame] | 1638 | const void *data; |
Andrii Nakryiko | d66562f | 2019-12-13 17:43:36 -0800 | [diff] [blame] | 1639 | size_t data_sz; |
| 1640 | |
| 1641 | struct bpf_object **obj; |
| 1642 | |
| 1643 | int map_cnt; |
huangxuesen | 222c98c | 2021-12-06 09:47:16 +0800 | [diff] [blame] | 1644 | int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ |
Andrii Nakryiko | d66562f | 2019-12-13 17:43:36 -0800 | [diff] [blame] | 1645 | struct bpf_map_skeleton *maps; |
| 1646 | |
| 1647 | int prog_cnt; |
huangxuesen | 222c98c | 2021-12-06 09:47:16 +0800 | [diff] [blame] | 1648 | int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ |
Andrii Nakryiko | d66562f | 2019-12-13 17:43:36 -0800 | [diff] [blame] | 1649 | struct bpf_prog_skeleton *progs; |
| 1650 | }; |
| 1651 | |
| 1652 | LIBBPF_API int |
| 1653 | bpf_object__open_skeleton(struct bpf_object_skeleton *s, |
| 1654 | const struct bpf_object_open_opts *opts); |
| 1655 | LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s); |
| 1656 | LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); |
| 1657 | LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); |
| 1658 | LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); |
| 1659 | |
Delyan Kratunov | 430025e | 2022-03-16 23:37:33 +0000 | [diff] [blame] | 1660 | struct bpf_var_skeleton { |
| 1661 | const char *name; |
| 1662 | struct bpf_map **map; |
| 1663 | void **addr; |
| 1664 | }; |
| 1665 | |
| 1666 | struct bpf_object_subskeleton { |
| 1667 | size_t sz; /* size of this struct, for forward/backward compatibility */ |
| 1668 | |
| 1669 | const struct bpf_object *obj; |
| 1670 | |
| 1671 | int map_cnt; |
| 1672 | int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ |
| 1673 | struct bpf_map_skeleton *maps; |
| 1674 | |
| 1675 | int prog_cnt; |
| 1676 | int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ |
| 1677 | struct bpf_prog_skeleton *progs; |
| 1678 | |
| 1679 | int var_cnt; |
| 1680 | int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */ |
| 1681 | struct bpf_var_skeleton *vars; |
| 1682 | }; |
| 1683 | |
| 1684 | LIBBPF_API int |
| 1685 | bpf_object__open_subskeleton(struct bpf_object_subskeleton *s); |
| 1686 | LIBBPF_API void |
| 1687 | bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); |
| 1688 | |
Alexei Starovoitov | 6723474 | 2021-05-13 17:36:16 -0700 | [diff] [blame] | 1689 | struct gen_loader_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 1690 | size_t sz; /* size of this struct, for forward/backward compatibility */ |
Alexei Starovoitov | 6723474 | 2021-05-13 17:36:16 -0700 | [diff] [blame] | 1691 | const char *data; |
| 1692 | const char *insns; |
| 1693 | __u32 data_sz; |
| 1694 | __u32 insns_sz; |
| 1695 | }; |
| 1696 | |
| 1697 | #define gen_loader_opts__last_field insns_sz |
| 1698 | LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj, |
| 1699 | struct gen_loader_opts *opts); |
| 1700 | |
Andrii Nakryiko | 166750b | 2019-12-13 17:47:08 -0800 | [diff] [blame] | 1701 | enum libbpf_tristate { |
| 1702 | TRI_NO = 0, |
| 1703 | TRI_YES = 1, |
| 1704 | TRI_MODULE = 2, |
| 1705 | }; |
| 1706 | |
Andrii Nakryiko | faf6ed3 | 2021-03-18 12:40:30 -0700 | [diff] [blame] | 1707 | struct bpf_linker_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 1708 | /* size of this struct, for forward/backward compatibility */ |
Andrii Nakryiko | faf6ed3 | 2021-03-18 12:40:30 -0700 | [diff] [blame] | 1709 | size_t sz; |
| 1710 | }; |
| 1711 | #define bpf_linker_opts__last_field sz |
| 1712 | |
Andrii Nakryiko | fdbf5dd | 2021-05-06 22:41:14 -0700 | [diff] [blame] | 1713 | struct bpf_linker_file_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 1714 | /* size of this struct, for forward/backward compatibility */ |
Andrii Nakryiko | fdbf5dd | 2021-05-06 22:41:14 -0700 | [diff] [blame] | 1715 | size_t sz; |
| 1716 | }; |
| 1717 | #define bpf_linker_file_opts__last_field sz |
| 1718 | |
Andrii Nakryiko | faf6ed3 | 2021-03-18 12:40:30 -0700 | [diff] [blame] | 1719 | struct bpf_linker; |
| 1720 | |
| 1721 | LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts); |
Andrii Nakryiko | fdbf5dd | 2021-05-06 22:41:14 -0700 | [diff] [blame] | 1722 | LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, |
| 1723 | const char *filename, |
| 1724 | const struct bpf_linker_file_opts *opts); |
Andrii Nakryiko | faf6ed3 | 2021-03-18 12:40:30 -0700 | [diff] [blame] | 1725 | LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker); |
| 1726 | LIBBPF_API void bpf_linker__free(struct bpf_linker *linker); |
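
/*
 * Example (an illustrative sketch): statically linking two BPF object
 * files into one; the file names are made up.
 *
 *    struct bpf_linker *linker = bpf_linker__new("out.bpf.o", NULL);
 *
 *    if (!bpf_linker__add_file(linker, "a.bpf.o", NULL) &&
 *        !bpf_linker__add_file(linker, "b.bpf.o", NULL))
 *        bpf_linker__finalize(linker);
 *    bpf_linker__free(linker);
 */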
| 1727 | |
Andrii Nakryiko | 697f104 | 2022-03-04 17:01:28 -0800 | [diff] [blame] | 1728 | /* |
| 1729 | * Custom handling of BPF program's SEC() definitions |
| 1730 | */ |
| 1731 | |
| 1732 | struct bpf_prog_load_opts; /* defined in bpf.h */ |
| 1733 | |
| 1734 | /* Called during bpf_object__open() for each recognized BPF program. Callback |
| 1735 | * can use various bpf_program__set_*() setters to adjust whatever properties |
| 1736 | * are necessary. |
| 1737 | */ |
| 1738 | typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); |
| 1739 | |
| 1740 | /* Called right before libbpf performs bpf_prog_load() to load BPF program |
| 1741 | * into the kernel. Callback can adjust opts as necessary. |
| 1742 | */ |
| 1743 | typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, |
| 1744 | struct bpf_prog_load_opts *opts, long cookie); |
| 1745 | |
| 1746 | /* Called during skeleton attach or through bpf_program__attach(). If |
| 1747 | * auto-attach is not supported, callback should return 0 and set link to |
| 1748 | * NULL (it's not considered an error during skeleton attach, but it will be |
| 1749 | * an error for bpf_program__attach() calls). On error, error should be |
| 1750 | * returned directly and link set to NULL. On success, return 0 and set link |
| 1751 | * to a valid struct bpf_link. |
| 1752 | */ |
| 1753 | typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, |
| 1754 | struct bpf_link **link); |
| 1755 | |
| 1756 | struct libbpf_prog_handler_opts { |
Menglong Dong | f8b299b | 2023-03-06 14:48:31 +0800 | [diff] [blame] | 1757 | /* size of this struct, for forward/backward compatibility */ |
Andrii Nakryiko | 697f104 | 2022-03-04 17:01:28 -0800 | [diff] [blame] | 1758 | size_t sz; |
| 1759 | /* User-provided value that is passed to prog_setup_fn, |
| 1760 | * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to |
| 1761 | * register one set of callbacks for multiple SEC() definitions and |
| 1762 | * still be able to distinguish them, if necessary. For example, |
| 1763 | * libbpf itself is using this to pass necessary flags (e.g., |
| 1764 | * sleepable flag) to a common internal SEC() handler. |
| 1765 | */ |
| 1766 | long cookie; |
| 1767 | /* BPF program initialization callback (see libbpf_prog_setup_fn_t). |
| 1768 | * Callback is optional, pass NULL if it's not necessary. |
| 1769 | */ |
| 1770 | libbpf_prog_setup_fn_t prog_setup_fn; |
| 1771 | /* BPF program loading callback (see libbpf_prog_prepare_load_fn_t). |
| 1772 | * Callback is optional, pass NULL if it's not necessary. |
| 1773 | */ |
| 1774 | libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; |
| 1775 | /* BPF program attach callback (see libbpf_prog_attach_fn_t). |
| 1776 | * Callback is optional, pass NULL if it's not necessary. |
| 1777 | */ |
| 1778 | libbpf_prog_attach_fn_t prog_attach_fn; |
| 1779 | }; |
| 1780 | #define libbpf_prog_handler_opts__last_field prog_attach_fn |
| 1781 | |
| 1782 | /** |
| 1783 | * @brief **libbpf_register_prog_handler()** registers a custom BPF program |
| 1784 | * SEC() handler. |
| 1785 | * @param sec section prefix for which custom handler is registered |
| 1786 | * @param prog_type BPF program type associated with specified section |
| 1787 | * @param exp_attach_type Expected BPF attach type associated with specified section |
| 1788 | * @param opts optional cookie, callbacks, and other extra options |
| 1789 | * @return Non-negative handler ID is returned on success. This handler ID has |
| 1790 | * to be passed to *libbpf_unregister_prog_handler()* to unregister such |
| 1791 | * custom handler. Negative error code is returned on error. |
| 1792 | * |
| 1793 | * *sec* defines which SEC() definitions are handled by this custom handler |
 * registration. *sec* can have a few different forms:
| 1795 | * - if *sec* is just a plain string (e.g., "abc"), it will match only |
| 1796 | * SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result |
| 1797 | * in an error; |
| 1798 | * - if *sec* is of the form "abc/", proper SEC() form is |
| 1799 | * SEC("abc/something"), where acceptable "something" should be checked by |
 * the *prog_setup_fn* callback, if there are additional restrictions;
| 1801 | * - if *sec* is of the form "abc+", it will successfully match both |
| 1802 | * SEC("abc") and SEC("abc/whatever") forms; |
| 1803 | * - if *sec* is NULL, custom handler is registered for any BPF program that |
| 1804 | * doesn't match any of the registered (custom or libbpf's own) SEC() |
| 1805 | * handlers. There could be only one such generic custom handler registered |
| 1806 | * at any given time. |
| 1807 | * |
| 1808 | * All custom handlers (except the one with *sec* == NULL) are processed |
| 1809 | * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's |
| 1810 | * SEC() handlers by registering custom ones for the same section prefix |
| 1811 | * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses") |
| 1812 | * handler). |
| 1813 | * |
| 1814 | * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), |
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
| 1816 | * to ensure synchronization if there is a risk of running this API from |
| 1817 | * multiple threads simultaneously. |
| 1818 | */ |
| 1819 | LIBBPF_API int libbpf_register_prog_handler(const char *sec, |
| 1820 | enum bpf_prog_type prog_type, |
| 1821 | enum bpf_attach_type exp_attach_type, |
| 1822 | const struct libbpf_prog_handler_opts *opts); |
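
/*
 * Example (an illustrative sketch): registering a handler for custom
 * SEC("my_sec/...") programs, treating them as kprobes; the setup
 * callback name is made up.
 *
 *    LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
 *                .prog_setup_fn = my_setup_cb);
 *
 *    int id = libbpf_register_prog_handler("my_sec/", BPF_PROG_TYPE_KPROBE,
 *                                          0, &opts);
 *
 *    if (id >= 0) {
 *        // ... later ...
 *        libbpf_unregister_prog_handler(id);
 *    }
 */
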
| 1823 | /** |
| 1824 | * @brief *libbpf_unregister_prog_handler()* unregisters previously registered |
| 1825 | * custom BPF program SEC() handler. |
| 1826 | * @param handler_id handler ID returned by *libbpf_register_prog_handler()* |
| 1827 | * after successful registration |
| 1828 | * @return 0 on success, negative error code if handler isn't found |
| 1829 | * |
| 1830 | * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), |
 * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
| 1832 | * to ensure synchronization if there is a risk of running this API from |
| 1833 | * multiple threads simultaneously. |
| 1834 | */ |
| 1835 | LIBBPF_API int libbpf_unregister_prog_handler(int handler_id); |
| 1836 | |
Stanislav Fomichev | 8c4905b | 2018-11-21 09:29:44 -0800 | [diff] [blame] | 1837 | #ifdef __cplusplus |
| 1838 | } /* extern "C" */ |
| 1839 | #endif |
| 1840 | |
Andrey Ignatov | eff8190 | 2018-10-03 15:26:42 -0700 | [diff] [blame] | 1841 | #endif /* __LIBBPF_LIBBPF_H */ |