blob: bd8c2f19ef74cf65dcc3ccde87cae3b388dd4897 [file] [log] [blame]
Jiri Olsa285a30c2019-07-21 13:24:21 +02001// SPDX-License-Identifier: GPL-2.0
Jiri Olsab8eca4d2019-07-21 13:24:48 +02002#include <errno.h>
Jiri Olsa50a4e6f2019-07-21 13:24:49 +02003#include <unistd.h>
4#include <sys/syscall.h>
Jiri Olsa285a30c2019-07-21 13:24:21 +02005#include <perf/evsel.h>
Jiri Olsa50a4e6f2019-07-21 13:24:49 +02006#include <perf/cpumap.h>
7#include <perf/threadmap.h>
Jiri Olsa285a30c2019-07-21 13:24:21 +02008#include <linux/list.h>
9#include <internal/evsel.h>
Jiri Olsa63bd5df2019-07-21 13:24:33 +020010#include <linux/zalloc.h>
Jiri Olsab9358ee2019-07-21 13:24:36 +020011#include <stdlib.h>
Jiri Olsab8eca4d2019-07-21 13:24:48 +020012#include <internal/xyarray.h>
Jiri Olsa50a4e6f2019-07-21 13:24:49 +020013#include <internal/cpumap.h>
Rob Herring6cd70752021-04-14 11:07:37 -050014#include <internal/mmap.h>
Jiri Olsa50a4e6f2019-07-21 13:24:49 +020015#include <internal/threadmap.h>
Jiri Olsa5c30af92019-07-21 13:24:51 +020016#include <internal/lib.h>
Jiri Olsab8eca4d2019-07-21 13:24:48 +020017#include <linux/string.h>
Jiri Olsaa00571f2019-07-21 13:24:52 +020018#include <sys/ioctl.h>
Rob Herring6cd70752021-04-14 11:07:37 -050019#include <sys/mman.h>
Jiri Olsab04c5972019-07-21 13:24:24 +020020
/*
 * Initialize a caller-allocated evsel: reset its list linkage (so it can
 * later be linked into an evlist) and copy *attr into the evsel by value.
 */
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
}
Jiri Olsa63bd5df2019-07-21 13:24:33 +020026
27struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
28{
29 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
30
31 if (evsel != NULL)
32 perf_evsel__init(evsel, attr);
33
34 return evsel;
35}
Jiri Olsab9358ee2019-07-21 13:24:36 +020036
/*
 * Free an evsel allocated with perf_evsel__new().  Only the evsel object
 * itself is freed here; fds, mmaps and sample ids are released separately
 * via perf_evsel__close()/perf_evsel__munmap()/perf_evsel__free_id().
 */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
Jiri Olsab8eca4d2019-07-21 13:24:48 +020041
/* lvalue for the perf_event fd at (cpu index, thread index) in evsel->fd. */
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
/* mmap slot at (cpu index, thread index), or NULL if no mmap array was allocated. */
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
Jiri Olsab8eca4d2019-07-21 13:24:48 +020044
45int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
46{
47 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
48
49 if (evsel->fd) {
50 int cpu, thread;
51 for (cpu = 0; cpu < ncpus; cpu++) {
52 for (thread = 0; thread < nthreads; thread++) {
53 FD(evsel, cpu, thread) = -1;
54 }
55 }
56 }
57
58 return evsel->fd != NULL ? 0 : -ENOMEM;
59}
Jiri Olsa50a4e6f2019-07-21 13:24:49 +020060
Rob Herring6cd70752021-04-14 11:07:37 -050061static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
62{
63 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
64
65 return evsel->mmap != NULL ? 0 : -ENOMEM;
66}
67
Jiri Olsa50a4e6f2019-07-21 13:24:49 +020068static int
69sys_perf_event_open(struct perf_event_attr *attr,
70 pid_t pid, int cpu, int group_fd,
71 unsigned long flags)
72{
73 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
74}
75
76int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
77 struct perf_thread_map *threads)
78{
79 int cpu, thread, err = 0;
80
81 if (cpus == NULL) {
82 static struct perf_cpu_map *empty_cpu_map;
83
84 if (empty_cpu_map == NULL) {
85 empty_cpu_map = perf_cpu_map__dummy_new();
86 if (empty_cpu_map == NULL)
87 return -ENOMEM;
88 }
89
90 cpus = empty_cpu_map;
91 }
92
93 if (threads == NULL) {
94 static struct perf_thread_map *empty_thread_map;
95
96 if (empty_thread_map == NULL) {
97 empty_thread_map = perf_thread_map__new_dummy();
98 if (empty_thread_map == NULL)
99 return -ENOMEM;
100 }
101
102 threads = empty_thread_map;
103 }
104
105 if (evsel->fd == NULL &&
106 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
107 return -ENOMEM;
108
109 for (cpu = 0; cpu < cpus->nr; cpu++) {
110 for (thread = 0; thread < threads->nr; thread++) {
111 int fd;
112
113 fd = sys_perf_event_open(&evsel->attr,
114 threads->map[thread].pid,
115 cpus->map[cpu], -1, 0);
116
117 if (fd < 0)
118 return -errno;
119
120 FD(evsel, cpu, thread) = fd;
121 }
122 }
123
124 return err;
125}
Jiri Olsa88761fa2019-07-21 13:24:50 +0200126
Andi Kleen99d61412019-11-20 16:15:16 -0800127static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
128{
129 int thread;
130
131 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
132 if (FD(evsel, cpu, thread) >= 0)
133 close(FD(evsel, cpu, thread));
134 FD(evsel, cpu, thread) = -1;
135 }
136}
137
Jiri Olsa88761fa2019-07-21 13:24:50 +0200138void perf_evsel__close_fd(struct perf_evsel *evsel)
139{
Andi Kleen99d61412019-11-20 16:15:16 -0800140 int cpu;
Jiri Olsa88761fa2019-07-21 13:24:50 +0200141
142 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
Andi Kleen99d61412019-11-20 16:15:16 -0800143 perf_evsel__close_fd_cpu(evsel, cpu);
Jiri Olsa88761fa2019-07-21 13:24:50 +0200144}
145
/*
 * Release the fd xyarray itself.  This does not close any fds still
 * stored in it — see perf_evsel__close_fd() for that.
 */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
151
152void perf_evsel__close(struct perf_evsel *evsel)
153{
154 if (evsel->fd == NULL)
155 return;
156
157 perf_evsel__close_fd(evsel);
158 perf_evsel__free_fd(evsel);
159}
Jiri Olsa5c30af92019-07-21 13:24:51 +0200160
/*
 * Close the fds of a single cpu index only, leaving the fd array
 * allocated (unlike perf_evsel__close()).  No-op when nothing is open.
 */
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}
168
Rob Herring6cd70752021-04-14 11:07:37 -0500169void perf_evsel__munmap(struct perf_evsel *evsel)
170{
171 int cpu, thread;
172
173 if (evsel->fd == NULL || evsel->mmap == NULL)
174 return;
175
176 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
177 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
178 int fd = FD(evsel, cpu, thread);
179 struct perf_mmap *map = MMAP(evsel, cpu, thread);
180
181 if (fd < 0)
182 continue;
183
184 perf_mmap__munmap(map);
185 }
186 }
187
188 xyarray__delete(evsel->mmap);
189 evsel->mmap = NULL;
190}
191
192int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
193{
194 int ret, cpu, thread;
195 struct perf_mmap_param mp = {
196 .prot = PROT_READ | PROT_WRITE,
197 .mask = (pages * page_size) - 1,
198 };
199
200 if (evsel->fd == NULL || evsel->mmap)
201 return -EINVAL;
202
203 if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
204 return -ENOMEM;
205
206 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
207 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
208 int fd = FD(evsel, cpu, thread);
209 struct perf_mmap *map = MMAP(evsel, cpu, thread);
210
211 if (fd < 0)
212 continue;
213
214 perf_mmap__init(map, NULL, false, NULL);
215
216 ret = perf_mmap__mmap(map, &mp, fd, cpu);
217 if (ret) {
218 perf_evsel__munmap(evsel);
219 return ret;
220 }
221 }
222 }
223
224 return 0;
225}
226
227void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
228{
229 if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
230 return NULL;
231
232 return MMAP(evsel, cpu, thread)->base;
233}
234
Jiri Olsa5c30af92019-07-21 13:24:51 +0200235int perf_evsel__read_size(struct perf_evsel *evsel)
236{
237 u64 read_format = evsel->attr.read_format;
238 int entry = sizeof(u64); /* value */
239 int size = 0;
240 int nr = 1;
241
242 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
243 size += sizeof(u64);
244
245 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
246 size += sizeof(u64);
247
248 if (read_format & PERF_FORMAT_ID)
249 entry += sizeof(u64);
250
251 if (read_format & PERF_FORMAT_GROUP) {
252 nr = evsel->nr_members;
253 size += sizeof(u64);
254 }
255
256 size += entry * nr;
257 return size;
258}
259
/*
 * Read the current counter value(s) for one (cpu index, thread index)
 * slot into *count, which is zeroed first.
 *
 * When the event is mmapped, a perf_mmap__read_self() read is tried
 * first and, if it succeeds (returns 0), the fd is not touched; otherwise
 * fall back to read(2) on the event fd.
 *
 * Returns 0 on success, -EINVAL when no fd is open for this slot, or
 * -errno when the fd read fails.
 */
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}
Jiri Olsaa00571f2019-07-21 13:24:52 +0200279
280static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
Andi Kleen363fb122019-11-20 16:15:21 -0800281 int ioc, void *arg,
282 int cpu)
Jiri Olsaa00571f2019-07-21 13:24:52 +0200283{
Andi Kleen363fb122019-11-20 16:15:21 -0800284 int thread;
Jiri Olsaa00571f2019-07-21 13:24:52 +0200285
Andi Kleen363fb122019-11-20 16:15:21 -0800286 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
287 int fd = FD(evsel, cpu, thread),
288 err = ioctl(fd, ioc, arg);
Jiri Olsaa00571f2019-07-21 13:24:52 +0200289
Andi Kleen363fb122019-11-20 16:15:21 -0800290 if (err)
291 return err;
Jiri Olsaa00571f2019-07-21 13:24:52 +0200292 }
293
294 return 0;
295}
296
/* Enable the event (PERF_EVENT_IOC_ENABLE) on every thread of one cpu index. */
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}
301
Jiri Olsaa00571f2019-07-21 13:24:52 +0200302int perf_evsel__enable(struct perf_evsel *evsel)
303{
Andi Kleen363fb122019-11-20 16:15:21 -0800304 int i;
305 int err = 0;
306
307 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
308 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
309 return err;
310}
311
/* Disable the event (PERF_EVENT_IOC_DISABLE) on every thread of one cpu index. */
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}
316
317int perf_evsel__disable(struct perf_evsel *evsel)
318{
Andi Kleen363fb122019-11-20 16:15:21 -0800319 int i;
320 int err = 0;
321
322 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
323 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
324 return err;
Jiri Olsaa00571f2019-07-21 13:24:52 +0200325}
326
327int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
328{
Andi Kleen363fb122019-11-20 16:15:21 -0800329 int err = 0, i;
330
331 for (i = 0; i < evsel->cpus->nr && !err; i++)
332 err = perf_evsel__run_ioctl(evsel,
Jiri Olsaa00571f2019-07-21 13:24:52 +0200333 PERF_EVENT_IOC_SET_FILTER,
Andi Kleen363fb122019-11-20 16:15:21 -0800334 (void *)filter, i);
335 return err;
Jiri Olsaa00571f2019-07-21 13:24:52 +0200336}
Jiri Olsa0ff1a0f2019-07-21 13:24:54 +0200337
/* Accessor: the cpu map associated with this evsel (may be NULL). */
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}
342
/* Accessor: the thread map associated with this evsel (may be NULL). */
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}
Jiri Olsa384c4ad12019-07-21 13:24:57 +0200347
/* Accessor: pointer to the evsel's embedded perf_event_attr. */
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
Jiri Olsa70c20362019-09-03 10:34:29 +0200352
353int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
354{
355 if (ncpus == 0 || nthreads == 0)
356 return 0;
357
358 if (evsel->system_wide)
359 nthreads = 1;
360
361 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
362 if (evsel->sample_id == NULL)
363 return -ENOMEM;
364
365 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
366 if (evsel->id == NULL) {
367 xyarray__delete(evsel->sample_id);
368 evsel->sample_id = NULL;
369 return -ENOMEM;
370 }
371
372 return 0;
373}
374
375void perf_evsel__free_id(struct perf_evsel *evsel)
376{
377 xyarray__delete(evsel->sample_id);
378 evsel->sample_id = NULL;
379 zfree(&evsel->id);
380 evsel->ids = 0;
381}