// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
#include "util/clockid.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include "util/off_cpu.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#ifndef HAVE_GETTID
#include <syscall.h>
#endif
#include <sched.h>
#include <signal.h>
#ifdef HAVE_EVENTFD_SUPPORT
#include <sys/eventfd.h>
#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
#include <sys/time.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct thread_mask {
	struct mmap_cpu_mask	maps;
	struct mmap_cpu_mask	affinity;
};

struct record_thread {
	pid_t			tid;
	struct thread_mask	*mask;
	struct {
		int		msg[2];
		int		ack[2];
	} pipes;
	struct fdarray		pollfd;
	int			ctlfd_pos;
	int			nr_mmaps;
	struct mmap		**maps;
	struct mmap		**overwrite_maps;
	struct record		*rec;
	unsigned long long	samples;
	unsigned long		waking;
	u64			bytes_written;
	u64			bytes_transferred;
	u64			bytes_compressed;
};

static __thread struct record_thread *thread;

enum thread_msg {
	THREAD_MSG__UNDEFINED = 0,
	THREAD_MSG__READY,
	THREAD_MSG__MAX,
};

static const char *thread_msg_tags[THREAD_MSG__MAX] = {
	"UNDEFINED", "READY"
};

enum thread_spec {
	THREAD_SPEC__UNDEFINED = 0,
	THREAD_SPEC__CPU,
	THREAD_SPEC__CORE,
	THREAD_SPEC__PACKAGE,
	THREAD_SPEC__NUMA,
	THREAD_SPEC__USER,
	THREAD_SPEC__MAX,
};

static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
	"undefined", "cpu", "core", "package", "numa", "user"
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	struct evlist		*sb_evlist;
	pthread_t		thread_id;
	int			realtime_prio;
	bool			switch_output_event_set;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			buildid_mmap;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	bool			off_cpu;
	struct switch_output	switch_output;
	unsigned long long	samples;
	unsigned long		output_max_size;	/* = 0: unlimited */
	struct perf_debuginfod	debuginfod;
	int			nr_threads;
	struct thread_mask	*thread_masks;
	struct record_thread	*thread_data;
};

static volatile int done;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
	return (pid_t)syscall(__NR_gettid);
}
#endif

static int record__threads_enabled(struct record *rec)
{
	return rec->opts.threads_spec;
}

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}
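
/*
 * Illustrative sketch, not part of the original file: how the trigger
 * objects above are typically driven.  trigger_on() and trigger_is_hit()
 * are assumed here from util/trigger.h; the actual output rotation step
 * is elided.
 */
static inline void switch_output_trigger_sketch(struct record *rec)
{
	trigger_on(&switch_output_trigger);	/* enable the state machine */
	trigger_ready(&switch_output_trigger);	/* allow hits from now on */

	if (switch_output_size(rec))		/* size threshold crossed? */
		trigger_hit(&switch_output_trigger);

	if (trigger_is_hit(&switch_output_trigger)) {
		/* ... rotate the perf.data output here ... */
		trigger_ready(&switch_output_trigger);	/* re-arm */
	}
}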

static u64 record__bytes_written(struct record *rec)
{
	int t;
	u64 bytes_written = rec->bytes_written;
	struct record_thread *thread_data = rec->thread_data;

	for (t = 0; t < rec->nr_threads; t++)
		bytes_written += thread_data[t].bytes_written;

	return bytes_written;
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (record__bytes_written(rec) >= rec->output_max_size);
}

static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (map && map->file)
		file = map->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	if (map && map->file)
		thread->bytes_written += size;
	else
		rec->bytes_written += size;

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				record__bytes_written(rec) >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, struct mmap *map,
			    void *dst, size_t dst_size, void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
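
/*
 * Illustrative sketch, not part of the original file: the bare POSIX AIO
 * sequence that record__aio_write() above wraps.  Assumes <aio.h> is
 * available (HAVE_AIO_SUPPORT); the path is hypothetical and error
 * handling is trimmed.
 */
static inline int aio_write_sketch(void)
{
	static char buf[4096];
	struct aiocb cb = { .aio_sigevent.sigev_notify = SIGEV_NONE };
	int fd = open("/tmp/example.data", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return -1;

	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (aio_write(&cb) < 0)			/* queue the request */
		return -1;

	while (aio_error(&cb) == EINPROGRESS)	/* poll for completion */
		;

	return aio_return(&cb) == sizeof(buf) ? 0 : -1;	/* bytes written */
}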

static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push(),
		 * so decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * The aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * The started aio write is not complete yet,
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
377
Alexey Budankovef781122019-03-18 20:44:12 +0300378struct record_aio {
379 struct record *rec;
380 void *data;
381 size_t size;
382};
383
static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * The map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released before the aio write request started on the
		 * map->aio.data[] buffer has completed.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}
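
/*
 * Illustrative sketch, not part of the original file: the two-step copy
 * described in the comment above, reduced to plain pointers with made-up
 * names.  "head" is the chunk's start offset inside a ring buffer of
 * buf_size bytes.
 */
static inline void ring_copy_sketch(char *dst, const char *ring,
				    size_t buf_size, size_t head, size_t chunk)
{
	size_t first = buf_size - head;		/* bytes until the upper bound */

	if (chunk <= first) {
		memcpy(dst, ring + head, chunk);	/* fits: one step */
	} else {
		memcpy(dst, ring + head, first);	/* step 1: up to the bound */
		memcpy(dst + first, ring, chunk - first); /* step 2: the wrap */
	}
}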

static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till the map->aio.data[] buffer
	 * becomes available after the previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag = 'B', .mult = 1       },
			{ .tag = 'K', .mult = 1 << 10 },
			{ .tag = 'M', .mult = 1 << 20 },
			{ .tag = 'G', .mult = 1 << 30 },
			{ .tag = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
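
/*
 * Worked example, not from the original file: with the tags table above,
 * "--mmap-flush 512K" parses to 512 << 10 == 524288 bytes, while a plain
 * "1024" falls through to strtol().  The result is then clamped to a
 * quarter of the mmap buffer size reported by evlist__mmap_size().
 */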

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int process_locked_synthesized_event(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample __maybe_unused,
					    struct machine *machine __maybe_unused)
{
	static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret;

	pthread_mutex_lock(&synth_lock);
	ret = process_synthesized_event(tool, event, sample, machine);
	pthread_mutex_unlock(&synth_lock);
	return ret;
}

static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	thread->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int signr = -1;
static volatile int child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
static int done_fd = -1;
#endif

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
{
	u64 tmp = 1;
	/*
	 * It is possible for this signal handler to run after done is checked
	 * in the main loop, but before the perf counter fds are polled. If this
	 * happens, the poll() will continue to wait even though done is set,
	 * and will only break out if either another signal is received, or the
	 * counters are ready for read. To ensure the poll() doesn't sleep when
	 * done is set, use an eventfd (done_fd) to wake up the poll().
	 */
	if (write(done_fd, &tmp, sizeof(tmp)) < 0)
		pr_err("failed to signal wakeup fd, error: %m\n");
}
#endif // HAVE_EVENTFD_SUPPORT
}
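
/*
 * Illustrative sketch, not part of the original file: the eventfd wakeup
 * pattern from the comment above as a standalone sequence.  In the real
 * flow the write() happens in the signal handler and the poll() in the
 * main loop; here both ends are shown inline.
 */
#ifdef HAVE_EVENTFD_SUPPORT
static inline int eventfd_wakeup_sketch(void)
{
	struct pollfd pfd = { .events = POLLIN };
	u64 tmp = 1;
	int efd = eventfd(0, EFD_NONBLOCK);	/* counter starts at 0 */

	if (efd < 0)
		return -1;

	if (write(efd, &tmp, sizeof(tmp)) < 0)	/* the handler's wakeup */
		return -1;

	pfd.fd = efd;
	return poll(&pfd, 1, -1);	/* returns at once: efd is readable */
}
#endif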

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
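
/*
 * Worked example, not from the original file, of the padding arithmetic
 * above: for len1 + len2 == 13, (13 & 7) == 5, so padding == 3 and the
 * payload is extended to the next 8-byte boundary (16 bytes); when the
 * sum is already a multiple of 8, padding stays 0.
 */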

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
	    && record__threads_enabled(rec)) {
		pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
		return -EINVAL;
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	auxtrace_regroup_aux_output(rec->evlist);

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__config_text_poke(struct evlist *evlist)
{
	struct evsel *evsel;

	/* Nothing to do if text poke is already configured */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.text_poke)
			return 0;
	}

	evsel = evlist__add_dummy_on_all_cpus(evlist);
	if (!evsel)
		return -ENOMEM;

	evsel->core.attr.text_poke = 1;
	evsel->core.attr.ksymbol = 1;
	evsel->immediate = true;
	evsel__set_sample_bit(evsel, TIME);

	return 0;
}

static int record__config_off_cpu(struct record *rec)
{
	return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
}

static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

static void record__thread_data_init_pipes(struct record_thread *thread_data)
{
	thread_data->pipes.msg[0] = -1;
	thread_data->pipes.msg[1] = -1;
	thread_data->pipes.ack[0] = -1;
	thread_data->pipes.ack[1] = -1;
}

static int record__thread_data_open_pipes(struct record_thread *thread_data)
{
	if (pipe(thread_data->pipes.msg))
		return -EINVAL;

	if (pipe(thread_data->pipes.ack)) {
		close(thread_data->pipes.msg[0]);
		thread_data->pipes.msg[0] = -1;
		close(thread_data->pipes.msg[1]);
		thread_data->pipes.msg[1] = -1;
		return -EINVAL;
	}

	pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
		 thread_data->pipes.msg[0], thread_data->pipes.msg[1],
		 thread_data->pipes.ack[0], thread_data->pipes.ack[1]);

	return 0;
}
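
/*
 * Illustrative sketch, not part of the original file: one plausible round
 * trip over the msg/ack pipe pair created above, carrying the thread_msg
 * values defined earlier.  The direction of each pipe end and the peer
 * thread writing the acknowledgement are assumptions of the example, not
 * code from this file.
 */
static inline int thread_msg_roundtrip_sketch(struct record_thread *td)
{
	enum thread_msg msg = THREAD_MSG__READY;
	enum thread_msg ack = THREAD_MSG__UNDEFINED;

	/* main thread posts a message ... */
	if (write(td->pipes.msg[1], &msg, sizeof(msg)) != sizeof(msg))
		return -1;

	/* ... and blocks until the worker acknowledges on the other pipe */
	if (read(td->pipes.ack[0], &ack, sizeof(ack)) != sizeof(ack))
		return -1;

	return ack == THREAD_MSG__READY ? 0 : -1;
}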

static void record__thread_data_close_pipes(struct record_thread *thread_data)
{
	if (thread_data->pipes.msg[0] != -1) {
		close(thread_data->pipes.msg[0]);
		thread_data->pipes.msg[0] = -1;
	}
	if (thread_data->pipes.msg[1] != -1) {
		close(thread_data->pipes.msg[1]);
		thread_data->pipes.msg[1] = -1;
	}
	if (thread_data->pipes.ack[0] != -1) {
		close(thread_data->pipes.ack[0]);
		thread_data->pipes.ack[0] = -1;
	}
	if (thread_data->pipes.ack[1] != -1) {
		close(thread_data->pipes.ack[1]);
		thread_data->pipes.ack[1] = -1;
	}
}

static bool evlist__per_thread(struct evlist *evlist)
{
	return cpu_map__is_dummy(evlist->core.user_requested_cpus);
}

static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
{
	int m, tm, nr_mmaps = evlist->core.nr_mmaps;
	struct mmap *mmap = evlist->mmap;
	struct mmap *overwrite_mmap = evlist->overwrite_mmap;
	struct perf_cpu_map *cpus = evlist->core.all_cpus;
	bool per_thread = evlist__per_thread(evlist);

	if (per_thread)
		thread_data->nr_mmaps = nr_mmaps;
	else
		thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
						      thread_data->mask->maps.nbits);
	if (mmap) {
		thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
		if (!thread_data->maps)
			return -ENOMEM;
	}
	if (overwrite_mmap) {
		thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
		if (!thread_data->overwrite_maps) {
			zfree(&thread_data->maps);
			return -ENOMEM;
		}
	}
	pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
		  thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);

	for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
		if (per_thread ||
		    test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
			if (thread_data->maps) {
				thread_data->maps[tm] = &mmap[m];
				pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
			}
			if (thread_data->overwrite_maps) {
				thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
				pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
			}
			tm++;
		}
	}

	return 0;
}
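
/*
 * Worked example, not from the original file: with 8 mmaps covering CPUs
 * 0-7 and a thread whose maps mask has only bits 2 and 3 set, nr_mmaps
 * becomes 2 and maps[0]/maps[1] end up pointing at the mmaps of CPU 2
 * and CPU 3 respectively.
 */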

static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
{
	int f, tm, pos;
	struct mmap *map, *overwrite_map;

	fdarray__init(&thread_data->pollfd, 64);

	for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
		map = thread_data->maps ? thread_data->maps[tm] : NULL;
		overwrite_map = thread_data->overwrite_maps ?
				thread_data->overwrite_maps[tm] : NULL;

		for (f = 0; f < evlist->core.pollfd.nr; f++) {
			void *ptr = evlist->core.pollfd.priv[f].ptr;

			if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
				pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
							      &evlist->core.pollfd);
				if (pos < 0)
					return pos;
				pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
					 thread_data, pos, evlist->core.pollfd.entries[f].fd);
			}
		}
	}

	return 0;
}

static void record__free_thread_data(struct record *rec)
{
	int t;
	struct record_thread *thread_data = rec->thread_data;

	if (thread_data == NULL)
		return;

	for (t = 0; t < rec->nr_threads; t++) {
		record__thread_data_close_pipes(&thread_data[t]);
		zfree(&thread_data[t].maps);
		zfree(&thread_data[t].overwrite_maps);
		fdarray__exit(&thread_data[t].pollfd);
	}

	zfree(&rec->thread_data);
}

static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
{
	int t, ret;
	struct record_thread *thread_data;

	rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
	if (!rec->thread_data) {
		pr_err("Failed to allocate thread data\n");
		return -ENOMEM;
	}
	thread_data = rec->thread_data;

	for (t = 0; t < rec->nr_threads; t++)
		record__thread_data_init_pipes(&thread_data[t]);

	for (t = 0; t < rec->nr_threads; t++) {
		thread_data[t].rec = rec;
		thread_data[t].mask = &rec->thread_masks[t];
		ret = record__thread_data_init_maps(&thread_data[t], evlist);
		if (ret) {
			pr_err("Failed to initialize thread[%d] maps\n", t);
			goto out_free;
		}
		ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
		if (ret) {
			pr_err("Failed to initialize thread[%d] pollfd\n", t);
			goto out_free;
		}
		if (t) {
			thread_data[t].tid = -1;
			ret = record__thread_data_open_pipes(&thread_data[t]);
			if (ret) {
				pr_err("Failed to open thread[%d] communication pipes\n", t);
				goto out_free;
			}
			ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
					   POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
			if (ret < 0) {
				pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
				goto out_free;
			}
			thread_data[t].ctlfd_pos = ret;
			pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
				 thread_data, thread_data[t].ctlfd_pos,
				 thread_data[t].pipes.msg[0]);
		} else {
			thread_data[t].tid = gettid();
			if (evlist->ctl_fd.pos == -1)
				continue;
			ret = fdarray__dup_entry_from(&thread_data[t].pollfd, evlist->ctl_fd.pos,
						      &evlist->core.pollfd);
			if (ret < 0) {
				pr_err("Failed to duplicate descriptor in main thread pollfd\n");
				goto out_free;
			}
			thread_data[t].ctlfd_pos = ret;
			pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
				 thread_data, thread_data[t].ctlfd_pos,
				 evlist->core.pollfd.entries[evlist->ctl_fd.pos].fd);
		}
	}

	return 0;

out_free:
	record__free_thread_data(rec);

	return ret;
}

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	int i, ret;
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 auxtrace_overwrite,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}

	if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
		return -1;

	ret = record__alloc_thread_data(rec, evlist);
	if (ret)
		return ret;

	if (record__threads_enabled(rec)) {
		ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
		if (ret) {
			pr_err("Failed to create data directory: %s\n", strerror(-ret));
			return ret;
		}
		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			if (evlist->mmap)
				evlist->mmap[i].file = &rec->data.dir.files[i];
			if (evlist->overwrite_mmap)
				evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
		}
	}

	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay, system wide or a hybrid system, we need to add a
	 * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
	 * of waiting or event synthesis.
	 */
	if (opts->initial_delay || target__has_cpu(&opts->target) ||
	    perf_pmu__has_hybrid()) {
		pos = evlist__get_tracking_event(evlist);
		if (!evsel__is_dummy_event(pos)) {
			/* Set up dummy event. */
			if (evlist__add_dummy(evlist))
				return -ENOMEM;
			pos = evlist__last(evlist);
			evlist__set_tracking_event(evlist, pos);
		}

		/*
		 * Enable the dummy event when the process is forked for
		 * initial_delay, immediately for system wide.
		 */
		if (opts->initial_delay && !pos->immediate &&
		    !target__has_cpu(&opts->target))
			pos->core.attr.enable_on_exec = 1;
		else
			pos->immediate = 1;
	}

	evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->core.leader != &pos->core &&
			    pos->weak_group) {
				pos = evlist__reset_weak_group(evlist, pos, true);
				goto try_again;
			}
			rc = -errno;
			evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static void set_timestamp_boundary(struct record *rec, u64 sample_time)
{
	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample_time;

	if (sample_time)
		rec->evlist->last_sample_time = sample_time;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	set_timestamp_boundary(rec, sample->time);

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory), like
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
1356
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02001357static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001358{
1359 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001360 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001361	/*
1362	 * For the guest kernel, when processing the record & report
1363	 * subcommands, we synthesize module mmap events before the guest
1364	 * kernel mmap event and trigger a DSO preload, because by default
1365	 * guest module symbols are loaded from guest kallsyms instead of
1366	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
1367	 * first address falls in a module rather than in the guest kernel.
1368	 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001369 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb8682011-11-28 07:56:39 -02001370 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001371 if (err < 0)
1372 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03001373 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001374
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001375 /*
1376 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
1377 * have no _text sometimes.
1378 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001379 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +02001380 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001381 if (err < 0)
1382 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03001383 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001384}
1385
Frederic Weisbecker98402802010-05-02 22:05:29 +02001386static struct perf_event_header finished_round_event = {
1387 .size = sizeof(struct perf_event_header),
1388 .type = PERF_RECORD_FINISHED_ROUND,
1389};
1390
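/*
 * With --affinity=node|cpu, migrate the reading thread onto the CPU
 * mask associated with the mmap buffer it is about to drain, if it is
 * not running there already.
 */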
Jiri Olsaa5830532019-07-27 20:30:53 +02001391static void record__adjust_affinity(struct record *rec, struct mmap *map)
Alexey Budankovf13de662019-01-22 20:50:57 +03001392{
1393 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001394 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
1395 thread->mask->affinity.nbits)) {
1396 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
1397 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
1398 map->affinity_mask.bits, thread->mask->affinity.nbits);
1399 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
1400 (cpu_set_t *)thread->mask->affinity.bits);
1401 if (verbose == 2) {
1402 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
1403 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
1404 }
Alexey Budankovf13de662019-01-22 20:50:57 +03001405 }
1406}
1407
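/*
 * Callback used by zstd_compress_stream_to_records(): with
 * increment == 0 it initializes the PERF_RECORD_COMPRESSED header at
 * the start of 'record' and returns its size, reserving room for it;
 * otherwise it accounts 'increment' more payload bytes in header.size.
 */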
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001408static size_t process_comp_header(void *record, size_t increment)
1409{
Jiri Olsa72932372019-08-28 15:57:16 +02001410 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001411 size_t size = sizeof(*event);
1412
1413 if (increment) {
1414 event->header.size += increment;
1415 return increment;
1416 }
1417
1418 event->header.type = PERF_RECORD_COMPRESSED;
1419 event->header.size = size;
1420
1421 return size;
1422}
1423
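/*
 * Compress src into dst as the payload of a PERF_RECORD_COMPRESSED
 * record. When the map writes to its own file (threaded mode), the
 * per-thread zstd state and byte counters are used, otherwise the
 * session-wide ones.
 */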
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001424static size_t zstd_compress(struct perf_session *session, struct mmap *map,
1425 void *dst, size_t dst_size, void *src, size_t src_size)
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001426{
1427 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +02001428 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001429 struct zstd_data *zstd_data = &session->zstd_data;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001430
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001431 if (map && map->file)
1432 zstd_data = &map->zstd_data;
1433
1434 compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001435 max_record_size, process_comp_header);
1436
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03001437 if (map && map->file) {
1438 thread->bytes_transferred += src_size;
1439 thread->bytes_compressed += compressed;
1440 } else {
1441 session->bytes_transferred += src_size;
1442 session->bytes_compressed += compressed;
1443 }
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001444
1445 return compressed;
1446}
1447
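/*
 * Drain all the data accumulated in the evlist's mmaps and push it to
 * the output. With 'synch' set, each map's flush threshold is
 * temporarily lowered to 1 so that even partially filled buffers are
 * written out.
 */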
Jiri Olsa63503db2019-07-21 13:23:52 +02001448static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
Alexey Budankov470530b2019-03-18 20:40:26 +03001449 bool overwrite, bool synch)
Frederic Weisbecker98402802010-05-02 22:05:29 +02001450{
Jiri Olsadcabb502014-07-25 16:56:16 +02001451 u64 bytes_written = rec->bytes_written;
Peter Zijlstra0e2e63d2010-05-20 14:45:26 +02001452 int i;
David Ahern8d3eca22012-08-26 12:24:47 -06001453 int rc = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001454 int nr_mmaps;
1455 struct mmap **maps;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001456 int trace_fd = rec->data.file.fd;
Alexey Budankovef781122019-03-18 20:44:12 +03001457 off_t off = 0;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001458
Wang Nancb216862016-06-27 10:24:04 +00001459 if (!evlist)
1460 return 0;
Adrian Hunteref149c22015-04-09 18:53:45 +03001461
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001462 nr_mmaps = thread->nr_mmaps;
1463 maps = overwrite ? thread->overwrite_maps : thread->maps;
1464
Wang Nana4ea0ec2016-07-14 08:34:36 +00001465 if (!maps)
1466 return 0;
Wang Nancb216862016-06-27 10:24:04 +00001467
Wang Nan0b72d692017-12-04 16:51:07 +00001468 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
Wang Nan54cc54d2016-07-14 08:34:42 +00001469 return 0;
1470
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001471 if (record__aio_enabled(rec))
1472 off = record__aio_get_pos(trace_fd);
1473
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001474 for (i = 0; i < nr_mmaps; i++) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001475 u64 flush = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001476 struct mmap *map = maps[i];
Wang Nana4ea0ec2016-07-14 08:34:36 +00001477
Jiri Olsa547740f2019-07-27 22:07:44 +02001478 if (map->core.base) {
Alexey Budankovf13de662019-01-22 20:50:57 +03001479 record__adjust_affinity(rec, map);
Alexey Budankov470530b2019-03-18 20:40:26 +03001480 if (synch) {
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001481 flush = map->core.flush;
1482 map->core.flush = 1;
Alexey Budankov470530b2019-03-18 20:40:26 +03001483 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001484 if (!record__aio_enabled(rec)) {
Alexey Budankovef781122019-03-18 20:44:12 +03001485 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001486 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001487 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001488 rc = -1;
1489 goto out;
1490 }
1491 } else {
Alexey Budankovef781122019-03-18 20:44:12 +03001492 if (record__aio_push(rec, map, &off) < 0) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001493 record__aio_set_pos(trace_fd, off);
Alexey Budankov470530b2019-03-18 20:40:26 +03001494 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001495 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001496 rc = -1;
1497 goto out;
1498 }
David Ahern8d3eca22012-08-26 12:24:47 -06001499 }
Alexey Budankov470530b2019-03-18 20:40:26 +03001500 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001501 map->core.flush = flush;
David Ahern8d3eca22012-08-26 12:24:47 -06001502 }
Adrian Hunteref149c22015-04-09 18:53:45 +03001503
Jiri Olsae035f4c2018-09-13 14:54:05 +02001504 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001505 !rec->opts.auxtrace_sample_mode &&
Jiri Olsae035f4c2018-09-13 14:54:05 +02001506 record__auxtrace_mmap_read(rec, map) != 0) {
Adrian Hunteref149c22015-04-09 18:53:45 +03001507 rc = -1;
1508 goto out;
1509 }
Frederic Weisbecker98402802010-05-02 22:05:29 +02001510 }
1511
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001512 if (record__aio_enabled(rec))
1513 record__aio_set_pos(trace_fd, off);
1514
Jiri Olsadcabb502014-07-25 16:56:16 +02001515 /*
1516 * Mark the round finished in case we wrote
1517 * at least one event.
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001518 *
1519	 * No need for round events in directory mode,
1520	 * because the per-cpu maps and files already hold
1521	 * data sorted by the kernel.
Jiri Olsadcabb502014-07-25 16:56:16 +02001522 */
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001523 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
Jiri Olsaded2b8f2018-09-13 14:54:06 +02001524 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
David Ahern8d3eca22012-08-26 12:24:47 -06001525
Wang Nan0b72d692017-12-04 16:51:07 +00001526 if (overwrite)
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03001527 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
David Ahern8d3eca22012-08-26 12:24:47 -06001528out:
1529 return rc;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001530}
1531
Alexey Budankov470530b2019-03-18 20:40:26 +03001532static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001533{
1534 int err;
1535
Alexey Budankov470530b2019-03-18 20:40:26 +03001536 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001537 if (err)
1538 return err;
1539
Alexey Budankov470530b2019-03-18 20:40:26 +03001540 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001541}
1542
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001543static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
1544 void *arg __maybe_unused)
1545{
1546 struct perf_mmap *map = fda->priv[fd].ptr;
1547
1548 if (map)
1549 perf_mmap__put(map);
1550}
1551
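/*
 * Body of the extra reader threads in parallel (--threads) mode: drain
 * this thread's mmaps while there is data, poll otherwise, and exit
 * once the main thread closes the message pipe (POLLHUP on the control
 * fd entry), draining the mmaps one last time.
 */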
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03001552static void *record__thread(void *arg)
1553{
1554 enum thread_msg msg = THREAD_MSG__READY;
1555 bool terminate = false;
1556 struct fdarray *pollfd;
1557 int err, ctlfd_pos;
1558
1559 thread = arg;
1560 thread->tid = gettid();
1561
1562 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1563 if (err == -1)
1564 pr_warning("threads[%d]: failed to notify on start: %s\n",
1565 thread->tid, strerror(errno));
1566
1567 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
1568
1569 pollfd = &thread->pollfd;
1570 ctlfd_pos = thread->ctlfd_pos;
1571
1572 for (;;) {
1573 unsigned long long hits = thread->samples;
1574
1575 if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1576 break;
1577
1578 if (hits == thread->samples) {
1579
1580 err = fdarray__poll(pollfd, -1);
1581 /*
1582	 * Propagate the error only if there is one: a positive
1583	 * number of returned events and EINTR are ignored.
1584 */
1585 if (err > 0 || (err < 0 && errno == EINTR))
1586 err = 0;
1587 thread->waking++;
1588
1589 if (fdarray__filter(pollfd, POLLERR | POLLHUP,
1590 record__thread_munmap_filtered, NULL) == 0)
1591 break;
1592 }
1593
1594 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
1595 terminate = true;
1596 close(thread->pipes.msg[0]);
1597 thread->pipes.msg[0] = -1;
1598 pollfd->entries[ctlfd_pos].fd = -1;
1599 pollfd->entries[ctlfd_pos].events = 0;
1600 }
1601
1602 pollfd->entries[ctlfd_pos].revents = 0;
1603 }
1604 record__mmap_read_all(thread->rec, true);
1605
1606 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1607 if (err == -1)
1608 pr_warning("threads[%d]: failed to notify on termination: %s\n",
1609 thread->tid, strerror(errno));
1610
1611 return NULL;
1612}
1613
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001614static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001615{
David Ahern57706ab2013-11-06 11:41:34 -07001616 struct perf_session *session = rec->session;
1617 int feat;
1618
1619 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1620 perf_header__set_feat(&session->header, feat);
1621
1622 if (rec->no_buildid)
1623 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1624
Jiri Olsace9036a2019-07-21 13:24:23 +02001625 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001626 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1627
1628 if (!rec->opts.branch_stack)
1629 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001630
1631 if (!rec->opts.full_auxtrace)
1632 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001633
Alexey Budankovcf790512018-10-09 17:36:24 +03001634 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1635 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1636
Jiri Olsad1e325c2020-08-05 11:34:40 +02001637 if (!rec->opts.use_clockid)
1638 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1639
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001640 if (!record__threads_enabled(rec))
1641 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1642
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001643 if (!record__comp_enabled(rec))
1644 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001645
Jiri Olsaffa517a2015-10-25 15:51:43 +01001646 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001647}
1648
Wang Nane1ab48b2016-02-26 09:32:10 +00001649static void
1650record__finish_output(struct record *rec)
1651{
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001652 int i;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001653 struct perf_data *data = &rec->data;
1654 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001655
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001656 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001657 return;
1658
1659 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001660 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001661 if (record__threads_enabled(rec)) {
1662 for (i = 0; i < data->dir.nr; i++)
1663 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
1664 }
Wang Nane1ab48b2016-02-26 09:32:10 +00001665
1666 if (!rec->no_buildid) {
1667 process_buildids(rec);
1668
1669 if (rec->buildid_all)
1670 dsos__hit_all(rec->session);
1671 }
1672 perf_session__write_header(rec->session, rec->evlist, fd, true);
1673
1674 return;
1675}
1676
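/*
 * Synthesize the task events for just the forked workload, using a
 * single entry thread map for its PID; with --tail-synthesize this is
 * done at the end of the session instead of at the start.
 */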
Wang Nan4ea648a2016-07-14 08:34:47 +00001677static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001678{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001679 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001680 struct perf_thread_map *thread_map;
Namhyung Kim41b740b2021-08-10 21:46:58 -07001681 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001682
Wang Nan4ea648a2016-07-14 08:34:47 +00001683 if (rec->opts.tail_synthesize != tail)
1684 return 0;
1685
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001686 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1687 if (thread_map == NULL)
1688 return -1;
1689
1690 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001691 process_synthesized_event,
1692 &rec->session->machines.host,
Namhyung Kim41b740b2021-08-10 21:46:58 -07001693 needs_mmap,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001694 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001695 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001696 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001697}
1698
Wang Nan4ea648a2016-07-14 08:34:47 +00001699static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001700
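/*
 * Rotate the output: synthesize the tail events, finalize the current
 * file and switch to a new timestamped perf.data file. When
 * --switch-output limits the number of files, the oldest one is
 * removed before its slot is reused.
 */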
Wang Nanecfd7a92016-04-13 08:21:07 +00001701static int
1702record__switch_output(struct record *rec, bool at_exit)
1703{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001704 struct perf_data *data = &rec->data;
Wang Nanecfd7a92016-04-13 08:21:07 +00001705 int fd, err;
Andi Kleen03724b22019-03-14 15:49:55 -07001706 char *new_filename;
Wang Nanecfd7a92016-04-13 08:21:07 +00001707
1708	/* Same size as: "2015122520103046" */
1709 char timestamp[] = "InvalidTimestamp";
1710
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001711 record__aio_mmap_read_sync(rec);
1712
Wang Nan4ea648a2016-07-14 08:34:47 +00001713 record__synthesize(rec, true);
1714 if (target__none(&rec->opts.target))
1715 record__synthesize_workload(rec, true);
1716
Wang Nanecfd7a92016-04-13 08:21:07 +00001717 rec->samples = 0;
1718 record__finish_output(rec);
1719 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1720 if (err) {
1721 pr_err("Failed to get current timestamp\n");
1722 return -EINVAL;
1723 }
1724
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001725 fd = perf_data__switch(data, timestamp,
Wang Nanecfd7a92016-04-13 08:21:07 +00001726 rec->session->header.data_offset,
Andi Kleen03724b22019-03-14 15:49:55 -07001727 at_exit, &new_filename);
Wang Nanecfd7a92016-04-13 08:21:07 +00001728 if (fd >= 0 && !at_exit) {
1729 rec->bytes_written = 0;
1730 rec->session->header.data_size = 0;
1731 }
1732
1733 if (!quiet)
1734 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001735 data->path, timestamp);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001736
Andi Kleen03724b22019-03-14 15:49:55 -07001737 if (rec->switch_output.num_files) {
1738 int n = rec->switch_output.cur_file + 1;
1739
1740 if (n >= rec->switch_output.num_files)
1741 n = 0;
1742 rec->switch_output.cur_file = n;
1743 if (rec->switch_output.filenames[n]) {
1744 remove(rec->switch_output.filenames[n]);
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03001745 zfree(&rec->switch_output.filenames[n]);
Andi Kleen03724b22019-03-14 15:49:55 -07001746 }
1747 rec->switch_output.filenames[n] = new_filename;
1748 } else {
1749 free(new_filename);
1750 }
1751
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001752 /* Output tracking events */
Wang Nanbe7b0c92016-04-20 18:59:54 +00001753 if (!at_exit) {
Wang Nan4ea648a2016-07-14 08:34:47 +00001754 record__synthesize(rec, false);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001755
Wang Nanbe7b0c92016-04-20 18:59:54 +00001756 /*
1757	 * In 'perf record --switch-output' without -a,
1758	 * record__synthesize() in record__switch_output() won't
1759	 * generate tracking events because there's no thread_map
1760	 * in the evlist, so the newly created perf.data would not
1761	 * contain mmap and comm information.
1762	 * Create a fake thread_map and call
1763	 * perf_event__synthesize_thread_map() directly for those events.
1764 */
1765 if (target__none(&rec->opts.target))
Wang Nan4ea648a2016-07-14 08:34:47 +00001766 record__synthesize_workload(rec, false);
Wang Nanbe7b0c92016-04-20 18:59:54 +00001767 }
Wang Nanecfd7a92016-04-13 08:21:07 +00001768 return fd;
1769}
1770
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001771static volatile int workload_exec_errno;
1772
1773/*
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03001774 * evlist__prepare_workload will send a SIGUSR1
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001775 * if the fork fails, since we asked for it by setting its
1776 * want_signal parameter to true.
1777 */
Namhyung Kim45604712014-05-12 09:47:24 +09001778static void workload_exec_failed_signal(int signo __maybe_unused,
1779 siginfo_t *info,
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001780 void *ucontext __maybe_unused)
1781{
1782 workload_exec_errno = info->si_value.sival_int;
1783 done = 1;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001784 child_finished = 1;
1785}
1786
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001787static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001788static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001789
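/*
 * The first page of a perf mmap is the perf_event_mmap_page control
 * page; pick one so that record__pick_pc() can hand it to
 * perf_event__synth_time_conv() for the TSC conversion parameters.
 */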
Arnaldo Carvalho de Melodb0ea13c2020-11-30 15:19:40 -03001790static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001791{
Wang Nanb2cb6152016-07-14 08:34:39 +00001792 if (evlist) {
Jiri Olsa547740f2019-07-27 22:07:44 +02001793 if (evlist->mmap && evlist->mmap[0].core.base)
1794 return evlist->mmap[0].core.base;
1795 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1796 return evlist->overwrite_mmap[0].core.base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001797 }
Wang Nanee667f92016-06-27 10:24:05 +00001798 return NULL;
1799}
1800
Wang Nanc45628b2016-05-24 02:28:59 +00001801static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1802{
Arnaldo Carvalho de Melodb0ea13c2020-11-30 15:19:40 -03001803 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
Wang Nanee667f92016-06-27 10:24:05 +00001804 if (pc)
1805 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001806 return NULL;
1807}
1808
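/*
 * Synthesize the events describing the pre-existing system state that
 * the kernel does not re-emit: kernel and module mmaps, running
 * threads, thread and CPU maps, BPF programs, etc., so that the
 * analysis tools can resolve symbols and task names.
 */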
Wang Nan4ea648a2016-07-14 08:34:47 +00001809static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001810{
1811 struct perf_session *session = rec->session;
1812 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001813 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001814 struct record_opts *opts = &rec->opts;
1815 struct perf_tool *tool = &rec->tool;
Wang Nanc45c86e2016-02-26 09:32:07 +00001816 int err = 0;
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001817 event_op f = process_synthesized_event;
Wang Nanc45c86e2016-02-26 09:32:07 +00001818
Wang Nan4ea648a2016-07-14 08:34:47 +00001819 if (rec->opts.tail_synthesize != tail)
1820 return 0;
1821
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001822 if (data->is_pipe) {
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001823 err = perf_event__synthesize_for_pipe(tool, session, data,
Jiri Olsaa2015512018-03-14 10:22:04 +01001824 process_synthesized_event);
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001825 if (err < 0)
1826 goto out;
Jiri Olsaa2015512018-03-14 10:22:04 +01001827
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001828 rec->bytes_written += err;
Wang Nanc45c86e2016-02-26 09:32:07 +00001829 }
1830
Wang Nanc45628b2016-05-24 02:28:59 +00001831 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001832 process_synthesized_event, machine);
1833 if (err)
1834 goto out;
1835
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001836 /* Synthesize id_index before auxtrace_info */
Adrian Hunter61750472021-09-07 19:39:02 +03001837 if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) {
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001838 err = perf_event__synthesize_id_index(tool,
1839 process_synthesized_event,
1840 session->evlist, machine);
1841 if (err)
1842 goto out;
1843 }
1844
Wang Nanc45c86e2016-02-26 09:32:07 +00001845 if (rec->opts.full_auxtrace) {
1846 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1847 session, process_synthesized_event);
1848 if (err)
1849 goto out;
1850 }
1851
Arnaldo Carvalho de Melo78e1bc22020-11-30 15:07:49 -03001852 if (!evlist__exclude_kernel(rec->evlist)) {
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001853 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1854 machine);
1855 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1856 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1857 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001858
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001859 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1860 machine);
1861 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1862 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1863 "Check /proc/modules permission or run as root.\n");
1864 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001865
1866 if (perf_guest) {
1867 machines__process_guests(&session->machines,
1868 perf_event__synthesize_guest_os, tool);
1869 }
1870
Andi Kleenbfd8f722017-11-17 13:42:58 -08001871 err = perf_event__synthesize_extra_attr(&rec->tool,
1872 rec->evlist,
1873 process_synthesized_event,
1874 data->is_pipe);
1875 if (err)
1876 goto out;
1877
Jiri Olsa03617c22019-07-21 13:24:42 +02001878 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
Andi Kleen373565d2017-11-17 13:42:59 -08001879 process_synthesized_event,
1880 NULL);
1881 if (err < 0) {
1882 pr_err("Couldn't synthesize thread map.\n");
1883 return err;
1884 }
1885
Adrian Hunter7be1fed2022-05-24 10:54:30 +03001886 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
Andi Kleen373565d2017-11-17 13:42:59 -08001887 process_synthesized_event, NULL);
1888 if (err < 0) {
1889 pr_err("Couldn't synthesize cpu map.\n");
1890 return err;
1891 }
1892
Song Liue5416952019-03-11 22:30:41 -07001893 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001894 machine, opts);
1895 if (err < 0)
1896 pr_warning("Couldn't synthesize bpf events.\n");
1897
Namhyung Kim41b740b2021-08-10 21:46:58 -07001898 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
1899 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
1900 machine);
1901 if (err < 0)
1902 pr_warning("Couldn't synthesize cgroup events.\n");
1903 }
Namhyung Kimab640692020-03-25 21:45:33 +09001904
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001905 if (rec->opts.nr_threads_synthesize > 1) {
1906 perf_set_multithreaded();
1907 f = process_locked_synthesized_event;
1908 }
1909
Namhyung Kim41b740b2021-08-10 21:46:58 -07001910 if (rec->opts.synth & PERF_SYNTH_TASK) {
1911 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
1912
1913 err = __machine__synthesize_threads(machine, tool, &opts->target,
1914 rec->evlist->core.threads,
1915 f, needs_mmap, opts->sample_address,
1916 rec->opts.nr_threads_synthesize);
1917 }
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001918
1919 if (rec->opts.nr_threads_synthesize > 1)
1920 perf_set_singlethreaded();
1921
Wang Nanc45c86e2016-02-26 09:32:07 +00001922out:
1923 return err;
1924}
1925
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03001926static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
1927{
1928 struct record *rec = data;
1929 pthread_kill(rec->thread_id, SIGUSR2);
1930 return 0;
1931}
1932
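/*
 * Set up the side-band evlist, serviced by a helper thread: it is used
 * for PERF_RECORD_BPF_EVENT tracking (unless --no-bpf-event) and by
 * --switch-output-event to signal the main thread via SIGUSR2.
 */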
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03001933static int record__setup_sb_evlist(struct record *rec)
1934{
1935 struct record_opts *opts = &rec->opts;
1936
1937 if (rec->sb_evlist != NULL) {
1938 /*
1939 * We get here if --switch-output-event populated the
1940 * sb_evlist, so associate a callback that will send a SIGUSR2
1941 * to the main thread.
1942 */
1943 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
1944 rec->thread_id = pthread_self();
1945 }
Jin Yao1101c872020-08-05 10:29:37 +08001946#ifdef HAVE_LIBBPF_SUPPORT
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03001947 if (!opts->no_bpf_event) {
1948 if (rec->sb_evlist == NULL) {
1949 rec->sb_evlist = evlist__new();
1950
1951 if (rec->sb_evlist == NULL) {
1952			pr_err("Couldn't create side band evlist.\n");
1953 return -1;
1954 }
1955 }
1956
1957 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
1958		pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
1959 return -1;
1960 }
1961 }
Jin Yao1101c872020-08-05 10:29:37 +08001962#endif
Arnaldo Carvalho de Melo08c83992020-11-30 09:40:10 -03001963 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03001964 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1965 opts->no_bpf_event = true;
1966 }
1967
1968 return 0;
1969}
1970
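/*
 * With --clockid, store a reference pair of wall-clock and clockid
 * timestamps in the header so that sample times can later be
 * converted to time of day.
 */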
Jiri Olsad1e325c2020-08-05 11:34:40 +02001971static int record__init_clock(struct record *rec)
1972{
1973 struct perf_session *session = rec->session;
1974 struct timespec ref_clockid;
1975 struct timeval ref_tod;
1976 u64 ref;
1977
1978 if (!rec->opts.use_clockid)
1979 return 0;
1980
Jiri Olsa9d88a1a12020-08-05 11:34:41 +02001981 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1982 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
1983
Jiri Olsad1e325c2020-08-05 11:34:40 +02001984 session->header.env.clock.clockid = rec->opts.clockid;
1985
1986 if (gettimeofday(&ref_tod, NULL) != 0) {
1987 pr_err("gettimeofday failed, cannot set reference time.\n");
1988 return -1;
1989 }
1990
1991 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
1992 pr_err("clock_gettime failed, cannot set reference time.\n");
1993 return -1;
1994 }
1995
1996 ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
1997 (u64) ref_tod.tv_usec * NSEC_PER_USEC;
1998
1999 session->header.env.clock.tod_ns = ref;
2000
2001 ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
2002 (u64) ref_clockid.tv_nsec;
2003
2004 session->header.env.clock.clockid_ns = ref;
2005 return 0;
2006}
2007
Adrian Hunterd20aff12020-09-01 12:37:57 +03002008static void hit_auxtrace_snapshot_trigger(struct record *rec)
2009{
2010 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2011 trigger_hit(&auxtrace_snapshot_trigger);
2012 auxtrace_record__snapshot_started = 1;
2013 if (auxtrace_record__snapshot_start(rec->itr))
2014 trigger_error(&auxtrace_snapshot_trigger);
2015 }
2016}
2017
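/*
 * On hybrid systems the same event name can exist on several PMUs, so
 * rewrite names that do not already carry a PMU prefix into the
 * "pmu_name/event_name/" form to tell the instances apart.
 */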
Jin Yao91c0f5e2021-04-27 15:01:30 +08002018static void record__uniquify_name(struct record *rec)
2019{
2020 struct evsel *pos;
2021 struct evlist *evlist = rec->evlist;
2022 char *new_name;
2023 int ret;
2024
2025 if (!perf_pmu__has_hybrid())
2026 return;
2027
2028 evlist__for_each_entry(evlist, pos) {
2029 if (!evsel__is_hybrid(pos))
2030 continue;
2031
2032 if (strchr(pos->name, '/'))
2033 continue;
2034
2035 ret = asprintf(&new_name, "%s/%s/",
2036 pos->pmu_name, pos->name);
2037		if (ret >= 0) { /* asprintf() returns -1 on failure */
2038 free(pos->name);
2039 pos->name = new_name;
2040 }
2041 }
2042}
2043
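/*
 * Ask a reader thread to terminate by closing the write end of its
 * message pipe, then wait for the acknowledgement before returning.
 */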
Alexey Bayduraev1e5de7d2022-01-17 21:34:26 +03002044static int record__terminate_thread(struct record_thread *thread_data)
2045{
2046 int err;
2047 enum thread_msg ack = THREAD_MSG__UNDEFINED;
2048 pid_t tid = thread_data->tid;
2049
2050 close(thread_data->pipes.msg[1]);
2051 thread_data->pipes.msg[1] = -1;
2052 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
2053 if (err > 0)
2054 pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
2055 else
2056 pr_warning("threads[%d]: failed to receive termination notification from %d\n",
2057 thread->tid, tid);
2058
2059 return 0;
2060}
2061
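/*
 * Start the extra reader threads (thread_data[0] is serviced by the
 * main thread itself). Signals are blocked around pthread_create() so
 * that the new threads inherit a full mask and signals keep being
 * delivered to the main thread only.
 */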
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002062static int record__start_threads(struct record *rec)
2063{
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002064 int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002065 struct record_thread *thread_data = rec->thread_data;
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002066 sigset_t full, mask;
2067 pthread_t handle;
2068 pthread_attr_t attrs;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002069
2070 thread = &thread_data[0];
2071
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002072 if (!record__threads_enabled(rec))
2073 return 0;
2074
2075 sigfillset(&full);
2076 if (sigprocmask(SIG_SETMASK, &full, &mask)) {
2077 pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
2078 return -1;
2079 }
2080
2081 pthread_attr_init(&attrs);
2082 pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
2083
2084 for (t = 1; t < nr_threads; t++) {
2085 enum thread_msg msg = THREAD_MSG__UNDEFINED;
2086
2087#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
2088 pthread_attr_setaffinity_np(&attrs,
2089 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
2090 (cpu_set_t *)(thread_data[t].mask->affinity.bits));
2091#endif
2092 if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
2093 for (tt = 1; tt < t; tt++)
2094				record__terminate_thread(&thread_data[tt]);
2095 pr_err("Failed to start threads: %s\n", strerror(errno));
2096 ret = -1;
2097 goto out_err;
2098 }
2099
2100 err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
2101 if (err > 0)
2102 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2103 thread_msg_tags[msg]);
2104 else
2105 pr_warning("threads[%d]: failed to receive start notification from %d\n",
2106 thread->tid, rec->thread_data[t].tid);
2107 }
2108
2109 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
2110 (cpu_set_t *)thread->mask->affinity.bits);
2111
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002112 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
2113
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002114out_err:
2115 pthread_attr_destroy(&attrs);
2116
2117 if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
2118 pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
2119 ret = -1;
2120 }
2121
2122 return ret;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002123}
2124
2125static int record__stop_threads(struct record *rec)
2126{
2127 int t;
2128 struct record_thread *thread_data = rec->thread_data;
2129
Alexey Bayduraev1e5de7d2022-01-17 21:34:26 +03002130 for (t = 1; t < rec->nr_threads; t++)
2131 record__terminate_thread(&thread_data[t]);
2132
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03002133 for (t = 0; t < rec->nr_threads; t++) {
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002134 rec->samples += thread_data[t].samples;
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03002135 if (!record__threads_enabled(rec))
2136 continue;
2137 rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2138 rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2139 pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
2140 thread_data[t].samples, thread_data[t].waking);
2141 if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
2142 pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
2143 thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
2144 else
2145 pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
2146 }
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002147
2148 return 0;
2149}
2150
2151static unsigned long record__waking(struct record *rec)
2152{
2153 int t;
2154 unsigned long waking = 0;
2155 struct record_thread *thread_data = rec->thread_data;
2156
2157 for (t = 0; t < rec->nr_threads; t++)
2158 waking += thread_data[t].waking;
2159
2160 return waking;
2161}
2162
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002163static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02002164{
David Ahern57706ab2013-11-06 11:41:34 -07002165 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09002166 int status = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03002167 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02002168 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002169 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002170 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002171 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002172 bool disabled = false, draining = false;
Namhyung Kim42aa2762015-01-29 17:06:48 +09002173 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002174 float ratio = 0;
Alexey Budankovacce0222020-07-17 10:07:50 +03002175 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002176
Namhyung Kim45604712014-05-12 09:47:24 +09002177 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02002178 signal(SIGCHLD, sig_handler);
2179 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06002180 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00002181 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002182
Hari Bathinif3b36142017-03-08 02:11:43 +05302183 if (rec->opts.record_namespaces)
2184 tool->namespace_events = true;
2185
Namhyung Kim8fb4b672020-03-25 21:45:34 +09002186 if (rec->opts.record_cgroup) {
2187#ifdef HAVE_FILE_HANDLE
2188 tool->cgroup_events = true;
2189#else
2190 pr_err("cgroup tracking is not supported\n");
2191 return -1;
2192#endif
2193 }
2194
Jiri Olsadc0c6122017-01-09 10:51:58 +01002195 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002196 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002197 if (rec->opts.auxtrace_snapshot_mode)
2198 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01002199 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002200 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002201 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002202 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002203 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02002204
Namhyung Kim2681bd82021-07-19 15:31:49 -07002205 session = perf_session__new(data, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05302206 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09002207 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05302208 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02002209 }
2210
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03002211 if (record__threads_enabled(rec)) {
2212 if (perf_data__is_pipe(&rec->data)) {
2213 pr_err("Parallel trace streaming is not available in pipe mode.\n");
2214 return -1;
2215 }
2216 if (rec->opts.full_auxtrace) {
2217 pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
2218 return -1;
2219 }
2220 }
2221
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002222 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002223 rec->session = session;
2224
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002225 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2226 pr_err("Compression initialization failed.\n");
2227 return -1;
2228 }
Anand K Mistryda231332020-05-13 12:20:23 +10002229#ifdef HAVE_EVENTFD_SUPPORT
2230 done_fd = eventfd(0, EFD_NONBLOCK);
2231 if (done_fd < 0) {
2232 pr_err("Failed to create wakeup eventfd, error: %m\n");
2233 status = -1;
2234 goto out_delete_session;
2235 }
Yang Jihonge16c2ce2021-02-05 14:50:01 +08002236 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
Anand K Mistryda231332020-05-13 12:20:23 +10002237 if (err < 0) {
2238 pr_err("Failed to add wakeup eventfd to poll list\n");
2239 status = err;
2240 goto out_delete_session;
2241 }
2242#endif // HAVE_EVENTFD_SUPPORT
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002243
2244 session->header.env.comp_type = PERF_COMP_ZSTD;
2245 session->header.env.comp_level = rec->opts.comp_level;
2246
Adrian Huntereeb399b2019-10-04 11:31:21 +03002247 if (rec->opts.kcore &&
2248 !record__kcore_readable(&session->machines.host)) {
2249 pr_err("ERROR: kcore is not readable.\n");
2250 return -1;
2251 }
2252
Jiri Olsad1e325c2020-08-05 11:34:40 +02002253 if (record__init_clock(rec))
2254 return -1;
2255
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002256 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01002257
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02002258 if (forks) {
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03002259 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2260 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02002261 if (err < 0) {
2262 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09002263 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02002264 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02002265 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01002266 }
2267
Jiri Olsaad46e48c2018-03-02 17:13:54 +01002268 /*
2269 * If we have just single event and are sending data
2270 * through pipe, we need to force the ids allocation,
2271 * because we synthesize event name through the pipe
2272 * and need the id for that.
2273 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002274 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01002275 rec->opts.sample_id = true;
2276
Jin Yao91c0f5e2021-04-27 15:01:30 +08002277 record__uniquify_name(rec);
2278
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002279 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06002280 err = -1;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002281 goto out_free_threads;
David Ahern8d3eca22012-08-26 12:24:47 -06002282 }
Jiri Olsaf6fa4372019-08-06 15:14:05 +02002283 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002284
Adrian Huntereeb399b2019-10-04 11:31:21 +03002285 if (rec->opts.kcore) {
2286 err = record__kcore_copy(&session->machines.host, data);
2287 if (err) {
2288 pr_err("ERROR: Failed to copy kcore\n");
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002289 goto out_free_threads;
Adrian Huntereeb399b2019-10-04 11:31:21 +03002290 }
2291 }
2292
Wang Nan8690a2a2016-02-22 09:10:32 +00002293 err = bpf__apply_obj_config();
2294 if (err) {
2295 char errbuf[BUFSIZ];
2296
2297 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2298 pr_err("ERROR: Apply config to BPF failed: %s\n",
2299 errbuf);
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002300 goto out_free_threads;
Wang Nan8690a2a2016-02-22 09:10:32 +00002301 }
2302
Adrian Huntercca84822015-08-19 17:29:21 +03002303 /*
2304 * Normally perf_session__new would do this, but it doesn't have the
2305 * evlist.
2306 */
Arnaldo Carvalho de Melo8cedf3a52020-06-17 09:29:48 -03002307 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
Adrian Huntercca84822015-08-19 17:29:21 +03002308 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
2309 rec->tool.ordered_events = false;
2310 }
2311
Jiri Olsa3a683122021-07-06 17:17:01 +02002312 if (!rec->evlist->core.nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002313 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
2314
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002315 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09002316 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05002317 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002318 goto out_free_threads;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002319 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09002320 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002321 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002322 goto out_free_threads;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002323 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002324
Arnaldo Carvalho de Melob38d85e2020-04-24 12:24:51 -03002325 err = -1;
David Ahernd3665492012-02-06 15:27:52 -07002326 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01002327 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07002328 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01002329 "Use --no-buildid to profile anyway.\n");
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002330 goto out_free_threads;
Robert Richtere20960c2011-12-07 10:02:55 +01002331 }
2332
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03002333 err = record__setup_sb_evlist(rec);
2334 if (err)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002335 goto out_free_threads;
Song Liu657ee552019-03-11 22:30:50 -07002336
Wang Nan4ea648a2016-07-14 08:34:47 +00002337 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00002338 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002339 goto out_free_threads;
David Ahern8d3eca22012-08-26 12:24:47 -06002340
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002341 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002342 struct sched_param param;
2343
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002344 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002345 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02002346 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06002347 err = -1;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002348 goto out_free_threads;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002349 }
2350 }
2351
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002352 if (record__start_threads(rec))
2353 goto out_free_threads;
2354
Jiri Olsa774cb492012-11-12 18:34:01 +01002355 /*
2356 * When perf is starting the traced process, all the events
2357 * (apart from group members) have enable_on_exec=1 set,
2358 * so don't spoil it by prematurely enabling them.
2359 */
Andi Kleen6619a532014-01-11 13:38:27 -08002360 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02002361 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06002362
Peter Zijlstra856e9662009-12-16 17:55:55 +01002363 /*
2364 * Let the child rip
2365 */
Namhyung Kime803cf92015-09-22 09:24:55 +09002366 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01002367 struct machine *machine = &session->machines.host;
Namhyung Kime5bed5642015-09-30 10:45:24 +09002368 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05302369 pid_t tgid;
Namhyung Kime5bed5642015-09-30 10:45:24 +09002370
2371 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
2372 if (event == NULL) {
2373 err = -ENOMEM;
2374 goto out_child;
2375 }
2376
Namhyung Kime803cf92015-09-22 09:24:55 +09002377 /*
2378		 * Some H/W events are generated before the COMM event,
2379		 * which is emitted during exec(), so perf script
2380		 * cannot see the correct process name for those events.
2381		 * Synthesize a COMM event to prevent that.
2382 */
Hari Bathinie907caf2017-03-08 02:11:51 +05302383 tgid = perf_event__synthesize_comm(tool, event,
2384 rec->evlist->workload.pid,
2385 process_synthesized_event,
2386 machine);
2387 free(event);
2388
2389 if (tgid == -1)
2390 goto out_child;
2391
2392 event = malloc(sizeof(event->namespaces) +
2393 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
2394 machine->id_hdr_size);
2395 if (event == NULL) {
2396 err = -ENOMEM;
2397 goto out_child;
2398 }
2399
2400 /*
2401 * Synthesize NAMESPACES event for the command specified.
2402 */
2403 perf_event__synthesize_namespaces(tool, event,
2404 rec->evlist->workload.pid,
2405 tgid, process_synthesized_event,
2406 machine);
Namhyung Kime5bed5642015-09-30 10:45:24 +09002407 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09002408
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03002409 evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09002410 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01002411
Andi Kleen6619a532014-01-11 13:38:27 -08002412 if (opts->initial_delay) {
Alexey Budankov68cd3b42020-07-17 10:07:03 +03002413 pr_info(EVLIST_DISABLED_MSG);
2414 if (opts->initial_delay > 0) {
2415 usleep(opts->initial_delay * USEC_PER_MSEC);
2416 evlist__enable(rec->evlist);
2417 pr_info(EVLIST_ENABLED_MSG);
2418 }
Andi Kleen6619a532014-01-11 13:38:27 -08002419 }
2420
Wang Nan5f9cf592016-04-20 18:59:49 +00002421 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002422 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00002423 perf_hooks__invoke_record_start();
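	/*
	 * Main event loop: keep draining the mmaps, servicing the
	 * control fd commands and the switch-output/auxtrace-snapshot
	 * triggers until 'done' is set and no more data is pending.
	 */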
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002424 for (;;) {
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002425 unsigned long long hits = thread->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002426
Wang Nan057374642016-07-14 08:34:43 +00002427 /*
2428		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
2429		 * here: when done == true and hits != rec->samples
2430		 * in the previous round.
2431		 *
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002432		 * evlist__toggle_bkw_mmap() ensures we never convert
Wang Nan057374642016-07-14 08:34:43 +00002433		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
2434 */
2435 if (trigger_is_hit(&switch_output_trigger) || done || draining)
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002436 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
Wang Nan057374642016-07-14 08:34:43 +00002437
Alexey Budankov470530b2019-03-18 20:40:26 +03002438 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00002439 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002440 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06002441 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09002442 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06002443 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002444
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002445 if (auxtrace_record__snapshot_started) {
2446 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00002447 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002448 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00002449 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002450 pr_err("AUX area tracing snapshot failed\n");
2451 err = -1;
2452 goto out_child;
2453 }
2454 }
2455
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002456 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00002457 /*
2458		 * If switch_output_trigger is hit, the data in the
2459		 * overwritable ring buffer should have been collected,
2460		 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
2461		 *
2462		 * If SIGUSR2 was raised after or during record__mmap_read_all(),
2463		 * it didn't collect the data from the overwritable ring
2464		 * buffer, so read again.
2465 */
2466 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2467 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002468 trigger_ready(&switch_output_trigger);
2469
Wang Nan057374642016-07-14 08:34:43 +00002470 /*
2471		 * Re-enable events in the overwrite ring buffer after
2472		 * record__mmap_read_all(): we should have collected
2473		 * the data from it by now.
2474 */
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002475 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
Wang Nan057374642016-07-14 08:34:43 +00002476
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002477 if (!quiet)
2478 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002479 record__waking(rec));
2480 thread->waking = 0;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002481 fd = record__switch_output(rec, false);
2482 if (fd < 0) {
2483 pr_err("Failed to switch to new file\n");
2484 trigger_error(&switch_output_trigger);
2485 err = fd;
2486 goto out_child;
2487 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01002488
2489 /* re-arm the alarm */
2490 if (rec->switch_output.time)
2491 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002492 }
2493
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002494 if (hits == thread->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002495 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002496 break;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002497 err = fdarray__poll(&thread->pollfd, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04002498 /*
2499		 * Propagate the error only if there is one: a positive
2500		 * number of returned events and EINTR are ignored.
2501 */
2502 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09002503 err = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002504 thread->waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002505
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002506 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
2507 record__thread_munmap_filtered, NULL) == 0)
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002508 draining = true;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002509
2510 evlist__ctlfd_update(rec->evlist,
2511 &thread->pollfd.entries[thread->ctlfd_pos]);
Peter Zijlstra8b412662009-09-17 19:59:05 +02002512 }
2513
Alexey Budankovacce0222020-07-17 10:07:50 +03002514 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2515 switch (cmd) {
Adrian Hunterd20aff12020-09-01 12:37:57 +03002516 case EVLIST_CTL_CMD_SNAPSHOT:
2517 hit_auxtrace_snapshot_trigger(rec);
2518 evlist__ctlfd_ack(rec->evlist);
2519 break;
Jiri Olsaf186cd62020-12-27 00:20:37 +01002520 case EVLIST_CTL_CMD_STOP:
2521 done = 1;
2522 break;
Alexey Budankovacce0222020-07-17 10:07:50 +03002523 case EVLIST_CTL_CMD_ACK:
2524 case EVLIST_CTL_CMD_UNSUPPORTED:
Jiri Olsa991ae4e2020-12-27 00:20:35 +01002525 case EVLIST_CTL_CMD_ENABLE:
2526 case EVLIST_CTL_CMD_DISABLE:
Jiri Olsa142544a2020-12-27 00:20:36 +01002527 case EVLIST_CTL_CMD_EVLIST:
Jiri Olsa47fddcb2020-12-27 00:20:38 +01002528 case EVLIST_CTL_CMD_PING:
Alexey Budankovacce0222020-07-17 10:07:50 +03002529 default:
2530 break;
2531 }
2532 }
2533
Jiri Olsa774cb492012-11-12 18:34:01 +01002534 /*
2535 * When perf is starting the traced process, at the end events
2536 * die with the process and we wait for that. Thus no need to
2537 * disable events in this case.
2538 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002539 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00002540 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02002541 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01002542 disabled = true;
2543 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002544 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002545
Wang Nan5f9cf592016-04-20 18:59:49 +00002546 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002547 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002548
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002549 if (opts->auxtrace_snapshot_on_exit)
2550 record__auxtrace_snapshot_exit(rec);
2551
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002552 if (forks && workload_exec_errno) {
Arnaldo Carvalho de Melo3535a692021-04-14 09:32:14 -03002553 char msg[STRERR_BUFSIZE], strevsels[2048];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03002554 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melo3535a692021-04-14 09:32:14 -03002555
2556 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2557
2558 pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
2559 strevsels, argv[0], emsg);
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002560 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09002561 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002562 }
2563
Namhyung Kime3d59112015-01-29 17:06:44 +09002564 if (!quiet)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002565 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
2566 record__waking(rec));
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002567
Wang Nan4ea648a2016-07-14 08:34:47 +00002568 if (target__none(&rec->opts.target))
2569 record__synthesize_workload(rec, true);
2570
Namhyung Kim45604712014-05-12 09:47:24 +09002571out_child:
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002572 record__stop_threads(rec);
Alexey Budankov470530b2019-03-18 20:40:26 +03002573 record__mmap_read_all(rec, true);
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002574out_free_threads:
Alexey Bayduraev415ccb52022-01-17 21:34:23 +03002575 record__free_thread_data(rec);
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002576 evlist__finalize_ctlfd(rec->evlist);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002577 record__aio_mmap_read_sync(rec);
2578
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002579 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2580 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2581 session->header.env.comp_ratio = ratio + 0.5;
2582 }
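	/*
	 * Illustrative arithmetic: if 64 MB of records were handed to the
	 * compressor (bytes_transferred) and 16 MB were actually written
	 * (bytes_compressed), ratio is 64/16 = 4.0 and comp_ratio stores it
	 * rounded to the nearest integer via the + 0.5 above, i.e. 4.
	 */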
2583
Namhyung Kim45604712014-05-12 09:47:24 +09002584 if (forks) {
2585 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02002586
Namhyung Kim45604712014-05-12 09:47:24 +09002587 if (!child_finished)
2588 kill(rec->evlist->workload.pid, SIGTERM);
2589
2590 wait(&exit_status);
2591
2592 if (err < 0)
2593 status = err;
2594 else if (WIFEXITED(exit_status))
2595 status = WEXITSTATUS(exit_status);
2596 else if (WIFSIGNALED(exit_status))
2597 signr = WTERMSIG(exit_status);
2598 } else
2599 status = err;
2600
Namhyung Kimedc41a12022-05-18 15:47:21 -07002601 if (rec->off_cpu)
2602 rec->bytes_written += off_cpu_write(rec->session);
2603
Wang Nan4ea648a2016-07-14 08:34:47 +00002604 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09002605 /* this will be recalculated during process_buildids() */
2606 rec->samples = 0;
2607
Wang Nanecfd7a92016-04-13 08:21:07 +00002608 if (!err) {
2609 if (!rec->timestamp_filename) {
2610 record__finish_output(rec);
2611 } else {
2612 fd = record__switch_output(rec, true);
2613 if (fd < 0) {
2614 status = fd;
2615 goto out_delete_session;
2616 }
2617 }
2618 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002619
Wang Nana0748652016-11-26 07:03:28 +00002620 perf_hooks__invoke_record_end();
2621
Namhyung Kime3d59112015-01-29 17:06:44 +09002622 if (!err && !quiet) {
2623 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00002624 const char *postfix = rec->timestamp_filename ?
2625 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09002626
Adrian Hunteref149c22015-04-09 18:53:45 +03002627 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09002628 scnprintf(samples, sizeof(samples),
2629 " (%" PRIu64 " samples)", rec->samples);
2630 else
2631 samples[0] = '\0';
2632
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002633 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002634 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002635 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002636 if (ratio) {
2637 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
2638 rec->session->bytes_transferred / 1024.0 / 1024.0,
2639 ratio);
2640 }
2641 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09002642 }
2643
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002644out_delete_session:
Anand K Mistryda231332020-05-13 12:20:23 +10002645#ifdef HAVE_EVENTFD_SUPPORT
2646 if (done_fd >= 0)
2647 close(done_fd);
2648#endif
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002649 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002650 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07002651
2652 if (!opts->no_bpf_event)
Arnaldo Carvalho de Melo08c83992020-11-30 09:40:10 -03002653 evlist__stop_sb_thread(rec->sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09002654 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002655}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002656
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002657static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002658{
Kan Liangaad2b212015-01-05 13:23:04 -05002659 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01002660
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002661 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002662
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002663 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002664 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002665 callchain->dump_size);
2666}
2667
2668int record_opts__parse_callchain(struct record_opts *record,
2669 struct callchain_param *callchain,
2670 const char *arg, bool unset)
2671{
2672 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002673 callchain->enabled = !unset;
2674
2675 /* --no-call-graph */
2676 if (unset) {
2677 callchain->record_mode = CALLCHAIN_NONE;
2678 pr_debug("callchain: disabled\n");
2679 return 0;
2680 }
2681
2682 ret = parse_callchain_record_opt(arg, callchain);
2683 if (!ret) {
2684 /* Enable data address sampling for DWARF unwind. */
2685 if (callchain->record_mode == CALLCHAIN_DWARF)
2686 record->sample_address = true;
2687 callchain_debug(callchain);
2688 }
2689
2690 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002691}
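
/*
 * Illustrative command lines (a sketch, not from the original source) that
 * reach record_opts__parse_callchain() through the --call-graph option:
 *
 *   perf record --call-graph fp          # frame pointer unwinding
 *   perf record --call-graph dwarf,8192  # DWARF unwind with 8 kB stack dumps
 *   perf record --call-graph lbr         # LBR-assisted callchains
 *
 * For the dwarf mode, sample_address is enabled above so the recorded user
 * stack snapshots can be unwound later.
 */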
2692
Kan Liangc421e802015-07-29 05:42:12 -04002693int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002694 const char *arg,
2695 int unset)
2696{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002697 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02002698}
2699
Kan Liangc421e802015-07-29 05:42:12 -04002700int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002701 const char *arg __maybe_unused,
2702 int unset __maybe_unused)
2703{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002704 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04002705
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002706 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002707
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002708 if (callchain->record_mode == CALLCHAIN_NONE)
2709 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002710
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002711 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002712 return 0;
2713}
2714
Jiri Olsaeb853e82014-02-03 12:44:42 +01002715static int perf_record_config(const char *var, const char *value, void *cb)
2716{
Namhyung Kim7a29c082015-12-15 10:49:56 +09002717 struct record *rec = cb;
2718
2719 if (!strcmp(var, "record.build-id")) {
2720 if (!strcmp(value, "cache"))
2721 rec->no_buildid_cache = false;
2722 else if (!strcmp(value, "no-cache"))
2723 rec->no_buildid_cache = true;
2724 else if (!strcmp(value, "skip"))
2725 rec->no_buildid = true;
Jiri Olsae29386c2020-12-14 11:54:57 +01002726 else if (!strcmp(value, "mmap"))
2727 rec->buildid_mmap = true;
Namhyung Kim7a29c082015-12-15 10:49:56 +09002728 else
2729 return -1;
2730 return 0;
2731 }
Yisheng Xiecff17202018-03-12 19:25:57 +08002732 if (!strcmp(var, "record.call-graph")) {
2733 var = "call-graph.record-mode";
2734 return perf_default_config(var, value, cb);
2735 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03002736#ifdef HAVE_AIO_SUPPORT
2737 if (!strcmp(var, "record.aio")) {
2738 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2739 if (!rec->opts.nr_cblocks)
2740 rec->opts.nr_cblocks = nr_cblocks_default;
2741 }
2742#endif
Jiri Olsa9bce13e2021-12-09 21:04:25 +01002743 if (!strcmp(var, "record.debuginfod")) {
2744 rec->debuginfod.urls = strdup(value);
2745 if (!rec->debuginfod.urls)
2746 return -ENOMEM;
2747 rec->debuginfod.set = true;
2748 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01002749
Yisheng Xiecff17202018-03-12 19:25:57 +08002750 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002751}
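
/*
 * The keys handled above correspond to a ~/.perfconfig along these lines
 * (illustrative values; the debuginfod URL is a placeholder):
 *
 *   [record]
 *       build-id = cache        # or: no-cache, skip, mmap
 *       call-graph = dwarf      # forwarded as call-graph.record-mode
 *       aio = 4                 # only with HAVE_AIO_SUPPORT
 *       debuginfod = https://debuginfod.example.com
 */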
2752
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002753
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002754static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2755{
2756 struct record_opts *opts = (struct record_opts *)opt->value;
2757
2758 if (unset || !str)
2759 return 0;
2760
2761 if (!strcasecmp(str, "node"))
2762 opts->affinity = PERF_AFFINITY_NODE;
2763 else if (!strcasecmp(str, "cpu"))
2764 opts->affinity = PERF_AFFINITY_CPU;
2765
2766 return 0;
2767}
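
/*
 * Illustrative: 'perf record --affinity=node ...' masks the trace reading
 * thread to the NUMA node of the mmap buffer being flushed, '--affinity=cpu'
 * to that buffer's CPU; the default PERF_AFFINITY_SYS leaves the thread on
 * the system-provided affinity mask.
 */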
2768
Alexey Bayduraev7954f712022-01-17 21:34:21 +03002769static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
2770{
2771 mask->nbits = nr_bits;
2772 mask->bits = bitmap_zalloc(mask->nbits);
2773 if (!mask->bits)
2774 return -ENOMEM;
2775
2776 return 0;
2777}
2778
2779static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
2780{
2781 bitmap_free(mask->bits);
2782 mask->nbits = 0;
2783}
2784
2785static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
2786{
2787 int ret;
2788
2789 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
2790 if (ret) {
2791 mask->affinity.bits = NULL;
2792 return ret;
2793 }
2794
2795 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
2796 if (ret) {
2797 record__mmap_cpu_mask_free(&mask->maps);
2798 mask->maps.bits = NULL;
2799 }
2800
2801 return ret;
2802}
2803
2804static void record__thread_mask_free(struct thread_mask *mask)
2805{
2806 record__mmap_cpu_mask_free(&mask->maps);
2807 record__mmap_cpu_mask_free(&mask->affinity);
2808}
2809
Alexey Bayduraev06380a82022-01-17 21:34:32 +03002810static int record__parse_threads(const struct option *opt, const char *str, int unset)
2811{
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03002812 int s;
Alexey Bayduraev06380a82022-01-17 21:34:32 +03002813 struct record_opts *opts = opt->value;
2814
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03002815 if (unset || !str || !strlen(str)) {
Alexey Bayduraev06380a82022-01-17 21:34:32 +03002816 opts->threads_spec = THREAD_SPEC__CPU;
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03002817 } else {
2818 for (s = 1; s < THREAD_SPEC__MAX; s++) {
2819 if (s == THREAD_SPEC__USER) {
2820 opts->threads_user_spec = strdup(str);
2821 if (!opts->threads_user_spec)
2822 return -ENOMEM;
2823 opts->threads_spec = THREAD_SPEC__USER;
2824 break;
2825 }
2826 if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
2827 opts->threads_spec = s;
2828 break;
2829 }
2830 }
2831 }
2832
2833 if (opts->threads_spec == THREAD_SPEC__USER)
2834 pr_debug("threads_spec: %s\n", opts->threads_user_spec);
2835 else
2836 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
Alexey Bayduraev06380a82022-01-17 21:34:32 +03002837
2838 return 0;
2839}
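
/*
 * Illustrative specs accepted above (a sketch, not exhaustive):
 *
 *   --threads                  # default: one streaming thread per CPU
 *   --threads=core             # one thread per core
 *   --threads=package          # one thread per package
 *   --threads=numa             # one thread per NUMA node
 *   --threads=0-3/0-3:4-7/4-7  # user spec: <maps>/<affinity>[:<maps>/<affinity>...]
 */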
2840
Jiwei Sun6d575812019-10-22 16:09:01 +08002841static int parse_output_max_size(const struct option *opt,
2842 const char *str, int unset)
2843{
2844 unsigned long *s = (unsigned long *)opt->value;
2845 static struct parse_tag tags_size[] = {
2846 { .tag = 'B', .mult = 1 },
2847 { .tag = 'K', .mult = 1 << 10 },
2848 { .tag = 'M', .mult = 1 << 20 },
2849 { .tag = 'G', .mult = 1 << 30 },
2850 { .tag = 0 },
2851 };
2852 unsigned long val;
2853
2854 if (unset) {
2855 *s = 0;
2856 return 0;
2857 }
2858
2859 val = parse_tag_value(str, tags_size);
2860 if (val != (unsigned long) -1) {
2861 *s = val;
2862 return 0;
2863 }
2864
2865 return -1;
2866}
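
/*
 * Illustrative: '--max-size=500K' or '--max-size=2G' caps the output file,
 * using the B/K/M/G multipliers from tags_size above; recording stops once
 * the written bytes cross the limit.
 */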
2867
Adrian Huntere9db1312015-04-09 18:53:46 +03002868static int record__parse_mmap_pages(const struct option *opt,
2869 const char *str,
2870 int unset __maybe_unused)
2871{
2872 struct record_opts *opts = opt->value;
2873 char *s, *p;
2874 unsigned int mmap_pages;
2875 int ret;
2876
2877 if (!str)
2878 return -EINVAL;
2879
2880 s = strdup(str);
2881 if (!s)
2882 return -ENOMEM;
2883
2884 p = strchr(s, ',');
2885 if (p)
2886 *p = '\0';
2887
2888 if (*s) {
Arnaldo Carvalho de Melo25f847022020-11-30 15:09:45 -03002889 ret = __evlist__parse_mmap_pages(&mmap_pages, s);
Adrian Huntere9db1312015-04-09 18:53:46 +03002890 if (ret)
2891 goto out_free;
2892 opts->mmap_pages = mmap_pages;
2893 }
2894
2895 if (!p) {
2896 ret = 0;
2897 goto out_free;
2898 }
2899
Arnaldo Carvalho de Melo25f847022020-11-30 15:09:45 -03002900 ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
Adrian Huntere9db1312015-04-09 18:53:46 +03002901 if (ret)
2902 goto out_free;
2903
2904 opts->auxtrace_mmap_pages = mmap_pages;
2905
2906out_free:
2907 free(s);
2908 return ret;
2909}
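
/*
 * Illustrative: '-m 512,1024' sizes the event data mmap to 512 pages and the
 * AUX area tracing mmap to 1024 pages; a plain size such as '-m 16M' is also
 * accepted and converted to pages by __evlist__parse_mmap_pages().
 */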
2910
Alexandre Truong7248e302021-12-17 15:45:15 +00002911void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
2912{
2913}
2914
Alexey Budankov1d078cc2020-07-17 10:08:23 +03002915static int parse_control_option(const struct option *opt,
2916 const char *str,
2917 int unset __maybe_unused)
2918{
Adrian Hunter9864a662020-09-01 12:37:53 +03002919 struct record_opts *opts = opt->value;
Alexey Budankov1d078cc2020-07-17 10:08:23 +03002920
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002921 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
2922}
2923
Jiri Olsa0c582442017-01-09 10:51:59 +01002924static void switch_output_size_warn(struct record *rec)
2925{
Jiri Olsa9521b5f2019-07-28 12:45:35 +02002926 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
Jiri Olsa0c582442017-01-09 10:51:59 +01002927 struct switch_output *s = &rec->switch_output;
2928
2929 wakeup_size /= 2;
2930
2931 if (s->size < wakeup_size) {
2932 char buf[100];
2933
2934 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
2935		pr_warning("WARNING: switch-output data size lower than "
2936			   "wakeup kernel buffer size (%s); "
2937			   "expect bigger perf.data sizes\n", buf);
2938 }
2939}
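
/*
 * Worked example (illustrative): with a 1 MB mmap, wakeup_size becomes
 * 512 KB after the halving above, so '--switch-output=100K' would trigger
 * the warning: the kernel only wakes perf up about every 512 KB, so output
 * files can overshoot the requested threshold.
 */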
2940
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002941static int switch_output_setup(struct record *rec)
2942{
2943 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002944 static struct parse_tag tags_size[] = {
2945 { .tag = 'B', .mult = 1 },
2946 { .tag = 'K', .mult = 1 << 10 },
2947 { .tag = 'M', .mult = 1 << 20 },
2948 { .tag = 'G', .mult = 1 << 30 },
2949 { .tag = 0 },
2950 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002951 static struct parse_tag tags_time[] = {
2952 { .tag = 's', .mult = 1 },
2953 { .tag = 'm', .mult = 60 },
2954 { .tag = 'h', .mult = 60*60 },
2955 { .tag = 'd', .mult = 60*60*24 },
2956 { .tag = 0 },
2957 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002958 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002959
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002960 /*
2961	 * If we're using --switch-output-events, then we imply
2962	 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
2963	 * thread to its parent.
2964 */
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03002965 if (rec->switch_output_event_set) {
2966 if (record__threads_enabled(rec)) {
2967 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
2968 return 0;
2969 }
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002970 goto do_signal;
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03002971 }
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002972
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002973 if (!s->set)
2974 return 0;
2975
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03002976 if (record__threads_enabled(rec)) {
2977 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
2978 return 0;
2979 }
2980
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002981 if (!strcmp(s->str, "signal")) {
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002982do_signal:
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002983 s->signal = true;
2984 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002985 goto enabled;
2986 }
2987
2988 val = parse_tag_value(s->str, tags_size);
2989 if (val != (unsigned long) -1) {
2990 s->size = val;
2991 pr_debug("switch-output with %s size threshold\n", s->str);
2992 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002993 }
2994
Jiri Olsabfacbe32017-01-09 10:52:00 +01002995 val = parse_tag_value(s->str, tags_time);
2996 if (val != (unsigned long) -1) {
2997 s->time = val;
2998 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2999 s->str, s->time);
3000 goto enabled;
3001 }
3002
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003003 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01003004
3005enabled:
3006 rec->timestamp_filename = true;
3007 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01003008
3009 if (s->size && !rec->opts.no_buffering)
3010 switch_output_size_warn(rec);
3011
Jiri Olsadc0c6122017-01-09 10:51:58 +01003012 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003013}
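
/*
 * Illustrative: '--switch-output' (i.e. 'signal') rotates the perf.data file
 * on SIGUSR2, '--switch-output=100M' after roughly 100 MB written, and
 * '--switch-output=30s' every 30 seconds; every variant implies
 * --timestamp-filename through the 'enabled' path above.
 */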
3014
Namhyung Kime5b2c202014-10-23 00:15:46 +09003015static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02003016 "perf record [<options>] [<command>]",
3017 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003018 NULL
3019};
Namhyung Kime5b2c202014-10-23 00:15:46 +09003020const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003021
Arnaldo Carvalho de Melo6e0a9b3d2019-11-14 12:15:34 -03003022static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
3023 struct perf_sample *sample, struct machine *machine)
3024{
3025 /*
3026	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3027	 * so there is no need to add them twice.
3028 */
3029 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3030 return 0;
3031 return perf_event__process_mmap(tool, event, sample, machine);
3032}
3033
3034static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
3035 struct perf_sample *sample, struct machine *machine)
3036{
3037 /*
3038	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3039	 * so there is no need to add them twice.
3040 */
3041 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3042 return 0;
3043
3044 return perf_event__process_mmap2(tool, event, sample, machine);
3045}
3046
Adrian Hunter66286ed2021-05-03 09:42:22 +03003047static int process_timestamp_boundary(struct perf_tool *tool,
3048 union perf_event *event __maybe_unused,
3049 struct perf_sample *sample,
3050 struct machine *machine __maybe_unused)
3051{
3052 struct record *rec = container_of(tool, struct record, tool);
3053
3054 set_timestamp_boundary(rec, sample->time);
3055 return 0;
3056}
3057
Namhyung Kim41b740b2021-08-10 21:46:58 -07003058static int parse_record_synth_option(const struct option *opt,
3059 const char *str,
3060 int unset __maybe_unused)
3061{
3062 struct record_opts *opts = opt->value;
3063 char *p = strdup(str);
3064
3065 if (p == NULL)
3066 return -1;
3067
3068 opts->synth = parse_synth_opt(p);
3069 free(p);
3070
3071 if (opts->synth < 0) {
3072 pr_err("Invalid synth option: %s\n", str);
3073 return -1;
3074 }
3075 return 0;
3076}
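
/*
 * Illustrative: '--synth=no' skips synthesizing records for pre-existing
 * threads, '--synth=task' restricts synthesis to task events, and the
 * default '--synth=all' keeps everything; the accepted values appear in the
 * --synth option help below.
 */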
3077
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003078/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003079 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
3080 * because we need access to it in record__exit, which is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003081 * after cmd_record() exits, but since record_options needs to be accessible to
3082 * builtin-script, leave it here.
3083 *
3084 * At least we don't touch it in all the other functions here directly.
3085 *
3086 * Just say no to tons of global variables, sigh.
3087 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003088static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003089 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08003090 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003091 .mmap_pages = UINT_MAX,
3092 .user_freq = UINT_MAX,
3093 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03003094 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09003095 .target = {
3096 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02003097 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09003098 },
Alexey Budankov470530b2019-03-18 20:40:26 +03003099 .mmap_flush = MMAP_FLUSH_DEFAULT,
Stephane Eraniand99c22e2020-04-22 08:50:38 -07003100 .nr_threads_synthesize = 1,
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003101 .ctl_fd = -1,
3102 .ctl_fd_ack = -1,
Namhyung Kim41b740b2021-08-10 21:46:58 -07003103 .synth = PERF_SYNTH_ALL,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003104 },
Namhyung Kime3d59112015-01-29 17:06:44 +09003105 .tool = {
3106 .sample = process_sample_event,
3107 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03003108 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09003109 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05303110 .namespaces = perf_event__process_namespaces,
Arnaldo Carvalho de Melo6e0a9b3d2019-11-14 12:15:34 -03003111 .mmap = build_id__process_mmap,
3112 .mmap2 = build_id__process_mmap2,
Adrian Hunter66286ed2021-05-03 09:42:22 +03003113 .itrace_start = process_timestamp_boundary,
3114 .aux = process_timestamp_boundary,
Adrian Huntercca84822015-08-19 17:29:21 +03003115 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09003116 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003117};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02003118
Namhyung Kim76a26542015-10-22 23:28:32 +09003119const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
3120 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03003121
Wang Nan0aab2132016-06-16 08:02:41 +00003122static bool dry_run;
3123
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003124/*
3125 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3126 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03003127 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03003128 * evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003129 * using pipes, etc.
3130 */
Jiri Olsaefd21302017-01-03 09:19:55 +01003131static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003132 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02003133 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02003134 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003135 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08003136 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00003137 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3138 NULL, "don't record events from perf itself",
3139 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09003140 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03003141 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003142 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03003143 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003144 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003145 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03003146 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03003147 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003148 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02003149 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003150 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003151 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003152 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02003153 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003154 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01003155 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02003156 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02003157 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3158 &record.opts.no_inherit_set,
3159 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00003160 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3161 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00003162 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Wei Lia060c1f2020-08-19 11:19:47 +08003163 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03003164 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3165 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03003166 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
3167 "profile at this frequency",
3168 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03003169 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3170 "number of mmap data pages and AUX area tracing mmap pages",
3171 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03003172 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3173 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
3174 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003175 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08003176 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03003177 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02003178 NULL, "enables call-graph recording" ,
3179 &record_callchain_opt),
3180 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09003181 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02003182 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10003183 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02003184 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02003185 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003186 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02003187 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02003188 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04003189 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3190 "Record the sample physical addresses"),
Kan Liang542b88f2020-11-30 09:27:53 -08003191 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3192 "Record the sampled data address data page size"),
Kan Liangc1de7f32021-01-05 11:57:49 -08003193 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3194 "Record the sampled code address (ip) page size"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02003195 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc552015-07-06 14:51:01 +03003196 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
3197 &record.opts.sample_time_set,
3198 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01003199 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
3200 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003201 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02003202 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00003203 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3204 &record.no_buildid_cache_set,
3205 "do not update the buildid cache"),
3206 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3207 &record.no_buildid_set,
3208 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003209 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02003210 "monitor event in cgroup name only",
3211 parse_cgroups),
Alexey Budankov68cd3b42020-07-17 10:07:03 +03003212 OPT_INTEGER('D', "delay", &record.opts.initial_delay,
3213 "ms to wait before starting measurement after program start (-1: start with events disabled)"),
Adrian Huntereeb399b2019-10-04 11:31:21 +03003214 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003215 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
3216 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01003217
3218 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3219 "branch any", "sample any taken branches",
3220 parse_branch_stack),
3221
3222 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3223 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01003224 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01003225 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
3226 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07003227 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
3228 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02003229 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3230 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02003231 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3232 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07003233 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07003234 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3235 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07003236 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08003237 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3238 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02003239 OPT_CALLBACK('k', "clockid", &record.opts,
3240 "clockid", "clockid to use for events, see clock_gettime()",
3241 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03003242 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
3243 "opts", "AUX area tracing Snapshot Mode", ""),
Adrian Hunterc0a6de02019-11-15 14:42:16 +02003244 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3245 "opts", "sample AUX area", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08003246 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04003247 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05303248 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
3249 "Record namespaces events"),
Namhyung Kim8fb4b672020-03-25 21:45:34 +09003250 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3251 "Record cgroup events"),
Adrian Hunter16b4b4e2020-05-28 15:08:58 +03003252 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3253 &record.opts.record_switch_events_set,
3254 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01003255 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3256 "Configure all used events to run in kernel space.",
3257 PARSE_OPT_EXCLUSIVE),
3258 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3259 "Configure all used events to run in user space.",
3260 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01003261 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3262 "collect kernel callchains"),
3263 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3264 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00003265 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
3266 "clang binary to use for compiling BPF scriptlets"),
3267 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
3268 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00003269 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
3270 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09003271 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3272 "Record build-id of all DSOs regardless of hits"),
Jiri Olsae29386c2020-12-14 11:54:57 +01003273 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3274 "Record build-id in map events"),
Wang Nanecfd7a92016-04-13 08:21:07 +00003275 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3276 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08003277 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3278 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003279 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07003280 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
3281	 "Switch output when receiving SIGUSR2 (signal) or when crossing a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01003282 "signal"),
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003283 OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
3284 "switch output event selector. use 'perf list' to list available events",
3285 parse_events_option_new_evlist),
Andi Kleen03724b22019-03-14 15:49:55 -07003286 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3287 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00003288 OPT_BOOLEAN(0, "dry-run", &dry_run,
3289 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03003290#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03003291 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
3292 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03003293 record__aio_parse),
3294#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03003295 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3296 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3297 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003298#ifdef HAVE_ZSTD_SUPPORT
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003299 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3300 "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003301 record__parse_comp_level),
3302#endif
Jiwei Sun6d575812019-10-22 16:09:01 +08003303 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3304 "size", "Limit the maximum size of the output file", parse_output_max_size),
Stephane Eraniand99c22e2020-04-22 08:50:38 -07003305 OPT_UINTEGER(0, "num-thread-synthesize",
3306 &record.opts.nr_threads_synthesize,
3307 "number of threads to run for event synthesis"),
Stephane Eranian70943492020-05-05 11:29:43 -07003308#ifdef HAVE_LIBPFM
3309 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3310 "libpfm4 event selector. use 'perf list' to list available events",
3311 parse_libpfm_events_option),
3312#endif
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003313 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
Adrian Hunterd20aff12020-09-01 12:37:57 +03003314 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
3315 "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003316 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3317 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003318 parse_control_option),
Namhyung Kim41b740b2021-08-10 21:46:58 -07003319 OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
3320 "Fine-tune event synthesis: default=all", parse_record_synth_option),
Jiri Olsa9bce13e2021-12-09 21:04:25 +01003321 OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
3322 &record.debuginfod.set, "debuginfod urls",
3323 "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
3324 "system"),
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003325 OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
3326 "write collected trace data into several data files using parallel threads",
3327 record__parse_threads),
Namhyung Kimedc41a12022-05-18 15:47:21 -07003328 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003329 OPT_END()
3330};
3331
Namhyung Kime5b2c202014-10-23 00:15:46 +09003332struct option *record_options = __record_options;
3333
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003334static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
3335{
Ian Rogers02555712022-05-02 21:17:52 -07003336 struct perf_cpu cpu;
3337 int idx;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003338
Alexey Bayduraev23380e42022-04-13 18:46:40 -07003339 if (cpu_map__is_dummy(cpus))
3340 return;
3341
Ian Rogers02555712022-05-02 21:17:52 -07003342 perf_cpu_map__for_each_cpu(cpu, idx, cpus)
3343 set_bit(cpu.cpu, mask->bits);
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003344}
3345
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003346static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
3347{
3348 struct perf_cpu_map *cpus;
3349
3350 cpus = perf_cpu_map__new(mask_spec);
3351 if (!cpus)
3352 return -ENOMEM;
3353
3354 bitmap_zero(mask->bits, mask->nbits);
3355 record__mmap_cpu_mask_init(mask, cpus);
3356 perf_cpu_map__put(cpus);
3357
3358 return 0;
3359}
3360
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003361static void record__free_thread_masks(struct record *rec, int nr_threads)
3362{
3363 int t;
3364
3365 if (rec->thread_masks)
3366 for (t = 0; t < nr_threads; t++)
3367 record__thread_mask_free(&rec->thread_masks[t]);
3368
3369 zfree(&rec->thread_masks);
3370}
3371
3372static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3373{
3374 int t, ret;
3375
3376 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3377 if (!rec->thread_masks) {
3378 pr_err("Failed to allocate thread masks\n");
3379 return -ENOMEM;
3380 }
3381
3382 for (t = 0; t < nr_threads; t++) {
3383 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3384 if (ret) {
3385 pr_err("Failed to allocate thread masks[%d]\n", t);
3386 goto out_free;
3387 }
3388 }
3389
3390 return 0;
3391
3392out_free:
3393 record__free_thread_masks(rec, nr_threads);
3394
3395 return ret;
3396}
3397
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003398static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3399{
3400 int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
3401
3402 ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3403 if (ret)
3404 return ret;
3405
3406 rec->nr_threads = nr_cpus;
3407 pr_debug("nr_threads: %d\n", rec->nr_threads);
3408
3409 for (t = 0; t < rec->nr_threads; t++) {
Ian Rogers02555712022-05-02 21:17:52 -07003410 set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3411 set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003412 if (verbose) {
3413 pr_debug("thread_masks[%d]: ", t);
3414 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3415 pr_debug("thread_masks[%d]: ", t);
3416 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3417 }
3418 }
3419
3420 return 0;
3421}
3422
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003423static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3424 const char **maps_spec, const char **affinity_spec,
3425 u32 nr_spec)
3426{
3427 u32 s;
3428 int ret = 0, t = 0;
3429 struct mmap_cpu_mask cpus_mask;
3430 struct thread_mask thread_mask, full_mask, *thread_masks;
3431
3432 ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
3433 if (ret) {
3434 pr_err("Failed to allocate CPUs mask\n");
3435 return ret;
3436 }
3437 record__mmap_cpu_mask_init(&cpus_mask, cpus);
3438
3439 ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
3440 if (ret) {
3441 pr_err("Failed to allocate full mask\n");
3442 goto out_free_cpu_mask;
3443 }
3444
3445 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3446 if (ret) {
3447 pr_err("Failed to allocate thread mask\n");
3448 goto out_free_full_and_cpu_masks;
3449 }
3450
3451 for (s = 0; s < nr_spec; s++) {
3452 ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
3453 if (ret) {
3454 pr_err("Failed to initialize maps thread mask\n");
3455 goto out_free;
3456 }
3457 ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
3458 if (ret) {
3459 pr_err("Failed to initialize affinity thread mask\n");
3460 goto out_free;
3461 }
3462
3463 /* ignore invalid CPUs but do not allow empty masks */
3464 if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
3465 cpus_mask.bits, thread_mask.maps.nbits)) {
3466 pr_err("Empty maps mask: %s\n", maps_spec[s]);
3467 ret = -EINVAL;
3468 goto out_free;
3469 }
3470 if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
3471 cpus_mask.bits, thread_mask.affinity.nbits)) {
3472 pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
3473 ret = -EINVAL;
3474 goto out_free;
3475 }
3476
3477 /* do not allow intersection with other masks (full_mask) */
3478 if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
3479 thread_mask.maps.nbits)) {
3480 pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
3481 ret = -EINVAL;
3482 goto out_free;
3483 }
3484 if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
3485 thread_mask.affinity.nbits)) {
3486 pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
3487 ret = -EINVAL;
3488 goto out_free;
3489 }
3490
3491 bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
3492 thread_mask.maps.bits, full_mask.maps.nbits);
3493 bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
3494 thread_mask.affinity.bits, full_mask.maps.nbits);
3495
3496 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3497 if (!thread_masks) {
3498 pr_err("Failed to reallocate thread masks\n");
3499 ret = -ENOMEM;
3500 goto out_free;
3501 }
3502 rec->thread_masks = thread_masks;
3503 rec->thread_masks[t] = thread_mask;
3504 if (verbose) {
3505 pr_debug("thread_masks[%d]: ", t);
3506 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3507 pr_debug("thread_masks[%d]: ", t);
3508 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3509 }
3510 t++;
3511 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3512 if (ret) {
3513 pr_err("Failed to allocate thread mask\n");
3514 goto out_free_full_and_cpu_masks;
3515 }
3516 }
3517 rec->nr_threads = t;
3518 pr_debug("nr_threads: %d\n", rec->nr_threads);
3519 if (!rec->nr_threads)
3520 ret = -EINVAL;
3521
3522out_free:
3523 record__thread_mask_free(&thread_mask);
3524out_free_full_and_cpu_masks:
3525 record__thread_mask_free(&full_mask);
3526out_free_cpu_mask:
3527 record__mmap_cpu_mask_free(&cpus_mask);
3528
3529 return ret;
3530}
3531
3532static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3533{
3534 int ret;
3535 struct cpu_topology *topo;
3536
3537 topo = cpu_topology__new();
3538 if (!topo) {
3539 pr_err("Failed to allocate CPU topology\n");
3540 return -ENOMEM;
3541 }
3542
3543 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3544 topo->core_cpus_list, topo->core_cpus_lists);
3545 cpu_topology__delete(topo);
3546
3547 return ret;
3548}
3549
3550static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3551{
3552 int ret;
3553 struct cpu_topology *topo;
3554
3555 topo = cpu_topology__new();
3556 if (!topo) {
3557 pr_err("Failed to allocate CPU topology\n");
3558 return -ENOMEM;
3559 }
3560
3561 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3562 topo->package_cpus_list, topo->package_cpus_lists);
3563 cpu_topology__delete(topo);
3564
3565 return ret;
3566}
3567
3568static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3569{
3570 u32 s;
3571 int ret;
3572 const char **spec;
3573 struct numa_topology *topo;
3574
3575 topo = numa_topology__new();
3576 if (!topo) {
3577 pr_err("Failed to allocate NUMA topology\n");
3578 return -ENOMEM;
3579 }
3580
3581 spec = zalloc(topo->nr * sizeof(char *));
3582 if (!spec) {
3583 pr_err("Failed to allocate NUMA spec\n");
3584 ret = -ENOMEM;
3585 goto out_delete_topo;
3586 }
3587 for (s = 0; s < topo->nr; s++)
3588 spec[s] = topo->nodes[s].cpus;
3589
3590 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3591
3592 zfree(&spec);
3593
3594out_delete_topo:
3595 numa_topology__delete(topo);
3596
3597 return ret;
3598}
3599
3600static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3601{
3602 int t, ret;
3603 u32 s, nr_spec = 0;
3604 char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
3605 char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
3606
3607 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3608 spec = strtok_r(user_spec, ":", &spec_ptr);
3609 if (spec == NULL)
3610 break;
3611 pr_debug2("threads_spec[%d]: %s\n", t, spec);
3612 mask = strtok_r(spec, "/", &mask_ptr);
3613 if (mask == NULL)
3614 break;
3615 pr_debug2(" maps mask: %s\n", mask);
3616 tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
3617 if (!tmp_spec) {
3618 pr_err("Failed to reallocate maps spec\n");
3619 ret = -ENOMEM;
3620 goto out_free;
3621 }
3622 maps_spec = tmp_spec;
3623 maps_spec[nr_spec] = dup_mask = strdup(mask);
3624 if (!maps_spec[nr_spec]) {
3625 pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
3626 ret = -ENOMEM;
3627 goto out_free;
3628 }
3629 mask = strtok_r(NULL, "/", &mask_ptr);
3630 if (mask == NULL) {
3631 pr_err("Invalid thread maps or affinity specs\n");
3632 ret = -EINVAL;
3633 goto out_free;
3634 }
3635 pr_debug2(" affinity mask: %s\n", mask);
3636 tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
3637 if (!tmp_spec) {
3638 pr_err("Failed to reallocate affinity spec\n");
3639 ret = -ENOMEM;
3640 goto out_free;
3641 }
3642 affinity_spec = tmp_spec;
3643 affinity_spec[nr_spec] = strdup(mask);
3644 if (!affinity_spec[nr_spec]) {
3645 pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
3646 ret = -ENOMEM;
3647 goto out_free;
3648 }
3649 dup_mask = NULL;
3650 nr_spec++;
3651 }
3652
3653 ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3654 (const char **)affinity_spec, nr_spec);
3655
3656out_free:
3657 free(dup_mask);
3658 for (s = 0; s < nr_spec; s++) {
3659 if (maps_spec)
3660 free(maps_spec[s]);
3661 if (affinity_spec)
3662 free(affinity_spec[s]);
3663 }
3664 free(affinity_spec);
3665 free(maps_spec);
3666
3667 return ret;
3668}
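
/*
 * Illustrative: a user spec such as '--threads=0-3/0-3:4-7/4-7' is split
 * above on ':' into per-thread entries and on '/' into a maps mask and an
 * affinity mask, yielding two threads here, one serving CPUs 0-3 and one
 * serving CPUs 4-7.
 */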
3669
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003670static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3671{
3672 int ret;
3673
3674 ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3675 if (ret)
3676 return ret;
3677
3678 record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
3679
3680 rec->nr_threads = 1;
3681
3682 return 0;
3683}
3684
3685static int record__init_thread_masks(struct record *rec)
3686{
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003687 int ret = 0;
Adrian Hunter7be1fed2022-05-24 10:54:30 +03003688 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003689
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003690 if (!record__threads_enabled(rec))
3691 return record__init_thread_default_masks(rec, cpus);
3692
Adrian Hunter7be1fed2022-05-24 10:54:30 +03003693 if (evlist__per_thread(rec->evlist)) {
Alexey Bayduraev23380e42022-04-13 18:46:40 -07003694 pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
3695 return -EINVAL;
3696 }
3697
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003698 switch (rec->opts.threads_spec) {
3699 case THREAD_SPEC__CPU:
3700 ret = record__init_thread_cpu_masks(rec, cpus);
3701 break;
3702 case THREAD_SPEC__CORE:
3703 ret = record__init_thread_core_masks(rec, cpus);
3704 break;
3705 case THREAD_SPEC__PACKAGE:
3706 ret = record__init_thread_package_masks(rec, cpus);
3707 break;
3708 case THREAD_SPEC__NUMA:
3709 ret = record__init_thread_numa_masks(rec, cpus);
3710 break;
3711 case THREAD_SPEC__USER:
3712 ret = record__init_thread_user_masks(rec, cpus);
3713 break;
3714 default:
3715 break;
3716 }
3717
3718 return ret;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003719}
3720
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03003721int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003722{
Adrian Hunteref149c22015-04-09 18:53:45 +03003723 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003724 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09003725 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003726
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03003727 setlocale(LC_ALL, "");
3728
Wang Nan48e1cab2015-12-14 10:39:22 +00003729#ifndef HAVE_LIBBPF_SUPPORT
3730# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
3731 set_nobuild('\0', "clang-path", true);
3732 set_nobuild('\0', "clang-opt", true);
3733# undef set_nobuild
3734#endif
3735
He Kuang7efe0e02015-12-14 10:39:23 +00003736#ifndef HAVE_BPF_PROLOGUE
3737# if !defined (HAVE_DWARF_SUPPORT)
3738# define REASON "NO_DWARF=1"
3739# elif !defined (HAVE_LIBBPF_SUPPORT)
3740# define REASON "NO_LIBBPF=1"
3741# else
3742# define REASON "this architecture doesn't support BPF prologue"
3743# endif
3744# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
3745 set_nobuild('\0', "vmlinux", true);
3746# undef set_nobuild
3747# undef REASON
3748#endif
3749
Namhyung Kimedc41a12022-05-18 15:47:21 -07003750#ifndef HAVE_BPF_SKEL
3751# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
3752 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
3753# undef set_nobuild
3754#endif
3755
Alexey Budankov9d2ed642019-01-22 20:47:43 +03003756 rec->opts.affinity = PERF_AFFINITY_SYS;
3757
Jiri Olsa0f98b112019-07-21 13:23:55 +02003758 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03003759 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02003760 return -ENOMEM;
3761
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03003762 err = perf_config(perf_record_config, rec);
3763 if (err)
3764 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01003765
Tom Zanussibca647a2010-11-10 08:11:30 -06003766 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02003767 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09003768 if (quiet)
3769 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01003770
James Clark7cc72552021-10-18 14:48:42 +01003771 err = symbol__validate_sym_arguments();
3772 if (err)
3773 return err;
3774
Jiri Olsa9bce13e2021-12-09 21:04:25 +01003775 perf_debuginfod_setup(&record.debuginfod);
3776
Jiri Olsa483635a2017-02-17 18:00:18 +01003777 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03003778 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01003779 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003780
Namhyung Kimbea03402012-04-26 14:15:15 +09003781 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09003782 usage_with_options_msg(record_usage, record_options,
3783 "cgroup monitoring only available in system-wide mode");
3784
Stephane Eranian023695d2011-02-14 11:20:01 +02003785 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003786
Jiri Olsae29386c2020-12-14 11:54:57 +01003787 if (rec->buildid_mmap) {
3788 if (!perf_can_record_build_id()) {
3789 pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
3790 err = -EINVAL;
3791 goto out_opts;
3792 }
3793 pr_debug("Enabling build id in mmap2 events.\n");
3794 /* Enable mmap build id synthesizing. */
3795 symbol_conf.buildid_mmap2 = true;
3796 /* Enable perf_event_attr::build_id bit. */
3797 rec->opts.build_id = true;
3798 /* Disable build id cache. */
3799 rec->no_buildid = true;
3800 }
3801
Namhyung Kim4f2abe92021-05-27 11:28:35 -07003802 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
3803 pr_err("Kernel has no cgroup sampling support.\n");
3804 err = -EINVAL;
3805 goto out_opts;
3806 }
3807
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03003808 if (rec->opts.kcore || record__threads_enabled(rec))
Adrian Huntereeb399b2019-10-04 11:31:21 +03003809 rec->data.is_dir = true;
3810
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003811 if (record__threads_enabled(rec)) {
3812 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
3813			pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
			err = -EINVAL;
3814			goto out_opts;
3815		}
3816		if (record__aio_enabled(rec)) {
3817			pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
			err = -EINVAL;
3818			goto out_opts;
3819 }
3820 }
3821
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003822 if (rec->opts.comp_level != 0) {
3823 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
3824 rec->no_buildid = true;
3825 }
3826
Adrian Hunterb757bb02015-07-21 12:44:04 +03003827 if (rec->opts.record_switch_events &&
3828 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09003829 ui__error("kernel does not support recording context switch events\n");
3830 parse_options_usage(record_usage, record_options, "switch-events", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003831 err = -EINVAL;
3832 goto out_opts;
Adrian Hunterb757bb02015-07-21 12:44:04 +03003833 }
Stephane Eranian023695d2011-02-14 11:20:01 +02003834
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003835 if (switch_output_setup(rec)) {
3836 parse_options_usage(record_usage, record_options, "switch-output", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003837 err = -EINVAL;
3838 goto out_opts;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003839 }
3840
Jiri Olsabfacbe32017-01-09 10:52:00 +01003841 if (rec->switch_output.time) {
3842 signal(SIGALRM, alarm_sig_handler);
3843 alarm(rec->switch_output.time);
3844 }
3845
Andi Kleen03724b22019-03-14 15:49:55 -07003846 if (rec->switch_output.num_files) {
3847		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
3848						      sizeof(char *));
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003849 if (!rec->switch_output.filenames) {
3850 err = -EINVAL;
3851 goto out_opts;
3852 }
Andi Kleen03724b22019-03-14 15:49:55 -07003853 }
3854
	if (rec->timestamp_filename && record__threads_enabled(rec)) {
		rec->timestamp_filename = false;
		pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildids if they are required
		 * explicitly using
		 *
		 * perf record --switch-output --no-no-buildid \
		 *	      --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

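	/*
	 * In overwrite (flight-recorder) mode only the tail of the trace is
	 * kept, so also synthesize the side-band events at the end of the
	 * run to keep that tail resolvable.
	 */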
	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

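	/*
	 * No events were specified on the command line: fall back to the
	 * default (cycles) event; hybrid systems get one per core PMU.
	 */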
	if (rec->evlist->core.nr_entries == 0) {
		if (perf_pmu__has_hybrid()) {
			err = evlist__add_default_hybrid(rec->evlist,
							 !record.opts.no_samples);
		} else {
			err = __evlist__add_default(rec->evlist,
						    !record.opts.no_samples);
		}

		if (err < 0) {
			pr_err("Not enough memory for event selector list\n");
			goto out;
		}
	}

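	/*
	 * When a single thread is targeted (-t) and inheritance was not
	 * requested explicitly, do not follow its children.
	 */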
	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

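	/*
	 * On hybrid systems a user-supplied cpu list must agree with the
	 * cpus of the core PMUs; evlist__fix_hybrid_cpus() checks this and,
	 * as I understand it, adjusts the events' cpu maps to match.
	 */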
	if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
		pr_err("failed to use cpu list %s\n",
		       rec->opts.target.cpu_list);
		err = -EINVAL;
		goto out;
	}

	rec->opts.target.hybrid = perf_pmu__has_hybrid();

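	/*
	 * With frame-pointer call chains the caller of a leaf function can
	 * be lost; on arm64 this hook records the link register so that
	 * frame can be reconstructed (see arch__add_leaf_frame_record_opts()).
	 */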
	if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
		arch__add_leaf_frame_record_opts(&rec->opts);

	err = -ENOMEM;
	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing data
	 * because we do not decode the trace: decoding it here would take
	 * too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);
		if (err) {
			pr_err("record__config_text_poke failed, error %d\n", err);
			goto out;
		}
	}

	if (rec->off_cpu) {
		err = record__config_off_cpu(rec);
		if (err) {
			pr_err("record__config_off_cpu failed, error %d\n", err);
			goto out;
		}
	}

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	err = record__init_thread_masks(rec);
	if (err) {
		pr_err("Failed to initialize parallel data streaming masks\n");
		goto out;
	}

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
out_opts:
	record__free_thread_masks(rec, rec->nr_threads);
	rec->nr_threads = 0;
	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	return err;
}

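/*
 * Signal handler (SIGUSR2, if I read the setup code right): take an AUX
 * area snapshot and/or rotate the output, depending on what was requested.
 */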
static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	hit_auxtrace_snapshot_trigger(rec);

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}