// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include <internal/xyarray.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/mutex.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/clockid.h"
#include "util/off_cpu.h"
#include "util/bpf-filter.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#ifndef HAVE_GETTID
#include <syscall.h>
#endif
#include <sched.h>
#include <signal.h>
#ifdef HAVE_EVENTFD_SUPPORT
#include <sys/eventfd.h>
#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
#include <sys/time.h>

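/*
 * State for the --switch-output option: rotate the output file on a
 * signal ("signal" mode) or when a size or time threshold is crossed;
 * filenames keeps the names of the most recent rotated outputs.
 */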
struct switch_output {
	bool		enabled;
	bool		signal;
	unsigned long	size;
	unsigned long	time;
	const char	*str;
	bool		set;
	char		**filenames;
	int		num_files;
	int		cur_file;
};

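/*
 * Per-worker CPU masks: "maps" selects which mmaps the thread reads,
 * "affinity" is the CPU set the thread itself is allowed to run on.
 */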
struct thread_mask {
	struct mmap_cpu_mask	maps;
	struct mmap_cpu_mask	affinity;
};

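/*
 * Per-thread state for parallel (--threads) trace streaming: the msg/ack
 * pipes carry control messages between the main process and the workers,
 * pollfd holds this thread's private copy of the fds it waits on, and the
 * byte counters feed the final session statistics.
 */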
struct record_thread {
	pid_t			tid;
	struct thread_mask	*mask;
	struct {
		int		msg[2];
		int		ack[2];
	} pipes;
	struct fdarray		pollfd;
	int			ctlfd_pos;
	int			nr_mmaps;
	struct mmap		**maps;
	struct mmap		**overwrite_maps;
	struct record		*rec;
	unsigned long long	samples;
	unsigned long		waking;
	u64			bytes_written;
	u64			bytes_transferred;
	u64			bytes_compressed;
};

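/* Thread-local pointer to the current worker's record_thread state. */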
static __thread struct record_thread *thread;

enum thread_msg {
	THREAD_MSG__UNDEFINED = 0,
	THREAD_MSG__READY,
	THREAD_MSG__MAX,
};

static const char *thread_msg_tags[THREAD_MSG__MAX] = {
	"UNDEFINED", "READY"
};

enum thread_spec {
	THREAD_SPEC__UNDEFINED = 0,
	THREAD_SPEC__CPU,
	THREAD_SPEC__CORE,
	THREAD_SPEC__PACKAGE,
	THREAD_SPEC__NUMA,
	THREAD_SPEC__USER,
	THREAD_SPEC__MAX,
};

static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
	"undefined", "cpu", "core", "package", "numa", "user"
};

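/*
 * Maps an entry in the evlist's pollfd array to its duplicate in the main
 * worker thread's private pollfd array, so revents seen by the thread can
 * be copied back (see record__update_evlist_pollfd_from_thread()).
 */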
struct pollfd_index_map {
	int	evlist_pollfd_index;
	int	thread_pollfd_index;
};

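/* Top-level state of a perf record session. */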
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	u64			thread_bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	struct evlist		*sb_evlist;
	pthread_t		thread_id;
	int			realtime_prio;
	bool			switch_output_event_set;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			buildid_mmap;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	bool			off_cpu;
	struct switch_output	switch_output;
	unsigned long long	samples;
	unsigned long		output_max_size;	/* = 0: unlimited */
	struct perf_debuginfod	debuginfod;
	int			nr_threads;
	struct thread_mask	*thread_masks;
	struct record_thread	*thread_data;
	struct pollfd_index_map	*index_map;
	size_t			index_map_sz;
	size_t			index_map_cnt;
};

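/* Set from the signal handler or the size-limit check to end the main loop. */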
static volatile int done;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
	return (pid_t)syscall(__NR_gettid);
}
#endif

static int record__threads_enabled(struct record *rec)
{
	return rec->opts.threads_spec;
}

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static u64 record__bytes_written(struct record *rec)
{
	return rec->bytes_written + rec->thread_bytes_written;
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (record__bytes_written(rec) >= rec->output_max_size);
}

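/*
 * Write size bytes of bf to the output: the per-thread file when parallel
 * streaming gave this map its own file, otherwise the single perf.data
 * file. Accounts the bytes written, stops the session once the output
 * size limit is exceeded and fires the switch-output trigger when its
 * size threshold is reached.
 */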
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (map && map->file)
		file = map->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	if (map && map->file) {
		thread->bytes_written += size;
		rec->thread_bytes_written += size;
	} else {
		rec->bytes_written += size;
	}

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				record__bytes_written(rec) >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, struct mmap *map,
			    void *dst, size_t dst_size, void *src, size_t src_size);

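/*
 * Asynchronous (--aio) writing: kernel buffer contents are copied (or
 * compressed) into per-mmap aio.data[] buffers and queued with aio_write(),
 * so draining the ring buffers does not block on disk I/O.
 */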
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
		void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push(), so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * The aio write request may need to be restarted with the
		 * remainder if the kernel didn't write the whole chunk
		 * at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * The started aio write is not complete yet,
				 * so it has to be waited on before the next
				 * allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}

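/*
 * Drain one mmap asynchronously: wait for a free aio.data[] buffer, fill
 * it via record__aio_pushfn() and queue an aio write of the result at
 * file offset *off, advancing *off on success.
 */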
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

498static int nr_cblocks_default = 1;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300499static int nr_cblocks_max = 4;
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300500
501static int record__aio_parse(const struct option *opt,
Alexey Budankov93f20c02018-11-06 12:07:19 +0300502 const char *str,
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300503 int unset)
504{
505 struct record_opts *opts = (struct record_opts *)opt->value;
506
Alexey Budankov93f20c02018-11-06 12:07:19 +0300507 if (unset) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300508 opts->nr_cblocks = 0;
Alexey Budankov93f20c02018-11-06 12:07:19 +0300509 } else {
510 if (str)
511 opts->nr_cblocks = strtol(str, NULL, 0);
512 if (!opts->nr_cblocks)
513 opts->nr_cblocks = nr_cblocks_default;
514 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300515
516 return 0;
517}
518#else /* HAVE_AIO_SUPPORT */
Alexey Budankov93f20c02018-11-06 12:07:19 +0300519static int nr_cblocks_max = 0;
520
Jiri Olsaa5830532019-07-27 20:30:53 +0200521static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
Alexey Budankovef781122019-03-18 20:44:12 +0300522 off_t *off __maybe_unused)
Alexey Budankovd3d1af62018-11-06 12:04:58 +0300523{
524 return -1;
525}
526
527static off_t record__aio_get_pos(int trace_fd __maybe_unused)
528{
529 return -1;
530}
531
532static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
533{
534}
535
536static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
537{
538}
539#endif
540
541static int record__aio_enabled(struct record *rec)
542{
543 return rec->opts.nr_cblocks > 0;
544}
545
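/*
 * Parse the mmap flush threshold: a plain number or a B/K/M/G-suffixed
 * size, clamped to a quarter of the mmap buffer so the ring is still
 * flushed regularly.
 */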
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

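/* Serializes writes of synthesized events when synthesis runs on multiple threads. */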
static struct mutex synth_lock;

static int process_locked_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	int ret;

	mutex_lock(&synth_lock);
	ret = process_synthesized_event(tool, event, sample, machine);
	mutex_unlock(&synth_lock);
	return ret;
}

static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	thread->samples++;
	return record__write(rec, map, bf, size);
}

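/*
 * Signal state shared with the handlers below; sig_atomic_t keeps the
 * accesses async-signal-safe.
 */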
static volatile sig_atomic_t signr = -1;
static volatile sig_atomic_t child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
static volatile sig_atomic_t done_fd = -1;
#endif

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
	if (done_fd >= 0) {
		u64 tmp = 1;
		int orig_errno = errno;

		/*
		 * It is possible for this signal handler to run after done is
		 * checked in the main loop, but before the perf counter fds are
		 * polled. If this happens, the poll() will continue to wait
		 * even though done is set, and will only break out if either
		 * another signal is received, or the counters are ready for
		 * read. To ensure the poll() doesn't sleep when done is set,
		 * use an eventfd (done_fd) to wake up the poll().
		 */
		if (write(done_fd, &tmp, sizeof(tmp)) < 0)
			pr_err("failed to signal wakeup fd, error: %m\n");

		errno = orig_errno;
	}
#endif // HAVE_EVENTFD_SUPPORT
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

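/*
 * AUX area tracing (e.g. Intel PT): AUX data is read from the auxtrace
 * mmaps and written out as auxtrace events, padded to 8-byte alignment.
 */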
#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
	    && record__threads_enabled(rec)) {
		pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
		return -EINVAL;
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	auxtrace_regroup_aux_output(rec->evlist);

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__config_text_poke(struct evlist *evlist)
{
	struct evsel *evsel;

	/* Nothing to do if text poke is already configured */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.text_poke)
			return 0;
	}

	evsel = evlist__add_dummy_on_all_cpus(evlist);
	if (!evsel)
		return -ENOMEM;

	evsel->core.attr.text_poke = 1;
	evsel->core.attr.ksymbol = 1;
	evsel->immediate = true;
	evsel__set_sample_bit(evsel, TIME);

	return 0;
}

static int record__config_off_cpu(struct record *rec)
{
	return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
}

static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

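/*
 * Worker thread plumbing: each record_thread gets a msg pipe (main ->
 * worker) and an ack pipe (worker -> main); -1 marks a closed or unused
 * pipe end.
 */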
static void record__thread_data_init_pipes(struct record_thread *thread_data)
{
	thread_data->pipes.msg[0] = -1;
	thread_data->pipes.msg[1] = -1;
	thread_data->pipes.ack[0] = -1;
	thread_data->pipes.ack[1] = -1;
}

static int record__thread_data_open_pipes(struct record_thread *thread_data)
{
	if (pipe(thread_data->pipes.msg))
		return -EINVAL;

	if (pipe(thread_data->pipes.ack)) {
		close(thread_data->pipes.msg[0]);
		thread_data->pipes.msg[0] = -1;
		close(thread_data->pipes.msg[1]);
		thread_data->pipes.msg[1] = -1;
		return -EINVAL;
	}

	pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
		 thread_data->pipes.msg[0], thread_data->pipes.msg[1],
		 thread_data->pipes.ack[0], thread_data->pipes.ack[1]);

	return 0;
}

static void record__thread_data_close_pipes(struct record_thread *thread_data)
{
	if (thread_data->pipes.msg[0] != -1) {
		close(thread_data->pipes.msg[0]);
		thread_data->pipes.msg[0] = -1;
	}
	if (thread_data->pipes.msg[1] != -1) {
		close(thread_data->pipes.msg[1]);
		thread_data->pipes.msg[1] = -1;
	}
	if (thread_data->pipes.ack[0] != -1) {
		close(thread_data->pipes.ack[0]);
		thread_data->pipes.ack[0] = -1;
	}
	if (thread_data->pipes.ack[1] != -1) {
		close(thread_data->pipes.ack[1]);
		thread_data->pipes.ack[1] = -1;
	}
}

static bool evlist__per_thread(struct evlist *evlist)
{
	return cpu_map__is_dummy(evlist->core.user_requested_cpus);
}

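/*
 * Distribute the evlist's mmaps to this thread according to its CPU mask;
 * in per-thread mode (dummy CPU map) the single thread takes all of them.
 */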
static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
{
	int m, tm, nr_mmaps = evlist->core.nr_mmaps;
	struct mmap *mmap = evlist->mmap;
	struct mmap *overwrite_mmap = evlist->overwrite_mmap;
	struct perf_cpu_map *cpus = evlist->core.all_cpus;
	bool per_thread = evlist__per_thread(evlist);

	if (per_thread)
		thread_data->nr_mmaps = nr_mmaps;
	else
		thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
						      thread_data->mask->maps.nbits);
	if (mmap) {
		thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
		if (!thread_data->maps)
			return -ENOMEM;
	}
	if (overwrite_mmap) {
		thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
		if (!thread_data->overwrite_maps) {
			zfree(&thread_data->maps);
			return -ENOMEM;
		}
	}
	pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
		  thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);

	for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
		if (per_thread ||
		    test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
			if (thread_data->maps) {
				thread_data->maps[tm] = &mmap[m];
				pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
			}
			if (thread_data->overwrite_maps) {
				thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
				pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
			}
			tm++;
		}
	}

	return 0;
}

static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
{
	int f, tm, pos;
	struct mmap *map, *overwrite_map;

	fdarray__init(&thread_data->pollfd, 64);

	for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
		map = thread_data->maps ? thread_data->maps[tm] : NULL;
		overwrite_map = thread_data->overwrite_maps ?
				thread_data->overwrite_maps[tm] : NULL;

		for (f = 0; f < evlist->core.pollfd.nr; f++) {
			void *ptr = evlist->core.pollfd.priv[f].ptr;

			if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
				pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
							      &evlist->core.pollfd);
				if (pos < 0)
					return pos;
				pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
					 thread_data, pos, evlist->core.pollfd.entries[f].fd);
			}
		}
	}

	return 0;
}

static void record__free_thread_data(struct record *rec)
{
	int t;
	struct record_thread *thread_data = rec->thread_data;

	if (thread_data == NULL)
		return;

	for (t = 0; t < rec->nr_threads; t++) {
		record__thread_data_close_pipes(&thread_data[t]);
		zfree(&thread_data[t].maps);
		zfree(&thread_data[t].overwrite_maps);
		fdarray__exit(&thread_data[t].pollfd);
	}

	zfree(&rec->thread_data);
}

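/*
 * Non-perf fds (e.g. the control fd) live in the evlist's pollfd but are
 * polled by the main worker thread; remember the index pairs so revents
 * can be copied back from the thread's pollfd to the evlist's.
 */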
static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
						    int evlist_pollfd_index,
						    int thread_pollfd_index)
{
	size_t x = rec->index_map_cnt;

	if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
		return -ENOMEM;
	rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
	rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
	rec->index_map_cnt += 1;
	return 0;
}

static int record__update_evlist_pollfd_from_thread(struct record *rec,
						    struct evlist *evlist,
						    struct record_thread *thread_data)
{
	struct pollfd *e_entries = evlist->core.pollfd.entries;
	struct pollfd *t_entries = thread_data->pollfd.entries;
	int err = 0;
	size_t i;

	for (i = 0; i < rec->index_map_cnt; i++) {
		int e_pos = rec->index_map[i].evlist_pollfd_index;
		int t_pos = rec->index_map[i].thread_pollfd_index;

		if (e_entries[e_pos].fd != t_entries[t_pos].fd ||
		    e_entries[e_pos].events != t_entries[t_pos].events) {
			pr_err("Thread and evlist pollfd index mismatch\n");
			err = -EINVAL;
			continue;
		}
		e_entries[e_pos].revents = t_entries[t_pos].revents;
	}
	return err;
}

static int record__dup_non_perf_events(struct record *rec,
				       struct evlist *evlist,
				       struct record_thread *thread_data)
{
	struct fdarray *fda = &evlist->core.pollfd;
	int i, ret;

	for (i = 0; i < fda->nr; i++) {
		if (!(fda->priv[i].flags & fdarray_flag__non_perf_event))
			continue;
		ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda);
		if (ret < 0) {
			pr_err("Failed to duplicate descriptor in main thread pollfd\n");
			return ret;
		}
		pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n",
			  thread_data, ret, fda->entries[i].fd);
		ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
		if (ret < 0) {
			pr_err("Failed to map thread and evlist pollfd indexes\n");
			return ret;
		}
	}
	return 0;
}

static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
{
	int t, ret;
	struct record_thread *thread_data;

	rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
	if (!rec->thread_data) {
		pr_err("Failed to allocate thread data\n");
		return -ENOMEM;
	}
	thread_data = rec->thread_data;

	for (t = 0; t < rec->nr_threads; t++)
		record__thread_data_init_pipes(&thread_data[t]);

	for (t = 0; t < rec->nr_threads; t++) {
		thread_data[t].rec = rec;
		thread_data[t].mask = &rec->thread_masks[t];
		ret = record__thread_data_init_maps(&thread_data[t], evlist);
		if (ret) {
			pr_err("Failed to initialize thread[%d] maps\n", t);
			goto out_free;
		}
		ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
		if (ret) {
			pr_err("Failed to initialize thread[%d] pollfd\n", t);
			goto out_free;
		}
		if (t) {
			thread_data[t].tid = -1;
			ret = record__thread_data_open_pipes(&thread_data[t]);
			if (ret) {
				pr_err("Failed to open thread[%d] communication pipes\n", t);
				goto out_free;
			}
			ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
					   POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
			if (ret < 0) {
				pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
				goto out_free;
			}
			thread_data[t].ctlfd_pos = ret;
			pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
				 thread_data, thread_data[t].ctlfd_pos,
				 thread_data[t].pipes.msg[0]);
		} else {
			thread_data[t].tid = gettid();

			ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
			if (ret < 0)
				goto out_free;

			thread_data[t].ctlfd_pos = -1; /* Not used */
		}
	}

	return 0;

out_free:
	record__free_thread_data(rec);

	return ret;
}

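/*
 * mmap the event ring buffers (and AUX areas), set up the control fd,
 * allocate per-thread data, and in parallel mode create one output file
 * per mmap under the output data directory.
 */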
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	int i, ret;
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 auxtrace_overwrite,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}

	if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
		return -1;

	ret = record__alloc_thread_data(rec, evlist);
	if (ret)
		return ret;

	if (record__threads_enabled(rec)) {
		ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
		if (ret) {
			pr_err("Failed to create data directory: %s\n", strerror(-ret));
			return ret;
		}
		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			if (evlist->mmap)
				evlist->mmap[i].file = &rec->data.dir.files[i];
			if (evlist->overwrite_mmap)
				evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
		}
	}

	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

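/*
 * Open all events, falling back to weaker configurations where the
 * kernel rejects an event, then apply filters and mmap the buffers.
 */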
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001280static int record__open(struct record *rec)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001281{
Arnaldo Carvalho de Melod6195a62017-02-13 16:45:24 -03001282 char msg[BUFSIZ];
Jiri Olsa32dcd022019-07-21 13:23:51 +02001283 struct evsel *pos;
Jiri Olsa63503db2019-07-21 13:23:52 +02001284 struct evlist *evlist = rec->evlist;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001285 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001286 struct record_opts *opts = &rec->opts;
David Ahern8d3eca22012-08-26 12:24:47 -06001287 int rc = 0;
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02001288
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -03001289 /*
Kan Liangb91e5492021-07-08 09:03:32 -07001290 * For initial_delay, system wide or a hybrid system, we need to add a
1291 * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
1292 * of waiting or event synthesis.
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -03001293 */
Changbin Ducb4b9e62023-03-02 11:11:45 +08001294 if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
Ian Rogers94f9eb92023-05-27 00:22:09 -07001295 perf_pmus__num_core_pmus() > 1) {
Arnaldo Carvalho de Meloe80db252020-11-30 14:39:41 -03001296 pos = evlist__get_tracking_event(evlist);
Adrian Hunter442ad2252020-06-29 12:19:51 +03001297 if (!evsel__is_dummy_event(pos)) {
1298 /* Set up dummy event. */
Arnaldo Carvalho de Melofacbf0b2020-07-08 13:49:15 -03001299 if (evlist__add_dummy(evlist))
Adrian Hunter442ad2252020-06-29 12:19:51 +03001300 return -ENOMEM;
1301 pos = evlist__last(evlist);
Arnaldo Carvalho de Meloe80db252020-11-30 14:39:41 -03001302 evlist__set_tracking_event(evlist, pos);
Adrian Hunter442ad2252020-06-29 12:19:51 +03001303 }
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -03001304
Ian Rogers0a892c12020-04-22 10:36:15 -07001305 /*
1306 * Enable the dummy event when the process is forked for
1307 * initial_delay, immediately for system wide.
1308 */
Changbin Ducb4b9e62023-03-02 11:11:45 +08001309 if (opts->target.initial_delay && !pos->immediate &&
Namhyung Kimbb07d622021-08-27 16:32:12 -07001310 !target__has_cpu(&opts->target))
Ian Rogers0a892c12020-04-22 10:36:15 -07001311 pos->core.attr.enable_on_exec = 1;
1312 else
1313 pos->immediate = 1;
Arnaldo Carvalho de Melod3dbf432017-11-03 15:34:34 -03001314 }
1315
Arnaldo Carvalho de Melo78e1bc22020-11-30 15:07:49 -03001316 evlist__config(evlist, opts, &callchain_param);
Jiri Olsacac21422012-11-12 18:34:00 +01001317
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001318 evlist__for_each_entry(evlist, pos) {
Ingo Molnar3da297a2009-06-07 17:39:02 +02001319try_again:
Jiri Olsaaf663bd2019-07-21 13:24:39 +02001320 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
Arnaldo Carvalho de Meloae430892020-04-30 11:46:15 -03001321 if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
Namhyung Kimbb963e12017-02-17 17:17:38 +09001322 if (verbose > 0)
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03001323 ui__warning("%s\n", msg);
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001324 goto try_again;
1325 }
Andi Kleencf99ad12018-10-01 12:59:27 -07001326 if ((errno == EINVAL || errno == EBADF) &&
Jiri Olsafba7c862021-07-06 17:17:00 +02001327 pos->core.leader != &pos->core &&
Andi Kleencf99ad12018-10-01 12:59:27 -07001328 pos->weak_group) {
Arnaldo Carvalho de Melo64b47782020-11-30 14:58:32 -03001329 pos = evlist__reset_weak_group(evlist, pos, true);
Andi Kleencf99ad12018-10-01 12:59:27 -07001330 goto try_again;
1331 }
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03001332 rc = -errno;
Arnaldo Carvalho de Melo2bb72db2020-05-04 13:43:03 -03001333 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03001334 ui__error("%s\n", msg);
David Ahern8d3eca22012-08-26 12:24:47 -06001335 goto out;
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001336 }
Andi Kleenbfd8f722017-11-17 13:42:58 -08001337
1338 pos->supported = true;
Li Zefanc171b552009-10-15 11:22:07 +08001339 }
Arnaldo Carvalho de Meloa43d3f02010-12-25 12:12:25 -02001340
Arnaldo Carvalho de Melo78e1bc22020-11-30 15:07:49 -03001341 if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
Arnaldo Carvalho de Meloc8b567c2019-09-23 11:07:29 -03001342 pr_warning(
1343"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1344"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1345"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1346"file is not found in the buildid cache or in the vmlinux path.\n\n"
1347"Samples in kernel modules won't be resolved at all.\n\n"
1348"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1349"even with a suitable vmlinux or kallsyms file.\n\n");
1350 }
1351
Arnaldo Carvalho de Melo24bf91a2020-11-30 09:38:02 -03001352 if (evlist__apply_filters(evlist, &pos)) {
Arnaldo Carvalho de Melo62d94b02017-06-27 11:22:31 -03001353 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
Namhyung Kim4310551b2023-03-14 16:42:36 -07001354 pos->filter ?: "BPF", evsel__name(pos), errno,
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001355 str_error_r(errno, msg, sizeof(msg)));
David Ahern8d3eca22012-08-26 12:24:47 -06001356 rc = -1;
1357 goto out;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001358 }
1359
Wang Nancda57a82016-06-27 10:24:03 +00001360 rc = record__mmap(rec);
1361 if (rc)
David Ahern8d3eca22012-08-26 12:24:47 -06001362 goto out;
Arnaldo Carvalho de Melo0a27d7f2011-01-14 15:50:51 -02001363
Jiri Olsa563aecb2013-06-05 13:35:06 +02001364 session->evlist = evlist;
Arnaldo Carvalho de Melo7b56cce2012-08-01 19:31:00 -03001365 perf_session__set_id_hdr_size(session);
David Ahern8d3eca22012-08-26 12:24:47 -06001366out:
1367 return rc;
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001368}
1369
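/*
 * Track the time span covered by the recorded samples: latch the first
 * non-zero sample time once, and keep updating the last one for every
 * sample that carries a timestamp.
 */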
Adrian Hunter66286ed2021-05-03 09:42:22 +03001370static void set_timestamp_boundary(struct record *rec, u64 sample_time)
1371{
1372 if (rec->evlist->first_sample_time == 0)
1373 rec->evlist->first_sample_time = sample_time;
1374
1375 if (sample_time)
1376 rec->evlist->last_sample_time = sample_time;
1377}
1378
Namhyung Kime3d59112015-01-29 17:06:44 +09001379static int process_sample_event(struct perf_tool *tool,
1380 union perf_event *event,
1381 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +02001382 struct evsel *evsel,
Namhyung Kime3d59112015-01-29 17:06:44 +09001383 struct machine *machine)
1384{
1385 struct record *rec = container_of(tool, struct record, tool);
1386
Adrian Hunter66286ed2021-05-03 09:42:22 +03001387 set_timestamp_boundary(rec, sample->time);
Jin Yao68588ba2017-12-08 21:13:42 +08001388
1389 if (rec->buildid_all)
1390 return 0;
1391
1392 rec->samples++;
Namhyung Kime3d59112015-01-29 17:06:44 +09001393 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
1394}
1395
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001396static int process_buildids(struct record *rec)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001397{
Jiri Olsaf5fc14122013-10-15 16:27:32 +02001398 struct perf_session *session = rec->session;
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001399
Jiri Olsa45112e82019-02-21 10:41:29 +01001400 if (perf_data__size(&rec->data) == 0)
Arnaldo Carvalho de Melo9f591fd2010-03-11 15:53:11 -03001401 return 0;
1402
Namhyung Kim00dc8652014-11-04 10:14:32 +09001403	/*
1404	 * During this process, the kernel map is loaded and
1405	 * dso->long_name is replaced with the real pathname found.
1406	 * Here we prefer a vmlinux path like
1407	 * /lib/modules/3.16.4/build/vmlinux
1408	 *
1409	 * rather than a build-id path (in the debug directory), e.g.
1410	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
1411	 */
1412 symbol_conf.ignore_vmlinux_buildid = true;
1413
Namhyung Kim61566812016-01-11 22:37:09 +09001414	/*
1415	 * If --buildid-all is given, all DSOs are marked regardless of hits,
Jin Yao68588ba2017-12-08 21:13:42 +08001416	 * so there is no need to process samples. But if timestamp_boundary
1417	 * is enabled, we still need to walk all samples to get the
1418	 * timestamps of the first/last samples.
Namhyung Kim61566812016-01-11 22:37:09 +09001419	 */
Jin Yao68588ba2017-12-08 21:13:42 +08001420 if (rec->buildid_all && !rec->timestamp_boundary)
Namhyung Kim61566812016-01-11 22:37:09 +09001421 rec->tool.sample = NULL;
1422
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001423 return perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001424}
1425
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02001426static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001427{
1428 int err;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001429 struct perf_tool *tool = data;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001430	/*
1431	 * For the guest kernel, when processing the record and report
1432	 * subcommands, we arrange the module mmaps prior to the guest
1433	 * kernel mmap and trigger a preload of the dso, because guest
1434	 * module symbols are loaded from guest kallsyms by default
1435	 * instead of from /lib/modules/XXX/XXX. This avoids missing
1436	 * symbols when the first address is in a module, not the kernel.
1437	 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001438 err = perf_event__synthesize_modules(tool, process_synthesized_event,
Arnaldo Carvalho de Melo743eb8682011-11-28 07:56:39 -02001439 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001440 if (err < 0)
1441 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03001442 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001443
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001444	/*
1445	 * We use _stext for the guest kernel because the guest kernel's
1446	 * /proc/kallsyms sometimes has no _text.
1447	 */
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001448 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
Adrian Hunter0ae617b2014-01-29 16:14:40 +02001449 machine);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001450 if (err < 0)
1451 pr_err("Couldn't record guest kernel [%d]'s reference"
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03001452 " relocation symbol.\n", machine->pid);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001453}
1454
Frederic Weisbecker98402802010-05-02 22:05:29 +02001455static struct perf_event_header finished_round_event = {
1456 .size = sizeof(struct perf_event_header),
1457 .type = PERF_RECORD_FINISHED_ROUND,
1458};
1459
Adrian Hunter3812d292022-06-10 14:33:15 +03001460static struct perf_event_header finished_init_event = {
1461 .size = sizeof(struct perf_event_header),
1462 .type = PERF_RECORD_FINISHED_INIT,
1463};
1464
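/*
 * With an --affinity mode other than 'sys', migrate the reading thread
 * onto the CPU mask of the map it is about to drain, so buffers are
 * consumed close to the CPUs that filled them.
 */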
Jiri Olsaa5830532019-07-27 20:30:53 +02001465static void record__adjust_affinity(struct record *rec, struct mmap *map)
Alexey Budankovf13de662019-01-22 20:50:57 +03001466{
1467 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001468 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
1469 thread->mask->affinity.nbits)) {
1470 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
1471 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
1472 map->affinity_mask.bits, thread->mask->affinity.nbits);
1473 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
1474 (cpu_set_t *)thread->mask->affinity.bits);
1475 if (verbose == 2) {
1476 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
1477 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
1478 }
Alexey Budankovf13de662019-01-22 20:50:57 +03001479 }
1480}
1481
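/*
 * Callback for zstd_compress_stream_to_records(): the first call
 * (increment == 0) lays out the PERF_RECORD_COMPRESSED header, and
 * subsequent calls grow header.size as compressed payload is appended.
 */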
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001482static size_t process_comp_header(void *record, size_t increment)
1483{
Jiri Olsa72932372019-08-28 15:57:16 +02001484 struct perf_record_compressed *event = record;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001485 size_t size = sizeof(*event);
1486
1487 if (increment) {
1488 event->header.size += increment;
1489 return increment;
1490 }
1491
1492 event->header.type = PERF_RECORD_COMPRESSED;
1493 event->header.size = size;
1494
1495 return size;
1496}
1497
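/*
 * Compress a chunk of ring buffer data into PERF_RECORD_COMPRESSED
 * records. In threaded (directory) mode each map file carries its own
 * zstd state and byte counters, otherwise the per-session ones are used.
 */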
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001498static size_t zstd_compress(struct perf_session *session, struct mmap *map,
1499 void *dst, size_t dst_size, void *src, size_t src_size)
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001500{
1501 size_t compressed;
Jiri Olsa72932372019-08-28 15:57:16 +02001502 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001503 struct zstd_data *zstd_data = &session->zstd_data;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001504
Alexey Bayduraev75f5f1f2022-01-17 21:34:30 +03001505 if (map && map->file)
1506 zstd_data = &map->zstd_data;
1507
1508 compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001509 max_record_size, process_comp_header);
1510
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03001511 if (map && map->file) {
1512 thread->bytes_transferred += src_size;
1513 thread->bytes_compressed += compressed;
1514 } else {
1515 session->bytes_transferred += src_size;
1516 session->bytes_compressed += compressed;
1517 }
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001518
1519 return compressed;
1520}
1521
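/*
 * Drain this thread's mmaps for the given evlist, pushing data either
 * directly or via AIO. With 'synch' set, each map's flush threshold is
 * temporarily forced to 1 so all remaining data is written out. A
 * FINISHED_ROUND event is emitted if anything was written (not needed
 * in directory mode, where per-CPU files are already sorted).
 */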
Jiri Olsa63503db2019-07-21 13:23:52 +02001522static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
Alexey Budankov470530b2019-03-18 20:40:26 +03001523 bool overwrite, bool synch)
Frederic Weisbecker98402802010-05-02 22:05:29 +02001524{
Jiri Olsadcabb502014-07-25 16:56:16 +02001525 u64 bytes_written = rec->bytes_written;
Peter Zijlstra0e2e63d2010-05-20 14:45:26 +02001526 int i;
David Ahern8d3eca22012-08-26 12:24:47 -06001527 int rc = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001528 int nr_mmaps;
1529 struct mmap **maps;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001530 int trace_fd = rec->data.file.fd;
Alexey Budankovef781122019-03-18 20:44:12 +03001531 off_t off = 0;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001532
Wang Nancb216862016-06-27 10:24:04 +00001533 if (!evlist)
1534 return 0;
Adrian Hunteref149c22015-04-09 18:53:45 +03001535
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001536 nr_mmaps = thread->nr_mmaps;
1537 maps = overwrite ? thread->overwrite_maps : thread->maps;
1538
Wang Nana4ea0ec2016-07-14 08:34:36 +00001539 if (!maps)
1540 return 0;
Wang Nancb216862016-06-27 10:24:04 +00001541
Wang Nan0b72d692017-12-04 16:51:07 +00001542 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
Wang Nan54cc54d2016-07-14 08:34:42 +00001543 return 0;
1544
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001545 if (record__aio_enabled(rec))
1546 off = record__aio_get_pos(trace_fd);
1547
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001548 for (i = 0; i < nr_mmaps; i++) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001549 u64 flush = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001550 struct mmap *map = maps[i];
Wang Nana4ea0ec2016-07-14 08:34:36 +00001551
Jiri Olsa547740f2019-07-27 22:07:44 +02001552 if (map->core.base) {
Alexey Budankovf13de662019-01-22 20:50:57 +03001553 record__adjust_affinity(rec, map);
Alexey Budankov470530b2019-03-18 20:40:26 +03001554 if (synch) {
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001555 flush = map->core.flush;
1556 map->core.flush = 1;
Alexey Budankov470530b2019-03-18 20:40:26 +03001557 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001558 if (!record__aio_enabled(rec)) {
Alexey Budankovef781122019-03-18 20:44:12 +03001559 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001560 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001561 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001562 rc = -1;
1563 goto out;
1564 }
1565 } else {
Alexey Budankovef781122019-03-18 20:44:12 +03001566 if (record__aio_push(rec, map, &off) < 0) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001567 record__aio_set_pos(trace_fd, off);
Alexey Budankov470530b2019-03-18 20:40:26 +03001568 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001569 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001570 rc = -1;
1571 goto out;
1572 }
David Ahern8d3eca22012-08-26 12:24:47 -06001573 }
Alexey Budankov470530b2019-03-18 20:40:26 +03001574 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001575 map->core.flush = flush;
David Ahern8d3eca22012-08-26 12:24:47 -06001576 }
Adrian Hunteref149c22015-04-09 18:53:45 +03001577
Jiri Olsae035f4c2018-09-13 14:54:05 +02001578 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001579 !rec->opts.auxtrace_sample_mode &&
Jiri Olsae035f4c2018-09-13 14:54:05 +02001580 record__auxtrace_mmap_read(rec, map) != 0) {
Adrian Hunteref149c22015-04-09 18:53:45 +03001581 rc = -1;
1582 goto out;
1583 }
Frederic Weisbecker98402802010-05-02 22:05:29 +02001584 }
1585
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001586 if (record__aio_enabled(rec))
1587 record__aio_set_pos(trace_fd, off);
1588
Jiri Olsadcabb502014-07-25 16:56:16 +02001589	/*
1590	 * Mark the round finished in case we wrote
1591	 * at least one event.
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001592	 *
1593	 * No need for round events in directory mode,
1594	 * because per-cpu maps and files contain data
1595	 * already sorted by the kernel.
Jiri Olsadcabb502014-07-25 16:56:16 +02001596	 */
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001597 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
Jiri Olsaded2b8f2018-09-13 14:54:06 +02001598 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
David Ahern8d3eca22012-08-26 12:24:47 -06001599
Wang Nan0b72d692017-12-04 16:51:07 +00001600 if (overwrite)
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03001601 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
David Ahern8d3eca22012-08-26 12:24:47 -06001602out:
1603 return rc;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001604}
1605
Alexey Budankov470530b2019-03-18 20:40:26 +03001606static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001607{
1608 int err;
1609
Alexey Budankov470530b2019-03-18 20:40:26 +03001610 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001611 if (err)
1612 return err;
1613
Alexey Budankov470530b2019-03-18 20:40:26 +03001614 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001615}
1616
Alexey Bayduraev396b6262022-01-17 21:34:25 +03001617static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
1618 void *arg __maybe_unused)
1619{
1620 struct perf_mmap *map = fda->priv[fd].ptr;
1621
1622 if (map)
1623 perf_mmap__put(map);
1624}
1625
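/*
 * Body of a parallel trace reading thread: acknowledge startup to the
 * main thread, then keep draining the mmaps, polling when no new
 * samples arrive, until POLLHUP on the control pipe requests
 * termination. A final synchronous pass flushes what is left before
 * the termination ack is sent.
 */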
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03001626static void *record__thread(void *arg)
1627{
1628 enum thread_msg msg = THREAD_MSG__READY;
1629 bool terminate = false;
1630 struct fdarray *pollfd;
1631 int err, ctlfd_pos;
1632
1633 thread = arg;
1634 thread->tid = gettid();
1635
1636 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1637 if (err == -1)
1638 pr_warning("threads[%d]: failed to notify on start: %s\n",
1639 thread->tid, strerror(errno));
1640
1641 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
1642
1643 pollfd = &thread->pollfd;
1644 ctlfd_pos = thread->ctlfd_pos;
1645
1646 for (;;) {
1647 unsigned long long hits = thread->samples;
1648
1649 if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1650 break;
1651
1652 if (hits == thread->samples) {
1653
1654 err = fdarray__poll(pollfd, -1);
1655			/*
1656			 * Propagate an error only if there is one. Ignore a positive
1657			 * number of returned events and an interrupted poll (EINTR).
1658			 */
1659 if (err > 0 || (err < 0 && errno == EINTR))
1660 err = 0;
1661 thread->waking++;
1662
1663 if (fdarray__filter(pollfd, POLLERR | POLLHUP,
1664 record__thread_munmap_filtered, NULL) == 0)
1665 break;
1666 }
1667
1668 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
1669 terminate = true;
1670 close(thread->pipes.msg[0]);
1671 thread->pipes.msg[0] = -1;
1672 pollfd->entries[ctlfd_pos].fd = -1;
1673 pollfd->entries[ctlfd_pos].events = 0;
1674 }
1675
1676 pollfd->entries[ctlfd_pos].revents = 0;
1677 }
1678 record__mmap_read_all(thread->rec, true);
1679
1680 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1681 if (err == -1)
1682 pr_warning("threads[%d]: failed to notify on termination: %s\n",
1683 thread->tid, strerror(errno));
1684
1685 return NULL;
1686}
1687
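/*
 * Start with all header features set, then clear the ones that do not
 * apply to this particular session.
 */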
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001688static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001689{
David Ahern57706ab2013-11-06 11:41:34 -07001690 struct perf_session *session = rec->session;
1691 int feat;
1692
1693 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1694 perf_header__set_feat(&session->header, feat);
1695
1696 if (rec->no_buildid)
1697 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1698
Ian Rogers378ef0f2022-12-05 14:59:39 -08001699#ifdef HAVE_LIBTRACEEVENT
Jiri Olsace9036a2019-07-21 13:24:23 +02001700 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001701 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
Ian Rogers378ef0f2022-12-05 14:59:39 -08001702#endif
David Ahern57706ab2013-11-06 11:41:34 -07001703
1704 if (!rec->opts.branch_stack)
1705 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001706
1707 if (!rec->opts.full_auxtrace)
1708 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001709
Alexey Budankovcf790512018-10-09 17:36:24 +03001710 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1711 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1712
Jiri Olsad1e325c2020-08-05 11:34:40 +02001713 if (!rec->opts.use_clockid)
1714 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1715
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001716 if (!record__threads_enabled(rec))
1717 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1718
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001719 if (!record__comp_enabled(rec))
1720 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001721
Jiri Olsaffa517a2015-10-25 15:51:43 +01001722 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001723}
1724
Wang Nane1ab48b2016-02-26 09:32:10 +00001725static void
1726record__finish_output(struct record *rec)
1727{
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001728 int i;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001729 struct perf_data *data = &rec->data;
1730 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001731
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001732 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001733 return;
1734
1735 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001736 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03001737 if (record__threads_enabled(rec)) {
1738 for (i = 0; i < data->dir.nr; i++)
1739 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
1740 }
Wang Nane1ab48b2016-02-26 09:32:10 +00001741
1742 if (!rec->no_buildid) {
1743 process_buildids(rec);
1744
1745 if (rec->buildid_all)
1746 dsos__hit_all(rec->session);
1747 }
1748 perf_session__write_header(rec->session, rec->evlist, fd, true);
1749
1751}
1752
Wang Nan4ea648a2016-07-14 08:34:47 +00001753static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +00001754{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001755 int err;
Jiri Olsa9749b902019-07-21 13:23:50 +02001756 struct perf_thread_map *thread_map;
Namhyung Kim41b740b2021-08-10 21:46:58 -07001757 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001758
Wang Nan4ea648a2016-07-14 08:34:47 +00001759 if (rec->opts.tail_synthesize != tail)
1760 return 0;
1761
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001762 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1763 if (thread_map == NULL)
1764 return -1;
1765
1766 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +00001767 process_synthesized_event,
1768 &rec->session->machines.host,
Namhyung Kim41b740b2021-08-10 21:46:58 -07001769 needs_mmap,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001770 rec->opts.sample_address);
Jiri Olsa7836e522019-07-21 13:24:20 +02001771 perf_thread_map__put(thread_map);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -03001772 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +00001773}
1774
Adrian Hunter3812d292022-06-10 14:33:15 +03001775static int write_finished_init(struct record *rec, bool tail)
1776{
1777 if (rec->opts.tail_synthesize != tail)
1778 return 0;
1779
1780 return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
1781}
1782
Wang Nan4ea648a2016-07-14 08:34:47 +00001783static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001784
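/*
 * Rotate the output: finish the current file, then switch to a new
 * timestamped perf.data.<timestamp>, resynthesizing tracking events so
 * each file is usable on its own. With --switch-max-files=N the oldest
 * file names are recycled once the limit is reached.
 */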
Wang Nanecfd7a92016-04-13 08:21:07 +00001785static int
1786record__switch_output(struct record *rec, bool at_exit)
1787{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001788 struct perf_data *data = &rec->data;
Wang Nanecfd7a92016-04-13 08:21:07 +00001789 int fd, err;
Andi Kleen03724b22019-03-14 15:49:55 -07001790 char *new_filename;
Wang Nanecfd7a92016-04-13 08:21:07 +00001791
1792	/* Same size: "2015122520103046" */
1793 char timestamp[] = "InvalidTimestamp";
1794
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001795 record__aio_mmap_read_sync(rec);
1796
Adrian Hunter3812d292022-06-10 14:33:15 +03001797 write_finished_init(rec, true);
1798
Wang Nan4ea648a2016-07-14 08:34:47 +00001799 record__synthesize(rec, true);
1800 if (target__none(&rec->opts.target))
1801 record__synthesize_workload(rec, true);
1802
Wang Nanecfd7a92016-04-13 08:21:07 +00001803 rec->samples = 0;
1804 record__finish_output(rec);
1805 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1806 if (err) {
1807 pr_err("Failed to get current timestamp\n");
1808 return -EINVAL;
1809 }
1810
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001811 fd = perf_data__switch(data, timestamp,
Wang Nanecfd7a92016-04-13 08:21:07 +00001812 rec->session->header.data_offset,
Andi Kleen03724b22019-03-14 15:49:55 -07001813 at_exit, &new_filename);
Wang Nanecfd7a92016-04-13 08:21:07 +00001814 if (fd >= 0 && !at_exit) {
1815 rec->bytes_written = 0;
1816 rec->session->header.data_size = 0;
1817 }
1818
1819 if (!quiet)
1820 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001821 data->path, timestamp);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001822
Andi Kleen03724b22019-03-14 15:49:55 -07001823 if (rec->switch_output.num_files) {
1824 int n = rec->switch_output.cur_file + 1;
1825
1826 if (n >= rec->switch_output.num_files)
1827 n = 0;
1828 rec->switch_output.cur_file = n;
1829 if (rec->switch_output.filenames[n]) {
1830 remove(rec->switch_output.filenames[n]);
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03001831 zfree(&rec->switch_output.filenames[n]);
Andi Kleen03724b22019-03-14 15:49:55 -07001832 }
1833 rec->switch_output.filenames[n] = new_filename;
1834 } else {
1835 free(new_filename);
1836 }
1837
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001838 /* Output tracking events */
Wang Nanbe7b0c92016-04-20 18:59:54 +00001839 if (!at_exit) {
Wang Nan4ea648a2016-07-14 08:34:47 +00001840 record__synthesize(rec, false);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001841
Wang Nanbe7b0c92016-04-20 18:59:54 +00001842		/*
1843		 * In 'perf record --switch-output' without -a,
1844		 * record__synthesize() in record__switch_output() won't
1845		 * generate tracking events because there's no thread_map
1846		 * in the evlist, so the newly created perf.data would lack
1847		 * map and comm information.
1848		 * Create a fake thread_map and directly call
1849		 * perf_event__synthesize_thread_map() for those events.
1850		 */
1851 if (target__none(&rec->opts.target))
Wang Nan4ea648a2016-07-14 08:34:47 +00001852 record__synthesize_workload(rec, false);
Adrian Hunter3812d292022-06-10 14:33:15 +03001853 write_finished_init(rec, false);
Wang Nanbe7b0c92016-04-20 18:59:54 +00001854 }
Wang Nanecfd7a92016-04-13 08:21:07 +00001855 return fd;
1856}
1857
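/*
 * Emit one synthesized PERF_RECORD_LOST_SAMPLES event with the lost
 * count read for a given (cpu, thread) pair, followed by an id sample
 * so the event can be attributed to the right evsel.
 */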
Namhyung Kim27c6f242023-03-14 16:42:31 -07001858static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
Namhyung Kime3a23262022-09-01 12:57:37 -07001859 struct perf_record_lost_samples *lost,
Namhyung Kim27c6f242023-03-14 16:42:31 -07001860 int cpu_idx, int thread_idx, u64 lost_count,
1861 u16 misc_flag)
Namhyung Kime3a23262022-09-01 12:57:37 -07001862{
Namhyung Kime3a23262022-09-01 12:57:37 -07001863 struct perf_sample_id *sid;
1864 struct perf_sample sample = {};
1865 int id_hdr_size;
1866
Namhyung Kim27c6f242023-03-14 16:42:31 -07001867 lost->lost = lost_count;
Namhyung Kime3a23262022-09-01 12:57:37 -07001868 if (evsel->core.ids) {
1869 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
1870 sample.id = sid->id;
1871 }
1872
1873 id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
1874 evsel->core.attr.sample_type, &sample);
1875 lost->header.size = sizeof(*lost) + id_hdr_size;
Namhyung Kim27c6f242023-03-14 16:42:31 -07001876 lost->header.misc = misc_flag;
Namhyung Kime3a23262022-09-01 12:57:37 -07001877 record__write(rec, NULL, lost, lost->header.size);
1878}
1879
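/*
 * At the end of the session, read each event's lost-sample count from
 * the kernel and emit LOST_SAMPLES records for the non-zero ones;
 * samples dropped by a BPF filter are flagged with
 * PERF_RECORD_MISC_LOST_SAMPLES_BPF.
 */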
1880static void record__read_lost_samples(struct record *rec)
1881{
1882 struct perf_session *session = rec->session;
1883 struct perf_record_lost_samples *lost;
1884 struct evsel *evsel;
1885
Namhyung Kimd031a002022-09-09 16:50:24 -07001886 /* there was an error during record__open */
1887 if (session->evlist == NULL)
1888 return;
1889
Namhyung Kime3a23262022-09-01 12:57:37 -07001890 lost = zalloc(PERF_SAMPLE_MAX_SIZE);
1891 if (lost == NULL) {
1892 pr_debug("Memory allocation failed\n");
1893 return;
1894 }
1895
1896 lost->header.type = PERF_RECORD_LOST_SAMPLES;
1897
1898 evlist__for_each_entry(session->evlist, evsel) {
1899 struct xyarray *xy = evsel->core.sample_id;
Namhyung Kim27c6f242023-03-14 16:42:31 -07001900 u64 lost_count;
Namhyung Kime3a23262022-09-01 12:57:37 -07001901
Namhyung Kimd031a002022-09-09 16:50:24 -07001902 if (xy == NULL || evsel->core.fd == NULL)
1903 continue;
Namhyung Kime3a23262022-09-01 12:57:37 -07001904 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) ||
1905 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) {
1906 pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n");
1907 continue;
1908 }
1909
1910 for (int x = 0; x < xyarray__max_x(xy); x++) {
1911 for (int y = 0; y < xyarray__max_y(xy); y++) {
Namhyung Kim27c6f242023-03-14 16:42:31 -07001912 struct perf_counts_values count;
1913
1914 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
1915 pr_debug("read LOST count failed\n");
1916 goto out;
1917 }
1918
1919 if (count.lost) {
1920 __record__save_lost_samples(rec, evsel, lost,
1921 x, y, count.lost, 0);
1922 }
Namhyung Kime3a23262022-09-01 12:57:37 -07001923 }
1924 }
Namhyung Kime3a23262022-09-01 12:57:37 -07001925
Namhyung Kim27c6f242023-03-14 16:42:31 -07001926 lost_count = perf_bpf_filter__lost_count(evsel);
1927 if (lost_count)
1928 __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
1929 PERF_RECORD_MISC_LOST_SAMPLES_BPF);
1930 }
1931out:
1932 free(lost);
Namhyung Kime3a23262022-09-01 12:57:37 -07001933}
1934
Ian Rogers8ed28c22022-10-24 11:19:07 -07001935static volatile sig_atomic_t workload_exec_errno;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001936
1937/*
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03001938 * evlist__prepare_workload() will send a SIGUSR1
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001939 * if the fork fails, since we asked for it by setting
1940 * want_signal to true.
1941 */
Namhyung Kim45604712014-05-12 09:47:24 +09001942static void workload_exec_failed_signal(int signo __maybe_unused,
1943 siginfo_t *info,
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001944 void *ucontext __maybe_unused)
1945{
1946 workload_exec_errno = info->si_value.sival_int;
1947 done = 1;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001948 child_finished = 1;
1949}
1950
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001951static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001952static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001953
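/*
 * Pick any mmapped perf_event control page; it is only needed to read
 * the hardware clock parameters when synthesizing the time conversion
 * event in record__synthesize().
 */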
Arnaldo Carvalho de Melodb0ea13c2020-11-30 15:19:40 -03001954static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001955{
Wang Nanb2cb6152016-07-14 08:34:39 +00001956 if (evlist) {
Jiri Olsa547740f2019-07-27 22:07:44 +02001957 if (evlist->mmap && evlist->mmap[0].core.base)
1958 return evlist->mmap[0].core.base;
1959 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1960 return evlist->overwrite_mmap[0].core.base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001961 }
Wang Nanee667f92016-06-27 10:24:05 +00001962 return NULL;
1963}
1964
Wang Nanc45628b2016-05-24 02:28:59 +00001965static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1966{
Arnaldo Carvalho de Melodb0ea13c2020-11-30 15:19:40 -03001967 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
Wang Nanee667f92016-06-27 10:24:05 +00001968 if (pc)
1969 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001970 return NULL;
1971}
1972
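/*
 * Synthesize the side-band metadata that the analysis tools need:
 * id index, auxtrace info, kernel and module mmaps, guest OS events,
 * extra attributes, thread and CPU maps, BPF and cgroup events, and
 * the pre-existing threads of the target. Thread synthesis may be
 * parallelized via --num-thread-synthesize.
 */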
Wang Nan4ea648a2016-07-14 08:34:47 +00001973static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001974{
1975 struct perf_session *session = rec->session;
1976 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001977 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001978 struct record_opts *opts = &rec->opts;
1979 struct perf_tool *tool = &rec->tool;
Wang Nanc45c86e2016-02-26 09:32:07 +00001980 int err = 0;
Stephane Eraniand99c22e2020-04-22 08:50:38 -07001981 event_op f = process_synthesized_event;
Wang Nanc45c86e2016-02-26 09:32:07 +00001982
Wang Nan4ea648a2016-07-14 08:34:47 +00001983 if (rec->opts.tail_synthesize != tail)
1984 return 0;
1985
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001986 if (data->is_pipe) {
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001987 err = perf_event__synthesize_for_pipe(tool, session, data,
Jiri Olsaa2015512018-03-14 10:22:04 +01001988 process_synthesized_event);
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001989 if (err < 0)
1990 goto out;
Jiri Olsaa2015512018-03-14 10:22:04 +01001991
Namhyung Kimc3a057d2021-07-19 15:31:52 -07001992 rec->bytes_written += err;
Wang Nanc45c86e2016-02-26 09:32:07 +00001993 }
1994
Wang Nanc45628b2016-05-24 02:28:59 +00001995 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001996 process_synthesized_event, machine);
1997 if (err)
1998 goto out;
1999
Adrian Hunterc0a6de02019-11-15 14:42:16 +02002000 /* Synthesize id_index before auxtrace_info */
Adrian Hunter6b080312022-06-10 14:33:13 +03002001 err = perf_event__synthesize_id_index(tool,
2002 process_synthesized_event,
2003 session->evlist, machine);
2004 if (err)
2005 goto out;
Adrian Hunterc0a6de02019-11-15 14:42:16 +02002006
Wang Nanc45c86e2016-02-26 09:32:07 +00002007 if (rec->opts.full_auxtrace) {
2008 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
2009 session, process_synthesized_event);
2010 if (err)
2011 goto out;
2012 }
2013
Arnaldo Carvalho de Melo78e1bc22020-11-30 15:07:49 -03002014 if (!evlist__exclude_kernel(rec->evlist)) {
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002015 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
2016 machine);
2017 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
2018 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2019 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00002020
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002021 err = perf_event__synthesize_modules(tool, process_synthesized_event,
2022 machine);
2023 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
2024 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2025 "Check /proc/modules permission or run as root.\n");
2026 }
Wang Nanc45c86e2016-02-26 09:32:07 +00002027
2028 if (perf_guest) {
2029 machines__process_guests(&session->machines,
2030 perf_event__synthesize_guest_os, tool);
2031 }
2032
Andi Kleenbfd8f722017-11-17 13:42:58 -08002033 err = perf_event__synthesize_extra_attr(&rec->tool,
2034 rec->evlist,
2035 process_synthesized_event,
2036 data->is_pipe);
2037 if (err)
2038 goto out;
2039
Jiri Olsa03617c22019-07-21 13:24:42 +02002040 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
Andi Kleen373565d2017-11-17 13:42:59 -08002041 process_synthesized_event,
2042 NULL);
2043 if (err < 0) {
2044 pr_err("Couldn't synthesize thread map.\n");
2045 return err;
2046 }
2047
Adrian Hunter7be1fed2022-05-24 10:54:30 +03002048 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
Andi Kleen373565d2017-11-17 13:42:59 -08002049 process_synthesized_event, NULL);
2050 if (err < 0) {
2051 pr_err("Couldn't synthesize cpu map.\n");
2052 return err;
2053 }
2054
Song Liue5416952019-03-11 22:30:41 -07002055 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08002056 machine, opts);
Adrian Hunterfaf59ec2022-09-07 19:24:58 +03002057 if (err < 0) {
Song Liu7b612e22019-01-17 08:15:19 -08002058 pr_warning("Couldn't synthesize bpf events.\n");
Adrian Hunterfaf59ec2022-09-07 19:24:58 +03002059 err = 0;
2060 }
Song Liu7b612e22019-01-17 08:15:19 -08002061
Namhyung Kim41b740b2021-08-10 21:46:58 -07002062 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
2063 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
2064 machine);
Adrian Hunterfaf59ec2022-09-07 19:24:58 +03002065 if (err < 0) {
Namhyung Kim41b740b2021-08-10 21:46:58 -07002066 pr_warning("Couldn't synthesize cgroup events.\n");
Adrian Hunterfaf59ec2022-09-07 19:24:58 +03002067 err = 0;
2068 }
Namhyung Kim41b740b2021-08-10 21:46:58 -07002069 }
Namhyung Kimab640692020-03-25 21:45:33 +09002070
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002071 if (rec->opts.nr_threads_synthesize > 1) {
Ian Rogers49c670b172022-08-26 09:42:31 -07002072 mutex_init(&synth_lock);
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002073 perf_set_multithreaded();
2074 f = process_locked_synthesized_event;
2075 }
2076
Namhyung Kim41b740b2021-08-10 21:46:58 -07002077 if (rec->opts.synth & PERF_SYNTH_TASK) {
2078 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
2079
2080 err = __machine__synthesize_threads(machine, tool, &opts->target,
2081 rec->evlist->core.threads,
2082 f, needs_mmap, opts->sample_address,
2083 rec->opts.nr_threads_synthesize);
2084 }
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002085
Ian Rogers49c670b172022-08-26 09:42:31 -07002086 if (rec->opts.nr_threads_synthesize > 1) {
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002087 perf_set_singlethreaded();
Ian Rogers49c670b172022-08-26 09:42:31 -07002088 mutex_destroy(&synth_lock);
2089 }
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002090
Wang Nanc45c86e2016-02-26 09:32:07 +00002091out:
2092 return err;
2093}
2094
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002095static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
2096{
2097 struct record *rec = data;
2098 pthread_kill(rec->thread_id, SIGUSR2);
2099 return 0;
2100}
2101
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03002102static int record__setup_sb_evlist(struct record *rec)
2103{
2104 struct record_opts *opts = &rec->opts;
2105
2106 if (rec->sb_evlist != NULL) {
2107 /*
2108 * We get here if --switch-output-event populated the
2109 * sb_evlist, so associate a callback that will send a SIGUSR2
2110 * to the main thread.
2111 */
2112 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
2113 rec->thread_id = pthread_self();
2114 }
Jin Yao1101c872020-08-05 10:29:37 +08002115#ifdef HAVE_LIBBPF_SUPPORT
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03002116 if (!opts->no_bpf_event) {
2117 if (rec->sb_evlist == NULL) {
2118 rec->sb_evlist = evlist__new();
2119
2120 if (rec->sb_evlist == NULL) {
2121				pr_err("Couldn't create side band evlist.\n");
2122 return -1;
2123 }
2124 }
2125
2126 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
2127			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
2128 return -1;
2129 }
2130 }
Jin Yao1101c872020-08-05 10:29:37 +08002131#endif
Arnaldo Carvalho de Melo08c83992020-11-30 09:40:10 -03002132 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03002133 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
2134 opts->no_bpf_event = true;
2135 }
2136
2137 return 0;
2138}
2139
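/*
 * Store a pair of reference timestamps in the header, one from the
 * clock selected with --clockid and one from gettimeofday(), so sample
 * times can later be correlated with wall-clock time.
 */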
Jiri Olsad1e325c2020-08-05 11:34:40 +02002140static int record__init_clock(struct record *rec)
2141{
2142 struct perf_session *session = rec->session;
2143 struct timespec ref_clockid;
2144 struct timeval ref_tod;
2145 u64 ref;
2146
2147 if (!rec->opts.use_clockid)
2148 return 0;
2149
Jiri Olsa9d88a1a12020-08-05 11:34:41 +02002150 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
2151 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
2152
Jiri Olsad1e325c2020-08-05 11:34:40 +02002153 session->header.env.clock.clockid = rec->opts.clockid;
2154
2155 if (gettimeofday(&ref_tod, NULL) != 0) {
2156 pr_err("gettimeofday failed, cannot set reference time.\n");
2157 return -1;
2158 }
2159
2160 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
2161 pr_err("clock_gettime failed, cannot set reference time.\n");
2162 return -1;
2163 }
2164
2165 ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
2166 (u64) ref_tod.tv_usec * NSEC_PER_USEC;
2167
2168 session->header.env.clock.tod_ns = ref;
2169
2170 ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
2171 (u64) ref_clockid.tv_nsec;
2172
2173 session->header.env.clock.clockid_ns = ref;
2174 return 0;
2175}
2176
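/*
 * Kick off an AUX area snapshot if the trigger is armed; a failed
 * start flips the trigger into the error state for the main loop to
 * report.
 */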
Adrian Hunterd20aff12020-09-01 12:37:57 +03002177static void hit_auxtrace_snapshot_trigger(struct record *rec)
2178{
2179 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2180 trigger_hit(&auxtrace_snapshot_trigger);
2181 auxtrace_record__snapshot_started = 1;
2182 if (auxtrace_record__snapshot_start(rec->itr))
2183 trigger_error(&auxtrace_snapshot_trigger);
2184 }
2185}
2186
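/*
 * On hybrid systems (more than one core PMU), qualify plain event
 * names with their PMU, e.g. "cycles" may become "cpu_core/cycles/",
 * so the output disambiguates between the PMUs.
 */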
Jin Yao91c0f5e2021-04-27 15:01:30 +08002187static void record__uniquify_name(struct record *rec)
2188{
2189 struct evsel *pos;
2190 struct evlist *evlist = rec->evlist;
2191 char *new_name;
2192 int ret;
2193
Ian Rogers94f9eb92023-05-27 00:22:09 -07002194 if (perf_pmus__num_core_pmus() == 1)
Jin Yao91c0f5e2021-04-27 15:01:30 +08002195 return;
2196
2197 evlist__for_each_entry(evlist, pos) {
2198 if (!evsel__is_hybrid(pos))
2199 continue;
2200
2201 if (strchr(pos->name, '/'))
2202 continue;
2203
2204 ret = asprintf(&new_name, "%s/%s/",
2205 pos->pmu_name, pos->name);
2206		if (ret > 0) { /* asprintf() returns -1 on failure */
2207 free(pos->name);
2208 pos->name = new_name;
2209 }
2210 }
2211}
2212
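/*
 * Ask a reader thread to terminate by closing the write end of its
 * message pipe (the thread sees POLLHUP), then wait for its
 * acknowledgement on the ack pipe.
 */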
Alexey Bayduraev1e5de7d2022-01-17 21:34:26 +03002213static int record__terminate_thread(struct record_thread *thread_data)
2214{
2215 int err;
2216 enum thread_msg ack = THREAD_MSG__UNDEFINED;
2217 pid_t tid = thread_data->tid;
2218
2219 close(thread_data->pipes.msg[1]);
2220 thread_data->pipes.msg[1] = -1;
2221 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
2222 if (err > 0)
2223 pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
2224 else
2225 pr_warning("threads[%d]: failed to receive termination notification from %d\n",
2226 thread->tid, tid);
2227
2228 return 0;
2229}
2230
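/*
 * Start the parallel reader threads with all signals blocked, so signal
 * handling stays in the main thread, pin them to their affinity masks
 * where supported, and wait for each start acknowledgement before
 * restoring the signal mask.
 */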
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002231static int record__start_threads(struct record *rec)
2232{
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002233 int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002234 struct record_thread *thread_data = rec->thread_data;
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002235 sigset_t full, mask;
2236 pthread_t handle;
2237 pthread_attr_t attrs;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002238
2239 thread = &thread_data[0];
2240
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002241 if (!record__threads_enabled(rec))
2242 return 0;
2243
2244 sigfillset(&full);
2245 if (sigprocmask(SIG_SETMASK, &full, &mask)) {
2246 pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
2247 return -1;
2248 }
2249
2250 pthread_attr_init(&attrs);
2251 pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
2252
2253 for (t = 1; t < nr_threads; t++) {
2254 enum thread_msg msg = THREAD_MSG__UNDEFINED;
2255
2256#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
2257 pthread_attr_setaffinity_np(&attrs,
2258 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
2259 (cpu_set_t *)(thread_data[t].mask->affinity.bits));
2260#endif
2261 if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
2262 for (tt = 1; tt < t; tt++)
2263				record__terminate_thread(&thread_data[tt]);
2264 pr_err("Failed to start threads: %s\n", strerror(errno));
2265 ret = -1;
2266 goto out_err;
2267 }
2268
2269 err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
2270 if (err > 0)
2271 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2272 thread_msg_tags[msg]);
2273 else
2274 pr_warning("threads[%d]: failed to receive start notification from %d\n",
2275 thread->tid, rec->thread_data[t].tid);
2276 }
2277
2278 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
2279 (cpu_set_t *)thread->mask->affinity.bits);
2280
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002281 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
2282
Alexey Bayduraev3217e9f2022-01-17 21:34:27 +03002283out_err:
2284 pthread_attr_destroy(&attrs);
2285
2286 if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
2287 pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
2288 ret = -1;
2289 }
2290
2291 return ret;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002292}
2293
2294static int record__stop_threads(struct record *rec)
2295{
2296 int t;
2297 struct record_thread *thread_data = rec->thread_data;
2298
Alexey Bayduraev1e5de7d2022-01-17 21:34:26 +03002299 for (t = 1; t < rec->nr_threads; t++)
2300 record__terminate_thread(&thread_data[t]);
2301
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03002302 for (t = 0; t < rec->nr_threads; t++) {
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002303 rec->samples += thread_data[t].samples;
Alexey Bayduraev610fbc02022-01-17 21:34:31 +03002304 if (!record__threads_enabled(rec))
2305 continue;
2306 rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2307 rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2308 pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
2309 thread_data[t].samples, thread_data[t].waking);
2310 if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
2311 pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
2312 thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
2313 else
2314 pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
2315 }
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002316
2317 return 0;
2318}
2319
2320static unsigned long record__waking(struct record *rec)
2321{
2322 int t;
2323 unsigned long waking = 0;
2324 struct record_thread *thread_data = rec->thread_data;
2325
2326 for (t = 0; t < rec->nr_threads; t++)
2327 waking += thread_data[t].waking;
2328
2329 return waking;
2330}
2331
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002332static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02002333{
David Ahern57706ab2013-11-06 11:41:34 -07002334 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09002335 int status = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03002336 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02002337 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002338 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002339 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002340 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002341 bool disabled = false, draining = false;
Namhyung Kim42aa2762015-01-29 17:06:48 +09002342 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002343 float ratio = 0;
Alexey Budankovacce0222020-07-17 10:07:50 +03002344 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002345
Namhyung Kim45604712014-05-12 09:47:24 +09002346 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02002347 signal(SIGCHLD, sig_handler);
2348 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06002349 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00002350 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002351
Hari Bathinif3b36142017-03-08 02:11:43 +05302352 if (rec->opts.record_namespaces)
2353 tool->namespace_events = true;
2354
Namhyung Kim8fb4b672020-03-25 21:45:34 +09002355 if (rec->opts.record_cgroup) {
2356#ifdef HAVE_FILE_HANDLE
2357 tool->cgroup_events = true;
2358#else
2359 pr_err("cgroup tracking is not supported\n");
2360 return -1;
2361#endif
2362 }
2363
Jiri Olsadc0c6122017-01-09 10:51:58 +01002364 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002365 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002366 if (rec->opts.auxtrace_snapshot_mode)
2367 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01002368 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002369 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002370 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002371 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00002372 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02002373
Namhyung Kim2681bd82021-07-19 15:31:49 -07002374 session = perf_session__new(data, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05302375 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09002376 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05302377 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02002378 }
2379
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03002380 if (record__threads_enabled(rec)) {
2381 if (perf_data__is_pipe(&rec->data)) {
2382 pr_err("Parallel trace streaming is not available in pipe mode.\n");
2383 return -1;
2384 }
2385 if (rec->opts.full_auxtrace) {
2386 pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
2387 return -1;
2388 }
2389 }
2390
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002391 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002392 rec->session = session;
2393
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002394 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2395 pr_err("Compression initialization failed.\n");
2396 return -1;
2397 }
Anand K Mistryda231332020-05-13 12:20:23 +10002398#ifdef HAVE_EVENTFD_SUPPORT
2399 done_fd = eventfd(0, EFD_NONBLOCK);
2400 if (done_fd < 0) {
2401 pr_err("Failed to create wakeup eventfd, error: %m\n");
2402 status = -1;
2403 goto out_delete_session;
2404 }
Yang Jihonge16c2ce2021-02-05 14:50:01 +08002405 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
Anand K Mistryda231332020-05-13 12:20:23 +10002406 if (err < 0) {
2407 pr_err("Failed to add wakeup eventfd to poll list\n");
2408 status = err;
2409 goto out_delete_session;
2410 }
2411#endif // HAVE_EVENTFD_SUPPORT
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002412
2413 session->header.env.comp_type = PERF_COMP_ZSTD;
2414 session->header.env.comp_level = rec->opts.comp_level;
2415
Adrian Huntereeb399b2019-10-04 11:31:21 +03002416 if (rec->opts.kcore &&
2417 !record__kcore_readable(&session->machines.host)) {
2418 pr_err("ERROR: kcore is not readable.\n");
2419 return -1;
2420 }
2421
Jiri Olsad1e325c2020-08-05 11:34:40 +02002422 if (record__init_clock(rec))
2423 return -1;
2424
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002425 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01002426
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02002427 if (forks) {
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03002428 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2429 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02002430 if (err < 0) {
2431 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09002432 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02002433 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02002434 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01002435 }
2436
Jiri Olsaad46e48c2018-03-02 17:13:54 +01002437	/*
2438	 * If we have just a single event and are sending data
2439	 * through a pipe, we need to force the id allocation,
2440	 * because we synthesize the event name through the pipe
2441	 * and need the id for that.
2442	 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002443 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01002444 rec->opts.sample_id = true;
2445
Jin Yao91c0f5e2021-04-27 15:01:30 +08002446 record__uniquify_name(rec);
2447
Adrian Hunterda406202022-09-12 11:34:11 +03002448 /* Debug message used by test scripts */
2449 pr_debug3("perf record opening and mmapping events\n");
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002450 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06002451 err = -1;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002452 goto out_free_threads;
David Ahern8d3eca22012-08-26 12:24:47 -06002453 }
Adrian Hunterda406202022-09-12 11:34:11 +03002454 /* Debug message used by test scripts */
2455 pr_debug3("perf record done opening and mmapping events\n");
Jiri Olsaf6fa4372019-08-06 15:14:05 +02002456 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002457
Adrian Huntereeb399b2019-10-04 11:31:21 +03002458 if (rec->opts.kcore) {
2459 err = record__kcore_copy(&session->machines.host, data);
2460 if (err) {
2461 pr_err("ERROR: Failed to copy kcore\n");
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002462 goto out_free_threads;
Adrian Huntereeb399b2019-10-04 11:31:21 +03002463 }
2464 }
2465
Adrian Huntercca84822015-08-19 17:29:21 +03002466 /*
2467 * Normally perf_session__new would do this, but it doesn't have the
2468 * evlist.
2469 */
Arnaldo Carvalho de Melo8cedf3a52020-06-17 09:29:48 -03002470 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
Adrian Huntercca84822015-08-19 17:29:21 +03002471 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
2472 rec->tool.ordered_events = false;
2473 }
2474
Ian Rogers9d2dc632023-03-11 18:15:42 -08002475 if (evlist__nr_groups(rec->evlist) == 0)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002476 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
2477
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002478 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09002479 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05002480 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002481 goto out_free_threads;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002482 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09002483 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002484 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002485 goto out_free_threads;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002486 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002487
Arnaldo Carvalho de Melob38d85e2020-04-24 12:24:51 -03002488 err = -1;
David Ahernd3665492012-02-06 15:27:52 -07002489 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01002490 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07002491 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01002492 "Use --no-buildid to profile anyway.\n");
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002493 goto out_free_threads;
Robert Richtere20960c2011-12-07 10:02:55 +01002494 }
2495
Arnaldo Carvalho de Melo23cbb412020-04-28 14:58:29 -03002496 err = record__setup_sb_evlist(rec);
2497 if (err)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002498 goto out_free_threads;
Song Liu657ee552019-03-11 22:30:50 -07002499
Wang Nan4ea648a2016-07-14 08:34:47 +00002500 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00002501 if (err < 0)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002502 goto out_free_threads;
David Ahern8d3eca22012-08-26 12:24:47 -06002503
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002504 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002505 struct sched_param param;
2506
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002507 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002508 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02002509 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06002510 err = -1;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002511 goto out_free_threads;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002512 }
2513 }
2514
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002515 if (record__start_threads(rec))
2516 goto out_free_threads;
2517
Jiri Olsa774cb492012-11-12 18:34:01 +01002518 /*
2519 * When perf is starting the traced process, all the events
2520 * (apart from group members) have enable_on_exec=1 set,
2521 * so don't spoil it by prematurely enabling them.
2522 */
Changbin Ducb4b9e62023-03-02 11:11:45 +08002523 if (!target__none(&opts->target) && !opts->target.initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02002524 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06002525
Peter Zijlstra856e9662009-12-16 17:55:55 +01002526 /*
2527 * Let the child rip
2528 */
Namhyung Kime803cf92015-09-22 09:24:55 +09002529 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01002530 struct machine *machine = &session->machines.host;
Namhyung Kime5bed5642015-09-30 10:45:24 +09002531 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05302532 pid_t tgid;
Namhyung Kime5bed5642015-09-30 10:45:24 +09002533
2534 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
2535 if (event == NULL) {
2536 err = -ENOMEM;
2537 goto out_child;
2538 }
2539
Namhyung Kime803cf92015-09-22 09:24:55 +09002540		/*
2541		 * Some H/W events are generated before the COMM event,
2542		 * which is emitted during exec(), so perf script
2543		 * cannot see a correct process name for those events.
2544		 * Synthesize a COMM event to prevent that.
2545		 */
Hari Bathinie907caf2017-03-08 02:11:51 +05302546 tgid = perf_event__synthesize_comm(tool, event,
2547 rec->evlist->workload.pid,
2548 process_synthesized_event,
2549 machine);
2550 free(event);
2551
2552 if (tgid == -1)
2553 goto out_child;
2554
2555 event = malloc(sizeof(event->namespaces) +
2556 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
2557 machine->id_hdr_size);
2558 if (event == NULL) {
2559 err = -ENOMEM;
2560 goto out_child;
2561 }
2562
2563 /*
2564 * Synthesize NAMESPACES event for the command specified.
2565 */
2566 perf_event__synthesize_namespaces(tool, event,
2567 rec->evlist->workload.pid,
2568 tgid, process_synthesized_event,
2569 machine);
Namhyung Kime5bed5642015-09-30 10:45:24 +09002570 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09002571
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03002572 evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09002573 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01002574
Changbin Ducb4b9e62023-03-02 11:11:45 +08002575 if (opts->target.initial_delay) {
Alexey Budankov68cd3b42020-07-17 10:07:03 +03002576 pr_info(EVLIST_DISABLED_MSG);
Changbin Ducb4b9e62023-03-02 11:11:45 +08002577 if (opts->target.initial_delay > 0) {
2578 usleep(opts->target.initial_delay * USEC_PER_MSEC);
Alexey Budankov68cd3b42020-07-17 10:07:03 +03002579 evlist__enable(rec->evlist);
2580 pr_info(EVLIST_ENABLED_MSG);
2581 }
Andi Kleen6619a532014-01-11 13:38:27 -08002582 }
2583
Adrian Hunter6657a092022-08-24 10:28:14 +03002584 err = event_enable_timer__start(rec->evlist->eet);
2585 if (err)
2586 goto out_child;
2587
Adrian Hunterda406202022-09-12 11:34:11 +03002588 /* Debug message used by test scripts */
2589 pr_debug3("perf record has started\n");
2590 fflush(stderr);
2591
Wang Nan5f9cf592016-04-20 18:59:49 +00002592 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002593 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00002594 perf_hooks__invoke_record_start();
Adrian Hunter3812d292022-06-10 14:33:15 +03002595
2596 /*
2597 * Must write FINISHED_INIT so it will be seen after all other
2598 * synthesized user events, but before any regular events.
2599 */
2600 err = write_finished_init(rec, false);
2601 if (err < 0)
2602 goto out_child;
2603
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002604 for (;;) {
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002605 unsigned long long hits = thread->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002606
Wang Nan057374642016-07-14 08:34:43 +00002607 /*
2608		 * It is possible for rec->evlist->bkw_mmap_state to be
2609		 * BKW_MMAP_EMPTY here: when done == true and
2610		 * hits != rec->samples in the previous round.
2611		 *
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002612		 * evlist__toggle_bkw_mmap ensures we never
Wang Nan057374642016-07-14 08:34:43 +00002613		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
2614 */
2615 if (trigger_is_hit(&switch_output_trigger) || done || draining)
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002616 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
Wang Nan057374642016-07-14 08:34:43 +00002617
Alexey Budankov470530b2019-03-18 20:40:26 +03002618 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00002619 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002620 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06002621 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09002622 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06002623 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002624
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002625 if (auxtrace_record__snapshot_started) {
2626 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00002627 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002628 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00002629 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002630 pr_err("AUX area tracing snapshot failed\n");
2631 err = -1;
2632 goto out_child;
2633 }
2634 }
2635
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002636 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00002637 /*
2638			 * If switch_output_trigger is hit, the data in the
2639			 * overwritable ring buffer should have been collected,
2640			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
2641			 *
2642			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
2643			 * record__mmap_read_all() didn't collect data from the
2644			 * overwritable ring buffer. Read again.
2645 */
2646 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2647 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002648 trigger_ready(&switch_output_trigger);
2649
Wang Nan057374642016-07-14 08:34:43 +00002650 /*
2651			 * Re-enable events in the overwrite ring buffer after
2652			 * record__mmap_read_all(): we should have collected
2653			 * data from it.
2654 */
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03002655 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
Wang Nan057374642016-07-14 08:34:43 +00002656
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002657 if (!quiet)
2658 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002659 record__waking(rec));
2660 thread->waking = 0;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002661 fd = record__switch_output(rec, false);
2662 if (fd < 0) {
2663 pr_err("Failed to switch to new file\n");
2664 trigger_error(&switch_output_trigger);
2665 err = fd;
2666 goto out_child;
2667 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01002668
2669 /* re-arm the alarm */
2670 if (rec->switch_output.time)
2671 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002672 }
2673
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002674 if (hits == thread->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002675 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002676 break;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002677 err = fdarray__poll(&thread->pollfd, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04002678 /*
2679			 * Propagate the error only if there is one. Ignore a positive
2680			 * number of returned events and interrupt errors.
2681 */
2682 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09002683 err = 0;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002684 thread->waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002685
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002686 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
2687 record__thread_munmap_filtered, NULL) == 0)
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03002688 draining = true;
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002689
Adrian Hunter6562c9a2022-08-24 10:28:10 +03002690 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
2691 if (err)
2692 goto out_child;
Peter Zijlstra8b412662009-09-17 19:59:05 +02002693 }
2694
Alexey Budankovacce0222020-07-17 10:07:50 +03002695 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2696 switch (cmd) {
Adrian Hunterd20aff12020-09-01 12:37:57 +03002697 case EVLIST_CTL_CMD_SNAPSHOT:
2698 hit_auxtrace_snapshot_trigger(rec);
2699 evlist__ctlfd_ack(rec->evlist);
2700 break;
Jiri Olsaf186cd62020-12-27 00:20:37 +01002701 case EVLIST_CTL_CMD_STOP:
2702 done = 1;
2703 break;
Alexey Budankovacce0222020-07-17 10:07:50 +03002704 case EVLIST_CTL_CMD_ACK:
2705 case EVLIST_CTL_CMD_UNSUPPORTED:
Jiri Olsa991ae4e2020-12-27 00:20:35 +01002706 case EVLIST_CTL_CMD_ENABLE:
2707 case EVLIST_CTL_CMD_DISABLE:
Jiri Olsa142544a2020-12-27 00:20:36 +01002708 case EVLIST_CTL_CMD_EVLIST:
Jiri Olsa47fddcb2020-12-27 00:20:38 +01002709 case EVLIST_CTL_CMD_PING:
Alexey Budankovacce0222020-07-17 10:07:50 +03002710 default:
2711 break;
2712 }
2713 }
2714
Adrian Hunter6657a092022-08-24 10:28:14 +03002715 err = event_enable_timer__process(rec->evlist->eet);
2716 if (err < 0)
2717 goto out_child;
2718 if (err) {
2719 err = 0;
2720 done = 1;
2721 }
2722
Jiri Olsa774cb492012-11-12 18:34:01 +01002723 /*
2724		 * When perf is starting the traced process, the events die with
2725		 * the process at the end and we wait for that. Thus there is no
2726		 * need to disable events in this case.
2727 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002728 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00002729 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02002730 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01002731 disabled = true;
2732 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002733 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002734
Wang Nan5f9cf592016-04-20 18:59:49 +00002735 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002736 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002737
Alexander Shishkince7b0e42019-08-06 17:41:01 +03002738 if (opts->auxtrace_snapshot_on_exit)
2739 record__auxtrace_snapshot_exit(rec);
2740
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002741 if (forks && workload_exec_errno) {
Arnaldo Carvalho de Melo3535a692021-04-14 09:32:14 -03002742 char msg[STRERR_BUFSIZE], strevsels[2048];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03002743 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melo3535a692021-04-14 09:32:14 -03002744
2745 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2746
2747 pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
2748 strevsels, argv[0], emsg);
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002749 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09002750 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03002751 }
2752
Namhyung Kime3d59112015-01-29 17:06:44 +09002753 if (!quiet)
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002754 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
2755 record__waking(rec));
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002756
Adrian Hunter3812d292022-06-10 14:33:15 +03002757 write_finished_init(rec, true);
2758
Wang Nan4ea648a2016-07-14 08:34:47 +00002759 if (target__none(&rec->opts.target))
2760 record__synthesize_workload(rec, true);
2761
Namhyung Kim45604712014-05-12 09:47:24 +09002762out_child:
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002763 record__stop_threads(rec);
Alexey Budankov470530b2019-03-18 20:40:26 +03002764 record__mmap_read_all(rec, true);
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002765out_free_threads:
Alexey Bayduraev415ccb52022-01-17 21:34:23 +03002766 record__free_thread_data(rec);
Alexey Bayduraev396b6262022-01-17 21:34:25 +03002767 evlist__finalize_ctlfd(rec->evlist);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002768 record__aio_mmap_read_sync(rec);
2769
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002770 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2771 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2772 session->header.env.comp_ratio = ratio + 0.5;
2773 }
2774
Namhyung Kim45604712014-05-12 09:47:24 +09002775 if (forks) {
2776 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02002777
Namhyung Kim45604712014-05-12 09:47:24 +09002778 if (!child_finished)
2779 kill(rec->evlist->workload.pid, SIGTERM);
2780
2781 wait(&exit_status);
2782
2783 if (err < 0)
2784 status = err;
2785 else if (WIFEXITED(exit_status))
2786 status = WEXITSTATUS(exit_status);
2787 else if (WIFSIGNALED(exit_status))
2788 signr = WTERMSIG(exit_status);
2789 } else
2790 status = err;
2791
Namhyung Kimedc41a12022-05-18 15:47:21 -07002792 if (rec->off_cpu)
2793 rec->bytes_written += off_cpu_write(rec->session);
2794
Namhyung Kime3a23262022-09-01 12:57:37 -07002795 record__read_lost_samples(rec);
Wang Nan4ea648a2016-07-14 08:34:47 +00002796 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09002797 /* this will be recalculated during process_buildids() */
2798 rec->samples = 0;
2799
Wang Nanecfd7a92016-04-13 08:21:07 +00002800 if (!err) {
2801 if (!rec->timestamp_filename) {
2802 record__finish_output(rec);
2803 } else {
2804 fd = record__switch_output(rec, true);
2805 if (fd < 0) {
2806 status = fd;
2807 goto out_delete_session;
2808 }
2809 }
2810 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002811
Wang Nana0748652016-11-26 07:03:28 +00002812 perf_hooks__invoke_record_end();
2813
Namhyung Kime3d59112015-01-29 17:06:44 +09002814 if (!err && !quiet) {
2815 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00002816 const char *postfix = rec->timestamp_filename ?
2817 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09002818
Adrian Hunteref149c22015-04-09 18:53:45 +03002819 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09002820 scnprintf(samples, sizeof(samples),
2821 " (%" PRIu64 " samples)", rec->samples);
2822 else
2823 samples[0] = '\0';
2824
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002825 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002826 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002827 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03002828 if (ratio) {
2829 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
2830 rec->session->bytes_transferred / 1024.0 / 1024.0,
2831 ratio);
2832 }
2833 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09002834 }
2835
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002836out_delete_session:
Anand K Mistryda231332020-05-13 12:20:23 +10002837#ifdef HAVE_EVENTFD_SUPPORT
Ian Rogers304f0a22022-10-23 18:10:24 -07002838 if (done_fd >= 0) {
2839 fd = done_fd;
2840 done_fd = -1;
2841
2842 close(fd);
2843 }
Anand K Mistryda231332020-05-13 12:20:23 +10002844#endif
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002845 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002846 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07002847
2848 if (!opts->no_bpf_event)
Arnaldo Carvalho de Melo08c83992020-11-30 09:40:10 -03002849 evlist__stop_sb_thread(rec->sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09002850 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02002851}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002852
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002853static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002854{
Kan Liangaad2b212015-01-05 13:23:04 -05002855 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01002856
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002857 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002858
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002859 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002860 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002861 callchain->dump_size);
2862}
2863
2864int record_opts__parse_callchain(struct record_opts *record,
2865 struct callchain_param *callchain,
2866 const char *arg, bool unset)
2867{
2868 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002869 callchain->enabled = !unset;
2870
2871 /* --no-call-graph */
2872 if (unset) {
2873 callchain->record_mode = CALLCHAIN_NONE;
2874 pr_debug("callchain: disabled\n");
2875 return 0;
2876 }
2877
2878 ret = parse_callchain_record_opt(arg, callchain);
2879 if (!ret) {
2880 /* Enable data address sampling for DWARF unwind. */
2881 if (callchain->record_mode == CALLCHAIN_DWARF)
2882 record->sample_address = true;
2883 callchain_debug(callchain);
2884 }
2885
2886 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002887}
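
/*
 * Illustrative command lines for the parsing above (a sketch; the
 * workload name is a placeholder). The modes mirror the str[] table in
 * callchain_debug(), and dwarf additionally turns on sample_address:
 *
 *   perf record --call-graph fp ./workload
 *   perf record --call-graph dwarf,8192 ./workload   # record_mode[,record_size]
 *   perf record --call-graph lbr ./workload
 */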
2888
Kan Liangc421e802015-07-29 05:42:12 -04002889int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002890 const char *arg,
2891 int unset)
2892{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03002893 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02002894}
2895
Kan Liangc421e802015-07-29 05:42:12 -04002896int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002897 const char *arg __maybe_unused,
2898 int unset __maybe_unused)
2899{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002900 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04002901
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002902 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002903
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002904 if (callchain->record_mode == CALLCHAIN_NONE)
2905 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002906
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002907 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002908 return 0;
2909}
2910
Jiri Olsaeb853e82014-02-03 12:44:42 +01002911static int perf_record_config(const char *var, const char *value, void *cb)
2912{
Namhyung Kim7a29c082015-12-15 10:49:56 +09002913 struct record *rec = cb;
2914
2915 if (!strcmp(var, "record.build-id")) {
2916 if (!strcmp(value, "cache"))
2917 rec->no_buildid_cache = false;
2918 else if (!strcmp(value, "no-cache"))
2919 rec->no_buildid_cache = true;
2920 else if (!strcmp(value, "skip"))
2921 rec->no_buildid = true;
Jiri Olsae29386c2020-12-14 11:54:57 +01002922 else if (!strcmp(value, "mmap"))
2923 rec->buildid_mmap = true;
Namhyung Kim7a29c082015-12-15 10:49:56 +09002924 else
2925 return -1;
2926 return 0;
2927 }
Yisheng Xiecff17202018-03-12 19:25:57 +08002928 if (!strcmp(var, "record.call-graph")) {
2929 var = "call-graph.record-mode";
2930 return perf_default_config(var, value, cb);
2931 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03002932#ifdef HAVE_AIO_SUPPORT
2933 if (!strcmp(var, "record.aio")) {
2934 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2935 if (!rec->opts.nr_cblocks)
2936 rec->opts.nr_cblocks = nr_cblocks_default;
2937 }
2938#endif
Jiri Olsa9bce13e2021-12-09 21:04:25 +01002939 if (!strcmp(var, "record.debuginfod")) {
2940 rec->debuginfod.urls = strdup(value);
2941 if (!rec->debuginfod.urls)
2942 return -ENOMEM;
2943 rec->debuginfod.set = true;
2944 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01002945
Yisheng Xiecff17202018-03-12 19:25:57 +08002946 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002947}
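
/*
 * Example ~/.perfconfig snippet exercising the keys handled above
 * (illustrative only; the debuginfod URL is a placeholder):
 *
 *   [record]
 *           build-id = cache        # or: no-cache, skip, mmap
 *           call-graph = dwarf      # forwarded as call-graph.record-mode
 *           aio = 4                 # only honoured with HAVE_AIO_SUPPORT
 *           debuginfod = https://debuginfod.example.com
 */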
2948
Adrian Hunter6657a092022-08-24 10:28:14 +03002949static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset)
2950{
2951 struct record *rec = (struct record *)opt->value;
2952
2953 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
2954}
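
/*
 * Illustrative -D/--delay values forwarded to
 * evlist__parse_event_enable_time() above (a sketch; the workload is a
 * placeholder, see the option help for the full syntax):
 *
 *   perf record -D 500 ./workload           # enable events after 500 ms
 *   perf record -D 10-20,30-40 ./workload   # enable only in those ms windows
 *   perf record -D -1 -a                    # start with events disabled
 */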
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002955
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002956static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2957{
2958 struct record_opts *opts = (struct record_opts *)opt->value;
2959
2960 if (unset || !str)
2961 return 0;
2962
2963 if (!strcasecmp(str, "node"))
2964 opts->affinity = PERF_AFFINITY_NODE;
2965 else if (!strcasecmp(str, "cpu"))
2966 opts->affinity = PERF_AFFINITY_CPU;
2967
2968 return 0;
2969}
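
/*
 * Illustrative usage of the affinity modes parsed above (a sketch;
 * any other string silently keeps the default affinity):
 *
 *   perf record --affinity=node -a sleep 1   # PERF_AFFINITY_NODE
 *   perf record --affinity=cpu -a sleep 1    # PERF_AFFINITY_CPU
 */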
2970
Alexey Bayduraev7954f712022-01-17 21:34:21 +03002971static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
2972{
2973 mask->nbits = nr_bits;
2974 mask->bits = bitmap_zalloc(mask->nbits);
2975 if (!mask->bits)
2976 return -ENOMEM;
2977
2978 return 0;
2979}
2980
2981static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
2982{
2983 bitmap_free(mask->bits);
2984 mask->nbits = 0;
2985}
2986
2987static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
2988{
2989 int ret;
2990
2991 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
2992 if (ret) {
2993 mask->affinity.bits = NULL;
2994 return ret;
2995 }
2996
2997 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
2998 if (ret) {
2999 record__mmap_cpu_mask_free(&mask->maps);
3000 mask->maps.bits = NULL;
3001 }
3002
3003 return ret;
3004}
3005
3006static void record__thread_mask_free(struct thread_mask *mask)
3007{
3008 record__mmap_cpu_mask_free(&mask->maps);
3009 record__mmap_cpu_mask_free(&mask->affinity);
3010}
3011
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003012static int record__parse_threads(const struct option *opt, const char *str, int unset)
3013{
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003014 int s;
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003015 struct record_opts *opts = opt->value;
3016
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003017 if (unset || !str || !strlen(str)) {
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003018 opts->threads_spec = THREAD_SPEC__CPU;
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003019 } else {
3020 for (s = 1; s < THREAD_SPEC__MAX; s++) {
3021 if (s == THREAD_SPEC__USER) {
3022 opts->threads_user_spec = strdup(str);
3023 if (!opts->threads_user_spec)
3024 return -ENOMEM;
3025 opts->threads_spec = THREAD_SPEC__USER;
3026 break;
3027 }
3028 if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
3029 opts->threads_spec = s;
3030 break;
3031 }
3032 }
3033 }
3034
3035 if (opts->threads_spec == THREAD_SPEC__USER)
3036 pr_debug("threads_spec: %s\n", opts->threads_user_spec);
3037 else
3038 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003039
3040 return 0;
3041}
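
/*
 * Illustrative --threads specs accepted above (a sketch based on
 * thread_spec_tags and the user spec branch; the masks are examples):
 *
 *   perf record --threads -a               # default: one thread per CPU
 *   perf record --threads=core -a          # one thread per core
 *   perf record --threads=numa -a          # one thread per NUMA node
 *   perf record --threads=0-3/0:4-7/4 -a   # user maps/affinity spec
 */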
3042
Jiwei Sun6d575812019-10-22 16:09:01 +08003043static int parse_output_max_size(const struct option *opt,
3044 const char *str, int unset)
3045{
3046 unsigned long *s = (unsigned long *)opt->value;
3047 static struct parse_tag tags_size[] = {
3048 { .tag = 'B', .mult = 1 },
3049 { .tag = 'K', .mult = 1 << 10 },
3050 { .tag = 'M', .mult = 1 << 20 },
3051 { .tag = 'G', .mult = 1 << 30 },
3052 { .tag = 0 },
3053 };
3054 unsigned long val;
3055
3056 if (unset) {
3057 *s = 0;
3058 return 0;
3059 }
3060
3061 val = parse_tag_value(str, tags_size);
3062 if (val != (unsigned long) -1) {
3063 *s = val;
3064 return 0;
3065 }
3066
3067 return -1;
3068}
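
/*
 * Illustrative --max-size value consumed above; parse_tag_value()
 * accepts the B/K/M/G suffixes from tags_size (a sketch):
 *
 *   perf record --max-size=200M -a   # limit perf.data to 200 MiB
 */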
3069
Adrian Huntere9db1312015-04-09 18:53:46 +03003070static int record__parse_mmap_pages(const struct option *opt,
3071 const char *str,
3072 int unset __maybe_unused)
3073{
3074 struct record_opts *opts = opt->value;
3075 char *s, *p;
3076 unsigned int mmap_pages;
3077 int ret;
3078
3079 if (!str)
3080 return -EINVAL;
3081
3082 s = strdup(str);
3083 if (!s)
3084 return -ENOMEM;
3085
3086 p = strchr(s, ',');
3087 if (p)
3088 *p = '\0';
3089
3090 if (*s) {
Arnaldo Carvalho de Melo25f847022020-11-30 15:09:45 -03003091 ret = __evlist__parse_mmap_pages(&mmap_pages, s);
Adrian Huntere9db1312015-04-09 18:53:46 +03003092 if (ret)
3093 goto out_free;
3094 opts->mmap_pages = mmap_pages;
3095 }
3096
3097 if (!p) {
3098 ret = 0;
3099 goto out_free;
3100 }
3101
Arnaldo Carvalho de Melo25f847022020-11-30 15:09:45 -03003102 ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
Adrian Huntere9db1312015-04-09 18:53:46 +03003103 if (ret)
3104 goto out_free;
3105
3106 opts->auxtrace_mmap_pages = mmap_pages;
3107
3108out_free:
3109 free(s);
3110 return ret;
3111}
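
/*
 * Illustrative -m/--mmap-pages values for the parser above; the value
 * after the comma sets the AUX area tracing pages, and both parts
 * accept page counts or B/K/M/G-suffixed sizes (a sketch):
 *
 *   perf record -m 512 ./workload      # 512 data pages
 *   perf record -m 4M,64K ./workload   # data size, AUX area size
 */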
3112
Alexandre Truong7248e302021-12-17 15:45:15 +00003113void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
3114{
3115}
3116
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003117static int parse_control_option(const struct option *opt,
3118 const char *str,
3119 int unset __maybe_unused)
3120{
Adrian Hunter9864a662020-09-01 12:37:53 +03003121 struct record_opts *opts = opt->value;
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003122
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003123 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
3124}
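
/*
 * Illustrative control FIFO session for the option parsed above
 * (a sketch; the fifo names are placeholders):
 *
 *   mkfifo ctl.fifo ack.fifo
 *   perf record --control=fifo:ctl.fifo,ack.fifo -D -1 -a &
 *   echo enable > ctl.fifo    # completion ack arrives on ack.fifo
 *   echo disable > ctl.fifo
 */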
3125
Jiri Olsa0c582442017-01-09 10:51:59 +01003126static void switch_output_size_warn(struct record *rec)
3127{
Jiri Olsa9521b5f2019-07-28 12:45:35 +02003128 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
Jiri Olsa0c582442017-01-09 10:51:59 +01003129 struct switch_output *s = &rec->switch_output;
3130
3131 wakeup_size /= 2;
3132
3133 if (s->size < wakeup_size) {
3134 char buf[100];
3135
3136 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
3137		pr_warning("WARNING: switch-output data size is lower than "
3138			   "the wakeup kernel buffer size (%s); "
3139			   "expect bigger perf.data sizes\n", buf);
3140 }
3141}
3142
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003143static int switch_output_setup(struct record *rec)
3144{
3145 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01003146 static struct parse_tag tags_size[] = {
3147 { .tag = 'B', .mult = 1 },
3148 { .tag = 'K', .mult = 1 << 10 },
3149 { .tag = 'M', .mult = 1 << 20 },
3150 { .tag = 'G', .mult = 1 << 30 },
3151 { .tag = 0 },
3152 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01003153 static struct parse_tag tags_time[] = {
3154 { .tag = 's', .mult = 1 },
3155 { .tag = 'm', .mult = 60 },
3156 { .tag = 'h', .mult = 60*60 },
3157 { .tag = 'd', .mult = 60*60*24 },
3158 { .tag = 0 },
3159 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01003160 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003161
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003162 /*
3163	 * If we're using --switch-output-event, then we imply
3164	 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
3165	 * thread to its parent.
3166 */
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003167 if (rec->switch_output_event_set) {
3168 if (record__threads_enabled(rec)) {
3169 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
3170 return 0;
3171 }
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003172 goto do_signal;
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003173 }
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003174
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003175 if (!s->set)
3176 return 0;
3177
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003178 if (record__threads_enabled(rec)) {
3179 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
3180 return 0;
3181 }
3182
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003183 if (!strcmp(s->str, "signal")) {
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003184do_signal:
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003185 s->signal = true;
3186 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01003187 goto enabled;
3188 }
3189
3190 val = parse_tag_value(s->str, tags_size);
3191 if (val != (unsigned long) -1) {
3192 s->size = val;
3193 pr_debug("switch-output with %s size threshold\n", s->str);
3194 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003195 }
3196
Jiri Olsabfacbe32017-01-09 10:52:00 +01003197 val = parse_tag_value(s->str, tags_time);
3198 if (val != (unsigned long) -1) {
3199 s->time = val;
3200 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
3201 s->str, s->time);
3202 goto enabled;
3203 }
3204
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003205 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01003206
3207enabled:
3208 rec->timestamp_filename = true;
3209 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01003210
3211 if (s->size && !rec->opts.no_buffering)
3212 switch_output_size_warn(rec);
3213
Jiri Olsadc0c6122017-01-09 10:51:58 +01003214 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003215}
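
/*
 * Illustrative --switch-output thresholds handled above: sizes go
 * through tags_size, times through tags_time, and all variants imply
 * timestamped output file names (a sketch):
 *
 *   perf record --switch-output -a       # rotate on SIGUSR2
 *   perf record --switch-output=2G -a    # rotate every 2 GiB written
 *   perf record --switch-output=10m -a   # rotate every 10 minutes
 */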
3216
Namhyung Kime5b2c202014-10-23 00:15:46 +09003217static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02003218 "perf record [<options>] [<command>]",
3219 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003220 NULL
3221};
Namhyung Kime5b2c202014-10-23 00:15:46 +09003222const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003223
Arnaldo Carvalho de Melo6e0a9b3d2019-11-14 12:15:34 -03003224static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
3225 struct perf_sample *sample, struct machine *machine)
3226{
3227 /*
3228	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3229	 * so there is no need to add them twice.
3230 */
3231 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3232 return 0;
3233 return perf_event__process_mmap(tool, event, sample, machine);
3234}
3235
3236static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
3237 struct perf_sample *sample, struct machine *machine)
3238{
3239 /*
3240	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3241	 * so there is no need to add them twice.
3242 */
3243 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3244 return 0;
3245
3246 return perf_event__process_mmap2(tool, event, sample, machine);
3247}
3248
Adrian Hunter66286ed2021-05-03 09:42:22 +03003249static int process_timestamp_boundary(struct perf_tool *tool,
3250 union perf_event *event __maybe_unused,
3251 struct perf_sample *sample,
3252 struct machine *machine __maybe_unused)
3253{
3254 struct record *rec = container_of(tool, struct record, tool);
3255
3256 set_timestamp_boundary(rec, sample->time);
3257 return 0;
3258}
3259
Namhyung Kim41b740b2021-08-10 21:46:58 -07003260static int parse_record_synth_option(const struct option *opt,
3261 const char *str,
3262 int unset __maybe_unused)
3263{
3264 struct record_opts *opts = opt->value;
3265 char *p = strdup(str);
3266
3267 if (p == NULL)
3268 return -1;
3269
3270 opts->synth = parse_synth_opt(p);
3271 free(p);
3272
3273 if (opts->synth < 0) {
3274 pr_err("Invalid synth option: %s\n", str);
3275 return -1;
3276 }
3277 return 0;
3278}
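
/*
 * Illustrative --synth values handled above; the full set is documented
 * with the option in the table below (a sketch):
 *
 *   perf record --synth=no -a     # skip event synthesis entirely
 *   perf record --synth=task -a   # synthesize only task-related events
 */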
3279
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003280/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003281 * XXX Ideally would be local to cmd_record() and passed to a record__new
3282 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003283 * after cmd_record() exits, but since record_options need to be accessible to
3284 * builtin-script, leave it here.
3285 *
3286 * At least we don't touch it in all the other functions here directly.
3287 *
3288 * Just say no to tons of global variables, sigh.
3289 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003290static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003291 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08003292 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003293 .mmap_pages = UINT_MAX,
3294 .user_freq = UINT_MAX,
3295 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03003296 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09003297 .target = {
3298 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02003299 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09003300 },
Alexey Budankov470530b2019-03-18 20:40:26 +03003301 .mmap_flush = MMAP_FLUSH_DEFAULT,
Stephane Eraniand99c22e2020-04-22 08:50:38 -07003302 .nr_threads_synthesize = 1,
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003303 .ctl_fd = -1,
3304 .ctl_fd_ack = -1,
Namhyung Kim41b740b2021-08-10 21:46:58 -07003305 .synth = PERF_SYNTH_ALL,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003306 },
Namhyung Kime3d59112015-01-29 17:06:44 +09003307 .tool = {
3308 .sample = process_sample_event,
3309 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03003310 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09003311 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05303312 .namespaces = perf_event__process_namespaces,
Arnaldo Carvalho de Melo6e0a9b3d2019-11-14 12:15:34 -03003313 .mmap = build_id__process_mmap,
3314 .mmap2 = build_id__process_mmap2,
Adrian Hunter66286ed2021-05-03 09:42:22 +03003315 .itrace_start = process_timestamp_boundary,
3316 .aux = process_timestamp_boundary,
Adrian Huntercca84822015-08-19 17:29:21 +03003317 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09003318 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003319};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02003320
Namhyung Kim76a26542015-10-22 23:28:32 +09003321const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
3322 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03003323
Wang Nan0aab2132016-06-16 08:02:41 +00003324static bool dry_run;
3325
Ian Rogers411ad222023-05-02 15:38:36 -07003326static struct parse_events_option_args parse_events_option_args = {
3327 .evlistp = &record.evlist,
3328};
3329
3330static struct parse_events_option_args switch_output_parse_events_option_args = {
3331 .evlistp = &record.sb_evlist,
3332};
3333
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003334/*
3335 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3336 * with it and switch to using the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03003337 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -03003338 * evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003339 * using pipes, etc.
3340 */
Jiri Olsaefd21302017-01-03 09:19:55 +01003341static struct option __record_options[] = {
Ian Rogers411ad222023-05-02 15:38:36 -07003342 OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02003343 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02003344 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003345 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08003346 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00003347 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3348 NULL, "don't record events from perf itself",
3349 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09003350 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03003351 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003352 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03003353 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003354 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003355 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03003356 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03003357 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003358 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02003359 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003360 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003361 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003362 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02003363 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003364 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01003365 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02003366 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02003367 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3368 &record.opts.no_inherit_set,
3369 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00003370 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3371 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00003372 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Wei Lia060c1f2020-08-19 11:19:47 +08003373 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03003374 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3375 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03003376 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
3377 "profile at this frequency",
3378 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03003379 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3380 "number of mmap data pages and AUX area tracing mmap pages",
3381 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03003382 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3383 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
3384 record__mmap_flush_parse),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03003385 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02003386 NULL, "enables call-graph recording" ,
3387 &record_callchain_opt),
3388 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09003389 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02003390 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10003391 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02003392 "be more verbose (show counter open errors, etc)"),
James Clarka527c2c2022-10-18 10:41:36 +01003393 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003394 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02003395 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02003396 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04003397 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3398 "Record the sample physical addresses"),
Kan Liang542b88f2020-11-30 09:27:53 -08003399 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3400 "Record the sampled data address data page size"),
Kan Liangc1de7f32021-01-05 11:57:49 -08003401 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3402 "Record the sampled code address (ip) page size"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02003403 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter61110882022-06-15 08:25:11 +03003404 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3405 "Record the sample identifier"),
Adrian Hunter3abebc552015-07-06 14:51:01 +03003406 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
3407 &record.opts.sample_time_set,
3408 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01003409 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
3410 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003411 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02003412 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00003413 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3414 &record.no_buildid_cache_set,
3415 "do not update the buildid cache"),
3416 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3417 &record.no_buildid_set,
3418 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003419 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02003420 "monitor event in cgroup name only",
3421 parse_cgroups),
Adrian Hunter6657a092022-08-24 10:28:14 +03003422 OPT_CALLBACK('D', "delay", &record, "ms",
3423 "ms to wait before starting measurement after program start (-1: start with events disabled), "
3424 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3425 record__parse_event_enable_time),
Adrian Huntereeb399b2019-10-04 11:31:21 +03003426 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09003427 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
3428 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01003429
3430 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3431 "branch any", "sample any taken branches",
3432 parse_branch_stack),
3433
3434 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3435 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01003436 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01003437 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
3438 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07003439 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
3440 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02003441 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3442 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02003443 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3444 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07003445 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07003446 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3447 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07003448 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08003449 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3450 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02003451 OPT_CALLBACK('k', "clockid", &record.opts,
3452 "clockid", "clockid to use for events, see clock_gettime()",
3453 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03003454 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
3455 "opts", "AUX area tracing Snapshot Mode", ""),
Adrian Hunterc0a6de02019-11-15 14:42:16 +02003456 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3457 "opts", "sample AUX area", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08003458 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04003459 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05303460 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
3461 "Record namespaces events"),
Namhyung Kim8fb4b672020-03-25 21:45:34 +09003462 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3463 "Record cgroup events"),
Adrian Hunter16b4b4e2020-05-28 15:08:58 +03003464 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3465 &record.opts.record_switch_events_set,
3466 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01003467 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3468 "Configure all used events to run in kernel space.",
3469 PARSE_OPT_EXCLUSIVE),
3470 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3471 "Configure all used events to run in user space.",
3472 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01003473 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3474 "collect kernel callchains"),
3475 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3476 "collect user callchains"),
He Kuang7efe0e02015-12-14 10:39:23 +00003477 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
3478 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09003479 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3480 "Record build-id of all DSOs regardless of hits"),
Jiri Olsae29386c2020-12-14 11:54:57 +01003481 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3482 "Record build-id in map events"),
Wang Nanecfd7a92016-04-13 08:21:07 +00003483 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3484 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08003485 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3486 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01003487 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07003488 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
3489 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01003490 "signal"),
Ian Rogers411ad222023-05-02 15:38:36 -07003491 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3492 &record.switch_output_event_set, "switch output event",
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03003493 "switch output event selector. use 'perf list' to list available events",
3494 parse_events_option_new_evlist),
Andi Kleen03724b22019-03-14 15:49:55 -07003495 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3496 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00003497 OPT_BOOLEAN(0, "dry-run", &dry_run,
3498 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03003499#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03003500 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
3501 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03003502 record__aio_parse),
3503#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03003504 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3505 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3506 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003507#ifdef HAVE_ZSTD_SUPPORT
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03003508 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3509 "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003510 record__parse_comp_level),
3511#endif
Jiwei Sun6d575812019-10-22 16:09:01 +08003512 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3513 "size", "Limit the maximum size of the output file", parse_output_max_size),
Stephane Eraniand99c22e2020-04-22 08:50:38 -07003514 OPT_UINTEGER(0, "num-thread-synthesize",
3515 &record.opts.nr_threads_synthesize,
3516 "number of threads to run for event synthesis"),
Stephane Eranian70943492020-05-05 11:29:43 -07003517#ifdef HAVE_LIBPFM
3518 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3519 "libpfm4 event selector. use 'perf list' to list available events",
3520 parse_libpfm_events_option),
3521#endif
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003522 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
Adrian Hunterd20aff12020-09-01 12:37:57 +03003523 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
3524 "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
Adrian Huntera8fcbd22020-09-02 13:57:07 +03003525 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3526 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
Alexey Budankov1d078cc2020-07-17 10:08:23 +03003527 parse_control_option),
Namhyung Kim41b740b2021-08-10 21:46:58 -07003528 OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
3529 "Fine-tune event synthesis: default=all", parse_record_synth_option),
Jiri Olsa9bce13e2021-12-09 21:04:25 +01003530 OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
3531 &record.debuginfod.set, "debuginfod urls",
3532 "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
3533 "system"),
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003534 OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
3535 "write collected trace data into several data files using parallel threads",
3536 record__parse_threads),
Namhyung Kimedc41a12022-05-18 15:47:21 -07003537 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003538 OPT_END()
3539};
3540
Namhyung Kime5b2c202014-10-23 00:15:46 +09003541struct option *record_options = __record_options;
3542
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303543static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003544{
Ian Rogers02555712022-05-02 21:17:52 -07003545 struct perf_cpu cpu;
3546 int idx;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003547
Alexey Bayduraev23380e42022-04-13 18:46:40 -07003548 if (cpu_map__is_dummy(cpus))
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303549 return 0;
Alexey Bayduraev23380e42022-04-13 18:46:40 -07003550
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303551 perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
Adrian Hunterca76d7d2022-09-15 15:26:11 +03003552 if (cpu.cpu == -1)
3553 continue;
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303554		/* Return ENODEV if the input cpu is greater than max cpu */
3555 if ((unsigned long)cpu.cpu > mask->nbits)
3556 return -ENODEV;
Sean Christopherson49bd97c2022-11-19 01:34:46 +00003557 __set_bit(cpu.cpu, mask->bits);
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303558 }
3559
3560 return 0;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003561}
3562
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003563static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
3564{
3565 struct perf_cpu_map *cpus;
3566
3567 cpus = perf_cpu_map__new(mask_spec);
3568 if (!cpus)
3569 return -ENOMEM;
3570
3571 bitmap_zero(mask->bits, mask->nbits);
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303572 if (record__mmap_cpu_mask_init(mask, cpus))
3573 return -ENODEV;
3574
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003575 perf_cpu_map__put(cpus);
3576
3577 return 0;
3578}
3579
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003580static void record__free_thread_masks(struct record *rec, int nr_threads)
3581{
3582 int t;
3583
3584 if (rec->thread_masks)
3585 for (t = 0; t < nr_threads; t++)
3586 record__thread_mask_free(&rec->thread_masks[t]);
3587
3588 zfree(&rec->thread_masks);
3589}
3590
3591static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3592{
3593 int t, ret;
3594
3595 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3596 if (!rec->thread_masks) {
3597 pr_err("Failed to allocate thread masks\n");
3598 return -ENOMEM;
3599 }
3600
3601 for (t = 0; t < nr_threads; t++) {
3602 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3603 if (ret) {
3604 pr_err("Failed to allocate thread masks[%d]\n", t);
3605 goto out_free;
3606 }
3607 }
3608
3609 return 0;
3610
3611out_free:
3612 record__free_thread_masks(rec, nr_threads);
3613
3614 return ret;
3615}
3616
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003617static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3618{
3619 int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
3620
3621 ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3622 if (ret)
3623 return ret;
3624
3625 rec->nr_threads = nr_cpus;
3626 pr_debug("nr_threads: %d\n", rec->nr_threads);
3627
3628 for (t = 0; t < rec->nr_threads; t++) {
Sean Christopherson49bd97c2022-11-19 01:34:46 +00003629 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3630 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
Yang Jihong7c0a6142022-12-20 11:57:01 +08003631 if (verbose > 0) {
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003632 pr_debug("thread_masks[%d]: ", t);
3633 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3634 pr_debug("thread_masks[%d]: ", t);
3635 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3636 }
3637 }
3638
3639 return 0;
3640}
3641
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003642static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3643 const char **maps_spec, const char **affinity_spec,
3644 u32 nr_spec)
3645{
3646 u32 s;
3647 int ret = 0, t = 0;
3648 struct mmap_cpu_mask cpus_mask;
3649 struct thread_mask thread_mask, full_mask, *thread_masks;
3650
3651 ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
3652 if (ret) {
3653 pr_err("Failed to allocate CPUs mask\n");
3654 return ret;
3655 }
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303656
3657 ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
3658 if (ret) {
3659 pr_err("Failed to init cpu mask\n");
3660 goto out_free_cpu_mask;
3661 }
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003662
3663 ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
3664 if (ret) {
3665 pr_err("Failed to allocate full mask\n");
3666 goto out_free_cpu_mask;
3667 }
3668
3669 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3670 if (ret) {
3671 pr_err("Failed to allocate thread mask\n");
3672 goto out_free_full_and_cpu_masks;
3673 }
3674
3675 for (s = 0; s < nr_spec; s++) {
3676 ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
3677 if (ret) {
3678 pr_err("Failed to initialize maps thread mask\n");
3679 goto out_free;
3680 }
3681 ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
3682 if (ret) {
3683 pr_err("Failed to initialize affinity thread mask\n");
3684 goto out_free;
3685 }
3686
3687 /* ignore invalid CPUs but do not allow empty masks */
3688 if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
3689 cpus_mask.bits, thread_mask.maps.nbits)) {
3690 pr_err("Empty maps mask: %s\n", maps_spec[s]);
3691 ret = -EINVAL;
3692 goto out_free;
3693 }
3694 if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
3695 cpus_mask.bits, thread_mask.affinity.nbits)) {
3696 pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
3697 ret = -EINVAL;
3698 goto out_free;
3699 }
3700
3701 /* do not allow intersection with other masks (full_mask) */
3702 if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
3703 thread_mask.maps.nbits)) {
3704 pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
3705 ret = -EINVAL;
3706 goto out_free;
3707 }
3708 if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
3709 thread_mask.affinity.nbits)) {
3710 pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
3711 ret = -EINVAL;
3712 goto out_free;
3713 }
3714
3715 bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
3716 thread_mask.maps.bits, full_mask.maps.nbits);
3717 bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
3718 thread_mask.affinity.bits, full_mask.maps.nbits);
3719
3720 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3721 if (!thread_masks) {
3722 pr_err("Failed to reallocate thread masks\n");
3723 ret = -ENOMEM;
3724 goto out_free;
3725 }
3726 rec->thread_masks = thread_masks;
3727 rec->thread_masks[t] = thread_mask;
Yang Jihong7c0a6142022-12-20 11:57:01 +08003728 if (verbose > 0) {
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003729 pr_debug("thread_masks[%d]: ", t);
3730 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3731 pr_debug("thread_masks[%d]: ", t);
3732 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3733 }
3734 t++;
3735 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3736 if (ret) {
3737 pr_err("Failed to allocate thread mask\n");
3738 goto out_free_full_and_cpu_masks;
3739 }
3740 }
3741 rec->nr_threads = t;
3742 pr_debug("nr_threads: %d\n", rec->nr_threads);
3743 if (!rec->nr_threads)
3744 ret = -EINVAL;
3745
3746out_free:
3747 record__thread_mask_free(&thread_mask);
3748out_free_full_and_cpu_masks:
3749 record__thread_mask_free(&full_mask);
3750out_free_cpu_mask:
3751 record__mmap_cpu_mask_free(&cpus_mask);
3752
3753 return ret;
3754}
3755
3756static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3757{
3758 int ret;
3759 struct cpu_topology *topo;
3760
3761 topo = cpu_topology__new();
3762 if (!topo) {
3763 pr_err("Failed to allocate CPU topology\n");
3764 return -ENOMEM;
3765 }
3766
3767 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3768 topo->core_cpus_list, topo->core_cpus_lists);
3769 cpu_topology__delete(topo);
3770
3771 return ret;
3772}
3773
3774static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3775{
3776 int ret;
3777 struct cpu_topology *topo;
3778
3779 topo = cpu_topology__new();
3780 if (!topo) {
3781 pr_err("Failed to allocate CPU topology\n");
3782 return -ENOMEM;
3783 }
3784
3785 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3786 topo->package_cpus_list, topo->package_cpus_lists);
3787 cpu_topology__delete(topo);
3788
3789 return ret;
3790}
3791
3792static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3793{
3794 u32 s;
3795 int ret;
3796 const char **spec;
3797 struct numa_topology *topo;
3798
3799 topo = numa_topology__new();
3800 if (!topo) {
3801 pr_err("Failed to allocate NUMA topology\n");
3802 return -ENOMEM;
3803 }
3804
3805 spec = zalloc(topo->nr * sizeof(char *));
3806 if (!spec) {
3807 pr_err("Failed to allocate NUMA spec\n");
3808 ret = -ENOMEM;
3809 goto out_delete_topo;
3810 }
3811 for (s = 0; s < topo->nr; s++)
3812 spec[s] = topo->nodes[s].cpus;
3813
3814 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3815
3816 zfree(&spec);
3817
3818out_delete_topo:
3819 numa_topology__delete(topo);
3820
3821 return ret;
3822}
3823
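/*
 * Masks from an explicit user spec: a ':'-separated list of
 * <maps mask>/<affinity mask> pairs, one pair per data streaming
 * thread. For illustration (hypothetical CPU numbers):
 *
 *   perf record --threads=0-3/0-3:4-7/4-7 ...
 *
 * would create two threads, each reading the mmaps of, and affined to,
 * its own four CPUs.
 */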
3824static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3825{
3826 int t, ret;
3827 u32 s, nr_spec = 0;
3828 char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
3829 char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
3830
3831 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3832 spec = strtok_r(user_spec, ":", &spec_ptr);
3833 if (spec == NULL)
3834 break;
3835 pr_debug2("threads_spec[%d]: %s\n", t, spec);
3836 mask = strtok_r(spec, "/", &mask_ptr);
3837 if (mask == NULL)
3838 break;
3839 pr_debug2(" maps mask: %s\n", mask);
3840 tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
3841 if (!tmp_spec) {
3842 pr_err("Failed to reallocate maps spec\n");
3843 ret = -ENOMEM;
3844 goto out_free;
3845 }
3846 maps_spec = tmp_spec;
3847 maps_spec[nr_spec] = dup_mask = strdup(mask);
3848 if (!maps_spec[nr_spec]) {
3849 pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
3850 ret = -ENOMEM;
3851 goto out_free;
3852 }
3853 mask = strtok_r(NULL, "/", &mask_ptr);
3854 if (mask == NULL) {
3855 pr_err("Invalid thread maps or affinity specs\n");
3856 ret = -EINVAL;
3857 goto out_free;
3858 }
3859 pr_debug2(" affinity mask: %s\n", mask);
3860 tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
3861 if (!tmp_spec) {
3862 pr_err("Failed to reallocate affinity spec\n");
3863 ret = -ENOMEM;
3864 goto out_free;
3865 }
3866 affinity_spec = tmp_spec;
3867 affinity_spec[nr_spec] = strdup(mask);
3868 if (!affinity_spec[nr_spec]) {
3869 pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
3870 ret = -ENOMEM;
3871 goto out_free;
3872 }
3873 dup_mask = NULL;
3874 nr_spec++;
3875 }
3876
3877 ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3878 (const char **)affinity_spec, nr_spec);
3879
3880out_free:
3881 free(dup_mask);
3882 for (s = 0; s < nr_spec; s++) {
3883 if (maps_spec)
3884 free(maps_spec[s]);
3885 if (affinity_spec)
3886 free(affinity_spec[s]);
3887 }
3888 free(affinity_spec);
3889 free(maps_spec);
3890
3891 return ret;
3892}
3893
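/*
 * Default (single-threaded) mode: one mask whose maps side covers all
 * mapped CPUs; only the maps mask is initialized here, so no affinity
 * is imposed on the tool's thread.
 */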
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003894static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3895{
3896 int ret;
3897
3898 ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3899 if (ret)
3900 return ret;
3901
Athira Rajeevcbd7bfc2022-09-05 19:49:29 +05303902	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus)) {
		/* Free the mask allocated above so this error path does not leak it. */
		record__free_thread_masks(rec, 1);
3903		return -ENODEV;
	}
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003904
3905 rec->nr_threads = 1;
3906
3907 return 0;
3908}
3909
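/*
 * Pick the thread mask scheme from --threads: single-threaded writing
 * by default, otherwise masks derived per CPU, core, package, NUMA
 * node, or from a user-supplied spec.
 */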
3910static int record__init_thread_masks(struct record *rec)
3911{
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003912 int ret = 0;
Adrian Hunter7be1fed2022-05-24 10:54:30 +03003913 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003914
Alexey Bayduraev06380a82022-01-17 21:34:32 +03003915 if (!record__threads_enabled(rec))
3916 return record__init_thread_default_masks(rec, cpus);
3917
Adrian Hunter7be1fed2022-05-24 10:54:30 +03003918 if (evlist__per_thread(rec->evlist)) {
Alexey Bayduraev23380e42022-04-13 18:46:40 -07003919		pr_err("--per-thread option is mutually exclusive with parallel streaming mode.\n");
3920 return -EINVAL;
3921 }
3922
Alexey Bayduraevf466e5e2022-01-17 21:34:33 +03003923 switch (rec->opts.threads_spec) {
3924 case THREAD_SPEC__CPU:
3925 ret = record__init_thread_cpu_masks(rec, cpus);
3926 break;
3927 case THREAD_SPEC__CORE:
3928 ret = record__init_thread_core_masks(rec, cpus);
3929 break;
3930 case THREAD_SPEC__PACKAGE:
3931 ret = record__init_thread_package_masks(rec, cpus);
3932 break;
3933 case THREAD_SPEC__NUMA:
3934 ret = record__init_thread_numa_masks(rec, cpus);
3935 break;
3936 case THREAD_SPEC__USER:
3937 ret = record__init_thread_user_masks(rec, cpus);
3938 break;
3939 default:
3940 break;
3941 }
3942
3943 return ret;
Alexey Bayduraev7954f712022-01-17 21:34:21 +03003944}
3945
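/*
 * cmd_record - entry point of 'perf record': parse the options, set up
 * the evlist and the thread masks, then hand off to __cmd_record().
 */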
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03003946int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003947{
Adrian Hunteref149c22015-04-09 18:53:45 +03003948 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03003949 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09003950 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003951
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03003952 setlocale(LC_ALL, "");
3953
Namhyung Kimedc41a12022-05-18 15:47:21 -07003954#ifndef HAVE_BPF_SKEL
3955# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
Arnaldo Carvalho de Melo9a2d5172023-05-06 18:07:37 -03003956 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
Namhyung Kimedc41a12022-05-18 15:47:21 -07003957# undef set_nobuild
3958#endif
3959
Alexey Budankov9d2ed642019-01-22 20:47:43 +03003960 rec->opts.affinity = PERF_AFFINITY_SYS;
3961
Jiri Olsa0f98b112019-07-21 13:23:55 +02003962 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03003963 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02003964 return -ENOMEM;
3965
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03003966 err = perf_config(perf_record_config, rec);
3967 if (err)
3968 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01003969
Tom Zanussibca647a2010-11-10 08:11:30 -06003970 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02003971 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09003972 if (quiet)
3973 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01003974
James Clark7cc72552021-10-18 14:48:42 +01003975 err = symbol__validate_sym_arguments();
3976 if (err)
3977 return err;
3978
Jiri Olsa9bce13e2021-12-09 21:04:25 +01003979 perf_debuginfod_setup(&record.debuginfod);
3980
Jiri Olsa483635a2017-02-17 18:00:18 +01003981 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03003982 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01003983 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02003984
Namhyung Kimbea03402012-04-26 14:15:15 +09003985 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09003986 usage_with_options_msg(record_usage, record_options,
3987 "cgroup monitoring only available in system-wide mode");
3988
Stephane Eranian023695d2011-02-14 11:20:01 +02003989 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03003990
Jiri Olsae29386c2020-12-14 11:54:57 +01003991 if (rec->buildid_mmap) {
3992 if (!perf_can_record_build_id()) {
3993 pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
3994 err = -EINVAL;
3995 goto out_opts;
3996 }
3997 pr_debug("Enabling build id in mmap2 events.\n");
3998 /* Enable mmap build id synthesizing. */
3999 symbol_conf.buildid_mmap2 = true;
4000 /* Enable perf_event_attr::build_id bit. */
4001 rec->opts.build_id = true;
4002 /* Disable build id cache. */
4003 rec->no_buildid = true;
4004 }
4005
Namhyung Kim4f2abe92021-05-27 11:28:35 -07004006 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
4007 pr_err("Kernel has no cgroup sampling support.\n");
4008 err = -EINVAL;
4009 goto out_opts;
4010 }
4011
Adrian Hunterf42c0ce2022-06-10 14:33:12 +03004012 if (rec->opts.kcore)
4013 rec->opts.text_poke = true;
4014
Alexey Bayduraev56f735f2022-01-17 21:34:28 +03004015 if (rec->opts.kcore || record__threads_enabled(rec))
Adrian Huntereeb399b2019-10-04 11:31:21 +03004016 rec->data.is_dir = true;
4017
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03004018 if (record__threads_enabled(rec)) {
4019 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
4020 pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
4021 goto out_opts;
4022 }
4023 if (record__aio_enabled(rec)) {
4024 pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
4025 goto out_opts;
4026 }
4027 }
4028
Alexey Budankov504c1ad2019-03-18 20:44:42 +03004029 if (rec->opts.comp_level != 0) {
4030 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
4031 rec->no_buildid = true;
4032 }
4033
Adrian Hunterb757bb02015-07-21 12:44:04 +03004034 if (rec->opts.record_switch_events &&
4035 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09004036 ui__error("kernel does not support recording context switch events\n");
4037 parse_options_usage(record_usage, record_options, "switch-events", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03004038 err = -EINVAL;
4039 goto out_opts;
Adrian Hunterb757bb02015-07-21 12:44:04 +03004040 }
Stephane Eranian023695d2011-02-14 11:20:01 +02004041
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01004042 if (switch_output_setup(rec)) {
4043 parse_options_usage(record_usage, record_options, "switch-output", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03004044 err = -EINVAL;
4045 goto out_opts;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01004046 }
4047
Jiri Olsabfacbe32017-01-09 10:52:00 +01004048 if (rec->switch_output.time) {
4049 signal(SIGALRM, alarm_sig_handler);
4050 alarm(rec->switch_output.time);
4051 }
4052
Andi Kleen03724b22019-03-14 15:49:55 -07004053 if (rec->switch_output.num_files) {
4054		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
4055						      sizeof(char *));
Adrian Huntera8fcbd22020-09-02 13:57:07 +03004056		if (!rec->switch_output.filenames) {
4057			err = -ENOMEM;
4058 goto out_opts;
4059 }
Andi Kleen03724b22019-03-14 15:49:55 -07004060 }
4061
Alexey Bayduraevb5f25112022-01-17 21:34:34 +03004062 if (rec->timestamp_filename && record__threads_enabled(rec)) {
4063 rec->timestamp_filename = false;
4064 pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
4065 }
4066
Adrian Hunter1b36c032016-09-23 17:38:39 +03004067 /*
4068 * Allow aliases to facilitate the lookup of symbols for address
4069 * filters. Refer to auxtrace_parse_filters().
4070 */
4071 symbol_conf.allow_aliases = true;
4072
4073 symbol__init(NULL);
4074
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02004075 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03004076 if (err)
4077 goto out;
4078
Wang Nan0aab2132016-06-16 08:02:41 +00004079 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03004080 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00004081
Adrian Hunteref149c22015-04-09 18:53:45 +03004082 err = -ENOMEM;
4083
Wang Nan0c1d46a2016-04-20 18:59:52 +00004084 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02004085 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01004086 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00004087 /*
4088 * In 'perf record --switch-output', disable buildid
4089 * generation by default to reduce data file switching
4090		 * overhead. Still generate buildids if they are explicitly
4091		 * required, using
4092 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01004093 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00004094 * --no-no-buildid-cache
4095 *
4096 * Following code equals to:
4097 *
4098 * if ((rec->no_buildid || !rec->no_buildid_set) &&
4099 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
4100 * disable_buildid_cache();
4101 */
4102 bool disable = true;
4103
4104 if (rec->no_buildid_set && !rec->no_buildid)
4105 disable = false;
4106 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
4107 disable = false;
4108 if (disable) {
4109 rec->no_buildid = true;
4110 rec->no_buildid_cache = true;
4111 disable_buildid_cache();
4112 }
4113 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02004114
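	/*
	 * Overwrite mode keeps only the most recent ring buffer contents,
	 * so collect the non-sample events (fork, comm, mmap, ...) when
	 * finalizing the output instead of at startup.
	 */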
Wang Nan4ea648a2016-07-14 08:34:47 +00004115 if (record.opts.overwrite)
4116 record.opts.tail_synthesize = true;
4117
Jin Yaob53a0752021-04-27 15:01:26 +08004118 if (rec->evlist->core.nr_entries == 0) {
Ian Rogers7b100982023-05-27 00:21:49 -07004119 bool can_profile_kernel = perf_event_paranoid_check(1);
Jin Yaob53a0752021-04-27 15:01:26 +08004120
Ian Rogers7b100982023-05-27 00:21:49 -07004121 err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
4122 if (err)
Jin Yaob53a0752021-04-27 15:01:26 +08004123 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02004124 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02004125
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02004126 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
4127 rec->opts.no_inherit = true;
4128
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03004129 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004130 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03004131 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01004132 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004133 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09004134
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03004135 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004136 if (err) {
4137 int saved_errno = errno;
4138
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03004139 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09004140 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004141
4142 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03004143 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004144 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02004145
Mengting Zhangca800062017-12-13 15:01:53 +08004146	/* Enable ignoring missing threads when the -u/-p option is specified. */
4147 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01004148
Ian Rogers5ac72632023-05-27 00:21:47 -07004149 evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
Jin Yao1d3351e2021-07-23 14:34:33 +08004150
Alexandre Truong7248e302021-12-17 15:45:15 +00004151 if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
4152 arch__add_leaf_frame_record_opts(&rec->opts);
4153
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09004154 err = -ENOMEM;
Martin Liška1bf7d832022-08-12 13:40:49 +02004155 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
4156 if (rec->opts.target.pid != NULL) {
4157 pr_err("Couldn't create thread/CPU maps: %s\n",
4158 errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
4159 goto out;
4160		} else
4161			usage_with_options(record_usage, record_options);
4163 }
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02004164
Adrian Hunteref149c22015-04-09 18:53:45 +03004165 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
4166 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03004167 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03004168
Namhyung Kim61566812016-01-11 22:37:09 +09004169 /*
4170 * We take all buildids when the file contains
4171	 * AUX area tracing data, because we do not decode the
4172	 * trace; decoding would take too long.
4173 */
4174 if (rec->opts.full_auxtrace)
4175 rec->buildid_all = true;
4176
Adrian Hunter246eba82020-05-12 15:19:18 +03004177 if (rec->opts.text_poke) {
4178 err = record__config_text_poke(rec->evlist);
4179 if (err) {
4180 pr_err("record__config_text_poke failed, error %d\n", err);
4181 goto out;
4182 }
4183 }
4184
Namhyung Kimedc41a12022-05-18 15:47:21 -07004185 if (rec->off_cpu) {
4186 err = record__config_off_cpu(rec);
4187 if (err) {
4188 pr_err("record__config_off_cpu failed, error %d\n", err);
4189 goto out;
4190 }
4191 }
4192
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03004193 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03004194 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03004195 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02004196 }
4197
Alexey Bayduraev7954f712022-01-17 21:34:21 +03004198 err = record__init_thread_masks(rec);
4199 if (err) {
4200 pr_err("Failed to initialize parallel data streaming masks\n");
4201 goto out;
4202 }
4203
Alexey Budankov93f20c02018-11-06 12:07:19 +03004204 if (rec->opts.nr_cblocks > nr_cblocks_max)
4205 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03004206 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03004207
Alexey Budankov9d2ed642019-01-22 20:47:43 +03004208 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03004209 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03004210
Alexey Budankov51255a82019-03-18 20:42:19 +03004211 if (rec->opts.comp_level > comp_level_max)
4212 rec->opts.comp_level = comp_level_max;
4213 pr_debug("comp level: %d\n", rec->opts.comp_level);
4214
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02004215 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03004216out:
Jiri Olsac12995a2019-07-21 13:23:56 +02004217 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03004218 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03004219 auxtrace_record__free(rec->itr);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03004220out_opts:
Alexey Bayduraev7954f712022-01-17 21:34:21 +03004221 record__free_thread_masks(rec, rec->nr_threads);
4222 rec->nr_threads = 0;
Adrian Hunteree7fe312020-09-03 15:29:37 +03004223 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03004224 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02004225}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03004226
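/*
 * SIGUSR2 handler: take an AUX area snapshot and/or request an output
 * file switch when --switch-output=signal is in effect.
 */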
4227static void snapshot_sig_handler(int sig __maybe_unused)
4228{
Jiri Olsadc0c6122017-01-09 10:51:58 +01004229 struct record *rec = &record;
4230
Adrian Hunterd20aff12020-09-01 12:37:57 +03004231 hit_auxtrace_snapshot_trigger(rec);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00004232
Jiri Olsadc0c6122017-01-09 10:51:58 +01004233 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00004234 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03004235}
Jiri Olsabfacbe32017-01-09 10:52:00 +01004236
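/*
 * SIGALRM handler armed via alarm() for --switch-output=<time>: request
 * an output file switch when the timer fires.
 */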
4237static void alarm_sig_handler(int sig __maybe_unused)
4238{
4239 struct record *rec = &record;
4240
4241 if (switch_output_time(rec))
4242 trigger_hit(&switch_output_trigger);
4243}