// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as from trace_printk(),
 * could occur at the same time, giving false positive or negative
 * results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
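/*
 * Illustrative example (not part of the original source): assuming sysctl
 * support is built in, dump-on-oops can be toggled at runtime:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(all CPU buffers)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	(oops CPU only)
 *
 * or at boot with "ftrace_dump_on_oops" / "ftrace_dump_on_oops=orig_cpu",
 * parsed by set_ftrace_dump_on_oops() below.
 */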

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
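/*
 * Illustrative sketch (not part of the original source) of the resulting
 * array layout for a module that saves N eval maps:
 *
 *	[0]	head	(head.mod, head.length = N)
 *	[1..N]	map	(one trace_eval_map per saved entry)
 *	[N+1]	tail	(tail.next -> next saved array, or NULL)
 */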
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
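/*
 * Worked example (illustrative, not part of the original source):
 * ns2usecs() rounds to the nearest microsecond, since 500 is added
 * before the divide by 1000; so ns2usecs(1499) == 1 and
 * ns2usecs(1500) == 2.
 */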

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
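/*
 * Illustrative usage sketch (not part of the original source), pairing
 * the get/put helpers from a hypothetical module:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		...use tr, e.g. with trace_array_printk()...
 *		trace_array_put(tr);
 *	}
 */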

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
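/*
 * Illustrative sketch (not part of the original source): the helpers above
 * are designed to back a seq_file. A hypothetical user with its own
 * "my_pid_list" would only add thin start/next wrappers (plus whatever
 * locking its ->stop() callback needs):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */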

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
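/*
 * Illustrative usage sketch (not part of the original source): a reader
 * consuming one CPU's events takes the shared side plus that CPU's mutex,
 * while an operation spanning every buffer takes the exclusive side:
 *
 *	trace_access_lock(cpu);
 *	...consume events from the @cpu ring buffer...
 *	trace_access_unlock(cpu);
 *
 *	trace_access_lock(RING_BUFFER_ALL_CPUS);
 *	...touch all CPU buffers...
 *	trace_access_unlock(RING_BUFFER_ALL_CPUS);
 */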

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
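/*
 * Illustrative note (not part of the original source): callers normally
 * reach this through the trace_puts() helper macro (in this era declared
 * in include/linux/kernel.h), which supplies the caller address and
 * dispatches to the __trace_bputs() variant below for build-time constant
 * strings, or here otherwise:
 *
 *	trace_puts("entering critical section\n");
 */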

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
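/*
 * Illustrative usage sketch (not part of the original source): from code
 * that may sleep, allocate the spare buffer once, then snapshot whenever
 * a condition of interest fires:
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (something_interesting_happened)
 *			tracing_snapshot();
 *	}
 */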

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_snapshot_cond_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
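/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller supplies an update callback matching cond_update_fn_t that
 * decides whether the snapshot should be taken, then triggers conditional
 * snapshots with the same cond_data:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *state = cond_data;
 *
 *		return state->hit_count > state->threshold;
 *	}
 *
 *	ret = tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);
 */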
1199
1200/**
1201 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1202 * @tr: The tracing instance
1203 *
1204 * Check whether the conditional snapshot for the given instance is
1205 * enabled; if so, free the cond_snapshot associated with it,
1206 * otherwise return -EINVAL.
1207 *
1208 * Returns 0 if successful, error otherwise.
1209 */
1210int tracing_snapshot_cond_disable(struct trace_array *tr)
1211{
1212 int ret = 0;
1213
1214 arch_spin_lock(&tr->max_lock);
1215
1216 if (!tr->cond_snapshot)
1217 ret = -EINVAL;
1218 else {
1219 kfree(tr->cond_snapshot);
1220 tr->cond_snapshot = NULL;
1221 }
1222
1223 arch_spin_unlock(&tr->max_lock);
1224
1225 return ret;
1226}
1227EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001228#else
1229void tracing_snapshot(void)
1230{
1231 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1232}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001233EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001234void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1235{
1236 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1237}
1238EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001239int tracing_alloc_snapshot(void)
1240{
1241 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1242 return -ENODEV;
1243}
1244EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001245void tracing_snapshot_alloc(void)
1246{
1247 /* Give warning */
1248 tracing_snapshot();
1249}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001250EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001251void *tracing_cond_snapshot_data(struct trace_array *tr)
1252{
1253 return NULL;
1254}
1255EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1256int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1257{
1258 return -ENODEV;
1259}
1260EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1261int tracing_snapshot_cond_disable(struct trace_array *tr)
1262{
1263 return false;
1264}
1265EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001266#endif /* CONFIG_TRACER_SNAPSHOT */
1267
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001268void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001269{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001270 if (tr->array_buffer.buffer)
1271 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001272 /*
1273 * This flag is looked at when buffers haven't been allocated
1274 * yet, or by some tracers (like irqsoff), that just want to
1275 * know if the ring buffer has been disabled, but it can handle
1276 * races of where it gets disabled but we still do a record.
1277 * As the check is in the fast path of the tracers, it is more
1278 * important to be fast than accurate.
1279 */
1280 tr->buffer_disabled = 1;
1281 /* Make the flag seen by readers */
1282 smp_wmb();
1283}
1284
Steven Rostedt499e5472012-02-22 15:50:28 -05001285/**
1286 * tracing_off - turn off tracing buffers
1287 *
1288 * This function stops the tracing buffers from recording data.
1289 * It does not disable any overhead the tracers themselves may
1290 * be causing. This function simply causes all recording to
1291 * the ring buffers to fail.
1292 */
1293void tracing_off(void)
1294{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001295 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001296}
1297EXPORT_SYMBOL_GPL(tracing_off);
1298
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001299void disable_trace_on_warning(void)
1300{
1301 if (__disable_trace_on_warning)
1302 tracing_off();
1303}
1304
Steven Rostedt499e5472012-02-22 15:50:28 -05001305/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001306 * tracer_tracing_is_on - show real state of ring buffer enabled
1307 * @tr : the trace array to know if ring buffer is enabled
1308 *
1309 * Shows real state of the ring buffer if it is enabled or not.
1310 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001311bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001312{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001313 if (tr->array_buffer.buffer)
1314 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001315 return !tr->buffer_disabled;
1316}
1317
Steven Rostedt499e5472012-02-22 15:50:28 -05001318/**
1319 * tracing_is_on - show whether the global ring buffers are enabled
1320 */
1321int tracing_is_on(void)
1322{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001323 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001324}
1325EXPORT_SYMBOL_GPL(tracing_is_on);
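/*
 * Illustrative sketch (editor's addition, not in the original file): a
 * debugging helper might freeze the global ring buffer the moment an
 * error is detected, so the trace leading up to the failure is kept.
 * The function name and message are hypothetical; the block is kept in
 * #if 0 so it does not affect the build.
 */
#if 0
static void example_freeze_trace_on_error(int err)
{
	if (err && tracing_is_on()) {
		trace_printk("error %d hit, freezing trace buffers\n", err);
		tracing_off();
	}
}
#endif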
1326
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001327static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001328{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001329 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001330
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001331 if (!str)
1332 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001333 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001334 /* nr_entries cannot be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001335 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001336 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001337 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001338 return 1;
1339}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001340__setup("trace_buf_size=", set_buf_size);
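/*
 * Example usage (editor's note): memparse() accepts size suffixes, so a
 * kernel command line of
 *
 *	trace_buf_size=4M
 *
 * requests a 4 MiB per-cpu ring buffer at boot.
 */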
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001341
Tim Bird0e950172010-02-25 15:36:43 -08001342static int __init set_tracing_thresh(char *str)
1343{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001344 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001345 int ret;
1346
1347 if (!str)
1348 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001349 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001350 if (ret < 0)
1351 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001352 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001353 return 1;
1354}
1355__setup("tracing_thresh=", set_tracing_thresh);
1356
Steven Rostedt57f50be2008-05-12 21:20:44 +02001357unsigned long nsecs_to_usecs(unsigned long nsecs)
1358{
1359 return nsecs / 1000;
1360}
1361
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001362/*
1363 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001364 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001365 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001366 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001367 */
1368#undef C
1369#define C(a, b) b
1370
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001371/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001372static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001373 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001374 NULL
1375};
1376
Zhaolei5079f322009-08-25 16:12:56 +08001377static struct {
1378 u64 (*func)(void);
1379 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001380 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001381} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001382 { trace_clock_local, "local", 1 },
1383 { trace_clock_global, "global", 1 },
1384 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001385 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001386 { trace_clock, "perf", 1 },
1387 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001388 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001389 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001390 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001391};
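/*
 * Example usage (editor's note): the "name" column above is what is
 * listed in, and accepted by, the trace_clock tracefs file:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 */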
1392
Tom Zanussi860f9f62018-01-15 20:51:48 -06001393bool trace_clock_in_ns(struct trace_array *tr)
1394{
1395 if (trace_clocks[tr->clock_id].in_ns)
1396 return true;
1397
1398 return false;
1399}
1400
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001401/*
1402 * trace_parser_get_init - allocate and initialize the buffer for the trace parser
1403 */
1404int trace_parser_get_init(struct trace_parser *parser, int size)
1405{
1406 memset(parser, 0, sizeof(*parser));
1407
1408 parser->buffer = kmalloc(size, GFP_KERNEL);
1409 if (!parser->buffer)
1410 return 1;
1411
1412 parser->size = size;
1413 return 0;
1414}
1415
1416/*
1417 * trace_parser_put - frees the buffer for the trace parser
1418 */
1419void trace_parser_put(struct trace_parser *parser)
1420{
1421 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001422 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001423}
1424
1425/*
1426 * trace_get_user - reads the user input string separated by space
1427 * (matched by isspace(ch))
1428 *
1429 * For each string found the 'struct trace_parser' is updated,
1430 * and the function returns.
1431 *
1432 * Returns number of bytes read.
1433 *
1434 * See kernel/trace/trace.h for 'struct trace_parser' details.
1435 */
1436int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1437 size_t cnt, loff_t *ppos)
1438{
1439 char ch;
1440 size_t read = 0;
1441 ssize_t ret;
1442
1443 if (!*ppos)
1444 trace_parser_clear(parser);
1445
1446 ret = get_user(ch, ubuf++);
1447 if (ret)
1448 goto out;
1449
1450 read++;
1451 cnt--;
1452
1453 /*
1454 * If the parser did not finish with the last write,
1455 * continue reading the user input without skipping spaces.
1456 */
1457 if (!parser->cont) {
1458 /* skip white space */
1459 while (cnt && isspace(ch)) {
1460 ret = get_user(ch, ubuf++);
1461 if (ret)
1462 goto out;
1463 read++;
1464 cnt--;
1465 }
1466
Changbin Du76638d92018-01-16 17:02:29 +08001467 parser->idx = 0;
1468
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001469 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001470 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001471 *ppos += read;
1472 ret = read;
1473 goto out;
1474 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001475 }
1476
1477 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001478 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001479 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001480 parser->buffer[parser->idx++] = ch;
1481 else {
1482 ret = -EINVAL;
1483 goto out;
1484 }
1485 ret = get_user(ch, ubuf++);
1486 if (ret)
1487 goto out;
1488 read++;
1489 cnt--;
1490 }
1491
1492 /* We either got complete input or have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001493 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001494 parser->buffer[parser->idx] = 0;
1495 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001496 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001497 parser->cont = true;
1498 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001499 /* Make sure the parsed string always terminates with '\0'. */
1500 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001501 } else {
1502 ret = -EINVAL;
1503 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001504 }
1505
1506 *ppos += read;
1507 ret = read;
1508
1509out:
1510 return ret;
1511}
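/*
 * Illustrative sketch (editor's addition): the intended calling pattern
 * for the parser above, loosely modeled on the tracefs write handlers
 * that consume space-separated tokens. The handler name is hypothetical;
 * the block is kept in #if 0 so it does not affect the build.
 */
#if 0
static ssize_t
example_token_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		/* a full NUL-terminated token is now in parser.buffer */
		pr_info("got token: %s\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return ret;
}
#endif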
1512
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001513/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001514static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001515{
1516 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001517
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001518 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001519 return -EBUSY;
1520
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001521 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001522 if (cnt > len)
1523 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001524 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001525
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001526 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001527 return cnt;
1528}
1529
Tim Bird0e950172010-02-25 15:36:43 -08001530unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001531static const struct file_operations tracing_max_lat_fops;
1532
1533#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1534 defined(CONFIG_FSNOTIFY)
1535
1536static struct workqueue_struct *fsnotify_wq;
1537
1538static void latency_fsnotify_workfn(struct work_struct *work)
1539{
1540 struct trace_array *tr = container_of(work, struct trace_array,
1541 fsnotify_work);
1542 fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
1543 tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
1544}
1545
1546static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1547{
1548 struct trace_array *tr = container_of(iwork, struct trace_array,
1549 fsnotify_irqwork);
1550 queue_work(fsnotify_wq, &tr->fsnotify_work);
1551}
1552
1553static void trace_create_maxlat_file(struct trace_array *tr,
1554 struct dentry *d_tracer)
1555{
1556 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1557 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1558 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1559 d_tracer, &tr->max_latency,
1560 &tracing_max_lat_fops);
1561}
1562
1563__init static int latency_fsnotify_init(void)
1564{
1565 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1566 WQ_UNBOUND | WQ_HIGHPRI, 0);
1567 if (!fsnotify_wq) {
1568 pr_err("Unable to allocate tr_max_lat_wq\n");
1569 return -ENOMEM;
1570 }
1571 return 0;
1572}
1573
1574late_initcall_sync(latency_fsnotify_init);
1575
1576void latency_fsnotify(struct trace_array *tr)
1577{
1578 if (!fsnotify_wq)
1579 return;
1580 /*
1581 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1582 * possible that we are called from __schedule() or do_idle(), which
1583 * could cause a deadlock.
1584 */
1585 irq_work_queue(&tr->fsnotify_irqwork);
1586}
1587
1588/*
1589 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1590 * defined(CONFIG_FSNOTIFY)
1591 */
1592#else
1593
1594#define trace_create_maxlat_file(tr, d_tracer) \
1595 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1596 &tr->max_latency, &tracing_max_lat_fops)
1597
1598#endif
Tim Bird0e950172010-02-25 15:36:43 -08001599
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001600#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001601/*
1602 * Copy the new maximum trace into the separate maximum-trace
1603 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae22017-10-19 14:32:33 +08001604 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001605 */
1606static void
1607__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1608{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001609 struct array_buffer *trace_buf = &tr->array_buffer;
1610 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001611 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1612 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001613
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001614 max_buf->cpu = cpu;
1615 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001616
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001617 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001618 max_data->critical_start = data->critical_start;
1619 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001620
Tom Zanussi85f726a2019-03-05 10:12:00 -06001621 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001622 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001623 /*
1624 * If tsk == current, then use current_uid(), as that does not use
1625 * RCU. The irq tracer can be called out of RCU scope.
1626 */
1627 if (tsk == current)
1628 max_data->uid = current_uid();
1629 else
1630 max_data->uid = task_uid(tsk);
1631
Steven Rostedt8248ac02009-09-02 12:27:41 -04001632 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1633 max_data->policy = tsk->policy;
1634 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001635
1636 /* record this tasks comm */
1637 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001638 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001639}
1640
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001641/**
1642 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1643 * @tr: tracer
1644 * @tsk: the task with the latency
1645 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001646 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001647 *
1648 * Flip the buffers between the @tr and the max_tr and record information
1649 * about which task was the cause of this latency.
1650 */
Ingo Molnare309b412008-05-12 21:20:51 +02001651void
Tom Zanussia35873a2019-02-13 17:42:45 -06001652update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1653 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001654{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001655 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001656 return;
1657
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001658 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001659
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001660 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001661 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001662 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001663 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001664 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001665
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001666 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001667
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001668 /* Inherit the recordable setting from array_buffer */
1669 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001670 ring_buffer_record_on(tr->max_buffer.buffer);
1671 else
1672 ring_buffer_record_off(tr->max_buffer.buffer);
1673
Tom Zanussia35873a2019-02-13 17:42:45 -06001674#ifdef CONFIG_TRACER_SNAPSHOT
1675 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1676 goto out_unlock;
1677#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001678 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001679
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001680 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001681
1682 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001683 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001684}
1685
1686/**
1687 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001688 * @tr: tracer
1689 * @tsk: task with the latency
1690 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001691 *
1692 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001693 */
Ingo Molnare309b412008-05-12 21:20:51 +02001694void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001695update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1696{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001697 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001698
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001699 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001700 return;
1701
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001702 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001703 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001704 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001705 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001706 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001707 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001708
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001709 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001710
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001711 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001712
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001713 if (ret == -EBUSY) {
1714 /*
1715 * We failed to swap the buffer due to a commit taking
1716 * place on this CPU. We fail to record, but we reset
1717 * the max trace buffer (no one writes directly to it)
1718 * and flag that it failed.
1719 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001720 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001721 "Failed to swap buffers due to commit in progress\n");
1722 }
1723
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001724 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001725
1726 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001727 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001728}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001729#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001730
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001731static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001732{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001733 /* Iterators are static, they should be filled or empty */
1734 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001735 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001736
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001737 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001738 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001739}
1740
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001741#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001742static bool selftests_can_run;
1743
1744struct trace_selftests {
1745 struct list_head list;
1746 struct tracer *type;
1747};
1748
1749static LIST_HEAD(postponed_selftests);
1750
1751static int save_selftest(struct tracer *type)
1752{
1753 struct trace_selftests *selftest;
1754
1755 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1756 if (!selftest)
1757 return -ENOMEM;
1758
1759 selftest->type = type;
1760 list_add(&selftest->list, &postponed_selftests);
1761 return 0;
1762}
1763
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001764static int run_tracer_selftest(struct tracer *type)
1765{
1766 struct trace_array *tr = &global_trace;
1767 struct tracer *saved_tracer = tr->current_trace;
1768 int ret;
1769
1770 if (!type->selftest || tracing_selftest_disabled)
1771 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001772
1773 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001774 * If a tracer registers early in boot up (before scheduling is
1775 * initialized and such), then do not run its selftests yet.
1776 * Instead, run it a little later in the boot process.
1777 */
1778 if (!selftests_can_run)
1779 return save_selftest(type);
1780
1781 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001782 * Run a selftest on this tracer.
1783 * Here we reset the trace buffer, and set the current
1784 * tracer to be this tracer. The tracer can then run some
1785 * internal tracing to verify that everything is in order.
1786 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001787 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001788 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001789
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001790 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001791
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001792#ifdef CONFIG_TRACER_MAX_TRACE
1793 if (type->use_max_tr) {
1794 /* If we expanded the buffers, make sure the max is expanded too */
1795 if (ring_buffer_expanded)
1796 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1797 RING_BUFFER_ALL_CPUS);
1798 tr->allocated_snapshot = true;
1799 }
1800#endif
1801
1802 /* the test is responsible for initializing and enabling */
1803 pr_info("Testing tracer %s: ", type->name);
1804 ret = type->selftest(type, tr);
1805 /* the test is responsible for resetting too */
1806 tr->current_trace = saved_tracer;
1807 if (ret) {
1808 printk(KERN_CONT "FAILED!\n");
1809 /* Add the warning after printing 'FAILED' */
1810 WARN_ON(1);
1811 return -1;
1812 }
1813 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001814 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001815
1816#ifdef CONFIG_TRACER_MAX_TRACE
1817 if (type->use_max_tr) {
1818 tr->allocated_snapshot = false;
1819
1820 /* Shrink the max buffer again */
1821 if (ring_buffer_expanded)
1822 ring_buffer_resize(tr->max_buffer.buffer, 1,
1823 RING_BUFFER_ALL_CPUS);
1824 }
1825#endif
1826
1827 printk(KERN_CONT "PASSED\n");
1828 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001829}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001830
1831static __init int init_trace_selftests(void)
1832{
1833 struct trace_selftests *p, *n;
1834 struct tracer *t, **last;
1835 int ret;
1836
1837 selftests_can_run = true;
1838
1839 mutex_lock(&trace_types_lock);
1840
1841 if (list_empty(&postponed_selftests))
1842 goto out;
1843
1844 pr_info("Running postponed tracer tests:\n");
1845
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001846 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001847 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001848 /* This loop can take minutes when sanitizers are enabled, so
1849 * let's make sure we allow RCU processing.
1850 */
1851 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001852 ret = run_tracer_selftest(p->type);
1853 /* If the test fails, then warn and remove from available_tracers */
1854 if (ret < 0) {
1855 WARN(1, "tracer: %s failed selftest, disabling\n",
1856 p->type->name);
1857 last = &trace_types;
1858 for (t = trace_types; t; t = t->next) {
1859 if (t == p->type) {
1860 *last = t->next;
1861 break;
1862 }
1863 last = &t->next;
1864 }
1865 }
1866 list_del(&p->list);
1867 kfree(p);
1868 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001869 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001870
1871 out:
1872 mutex_unlock(&trace_types_lock);
1873
1874 return 0;
1875}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001876core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001877#else
1878static inline int run_tracer_selftest(struct tracer *type)
1879{
1880 return 0;
1881}
1882#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001883
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001884static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1885
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001886static void __init apply_trace_boot_options(void);
1887
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001888/**
1889 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001890 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001891 *
1892 * Register a new plugin tracer.
1893 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001894int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001895{
1896 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001897 int ret = 0;
1898
1899 if (!type->name) {
1900 pr_info("Tracer must have a name\n");
1901 return -1;
1902 }
1903
Dan Carpenter24a461d2010-07-10 12:06:44 +02001904 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001905 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1906 return -1;
1907 }
1908
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001909 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11001910 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001911 type->name);
1912 return -EPERM;
1913 }
1914
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001915 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001916
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001917 tracing_selftest_running = true;
1918
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001919 for (t = trace_types; t; t = t->next) {
1920 if (strcmp(type->name, t->name) == 0) {
1921 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001922 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001923 type->name);
1924 ret = -1;
1925 goto out;
1926 }
1927 }
1928
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001929 if (!type->set_flag)
1930 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001931 if (!type->flags) {
1932 /* allocate a dummy tracer_flags */
1933 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001934 if (!type->flags) {
1935 ret = -ENOMEM;
1936 goto out;
1937 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001938 type->flags->val = 0;
1939 type->flags->opts = dummy_tracer_opt;
1940 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001941 if (!type->flags->opts)
1942 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001943
Chunyu Hud39cdd22016-03-08 21:37:01 +08001944 /* store the tracer for __set_tracer_option */
1945 type->flags->trace = type;
1946
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001947 ret = run_tracer_selftest(type);
1948 if (ret < 0)
1949 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001950
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951 type->next = trace_types;
1952 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001953 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001954
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001955 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001956 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001957 mutex_unlock(&trace_types_lock);
1958
Steven Rostedtdac74942009-02-05 01:13:38 -05001959 if (ret || !default_bootup_tracer)
1960 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001961
Li Zefanee6c2c12009-09-18 14:06:47 +08001962 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001963 goto out_unlock;
1964
1965 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1966 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001967 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001968 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001969
1970 apply_trace_boot_options();
1971
Steven Rostedtdac74942009-02-05 01:13:38 -05001972 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001973 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001974#ifdef CONFIG_FTRACE_STARTUP_TEST
1975 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1976 type->name);
1977#endif
1978
1979 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001980 return ret;
1981}
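/*
 * Illustrative sketch (editor's addition): the minimal shape of a tracer
 * registration, as done by the boot-time tracers in this directory. The
 * callbacks here are hypothetical stubs; real tracers also supply a
 * selftest, flags, etc. Kept in #if 0 so it does not affect the build.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);
#endif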
1982
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001983static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001984{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05001985 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001986
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001987 if (!buffer)
1988 return;
1989
Steven Rostedtf6339032009-09-04 12:35:16 -04001990 ring_buffer_record_disable(buffer);
1991
1992 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001993 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001994 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001995
1996 ring_buffer_record_enable(buffer);
1997}
1998
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001999void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002000{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002001 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002002 int cpu;
2003
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002004 if (!buffer)
2005 return;
2006
Steven Rostedt621968c2009-09-04 12:02:35 -04002007 ring_buffer_record_disable(buffer);
2008
2009 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002010 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002011
Alexander Z Lam94571582013-08-02 18:36:16 -07002012 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002013
2014 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04002015 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04002016
2017 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002018}
2019
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002020/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002021void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002022{
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002023 struct trace_array *tr;
2024
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002025 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002026 if (!tr->clear_trace)
2027 continue;
2028 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002029 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002030#ifdef CONFIG_TRACER_MAX_TRACE
2031 tracing_reset_online_cpus(&tr->max_buffer);
2032#endif
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002033 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002034}
2035
Joel Fernandesd914ba32017-06-26 19:01:55 -07002036static int *tgid_map;
2037
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002038#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002039#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002040static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002041struct saved_cmdlines_buffer {
2042 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2043 unsigned *map_cmdline_to_pid;
2044 unsigned cmdline_num;
2045 int cmdline_idx;
2046 char *saved_cmdlines;
2047};
2048static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002049
Steven Rostedt25b0b442008-05-12 21:21:00 +02002050/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002051static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002052
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002053static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002054{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002055 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2056}
2057
2058static inline void set_cmdline(int idx, const char *cmdline)
2059{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002060 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002061}
2062
2063static int allocate_cmdlines_buffer(unsigned int val,
2064 struct saved_cmdlines_buffer *s)
2065{
Kees Cook6da2ec52018-06-12 13:55:00 -07002066 s->map_cmdline_to_pid = kmalloc_array(val,
2067 sizeof(*s->map_cmdline_to_pid),
2068 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002069 if (!s->map_cmdline_to_pid)
2070 return -ENOMEM;
2071
Kees Cook6da2ec52018-06-12 13:55:00 -07002072 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002073 if (!s->saved_cmdlines) {
2074 kfree(s->map_cmdline_to_pid);
2075 return -ENOMEM;
2076 }
2077
2078 s->cmdline_idx = 0;
2079 s->cmdline_num = val;
2080 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2081 sizeof(s->map_pid_to_cmdline));
2082 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2083 val * sizeof(*s->map_cmdline_to_pid));
2084
2085 return 0;
2086}
2087
2088static int trace_create_savedcmd(void)
2089{
2090 int ret;
2091
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002092 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002093 if (!savedcmd)
2094 return -ENOMEM;
2095
2096 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2097 if (ret < 0) {
2098 kfree(savedcmd);
2099 savedcmd = NULL;
2100 return -ENOMEM;
2101 }
2102
2103 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002104}
2105
Carsten Emdeb5130b12009-09-13 01:43:07 +02002106int is_tracing_stopped(void)
2107{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002108 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002109}
2110
Steven Rostedt0f048702008-11-05 16:05:44 -05002111/**
2112 * tracing_start - quick start of the tracer
2113 *
2114 * If tracing is enabled but was stopped by tracing_stop,
2115 * this will start the tracer back up.
2116 */
2117void tracing_start(void)
2118{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002119 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002120 unsigned long flags;
2121
2122 if (tracing_disabled)
2123 return;
2124
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002125 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2126 if (--global_trace.stop_count) {
2127 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002128 /* Someone screwed up their debugging */
2129 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002130 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002131 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002132 goto out;
2133 }
2134
Steven Rostedta2f80712010-03-12 19:56:00 -05002135 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002136 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002137
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002138 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002139 if (buffer)
2140 ring_buffer_record_enable(buffer);
2141
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002142#ifdef CONFIG_TRACER_MAX_TRACE
2143 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002144 if (buffer)
2145 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002146#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002147
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002148 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002149
Steven Rostedt0f048702008-11-05 16:05:44 -05002150 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002151 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2152}
2153
2154static void tracing_start_tr(struct trace_array *tr)
2155{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002156 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002157 unsigned long flags;
2158
2159 if (tracing_disabled)
2160 return;
2161
2162 /* If global, we need to also start the max tracer */
2163 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2164 return tracing_start();
2165
2166 raw_spin_lock_irqsave(&tr->start_lock, flags);
2167
2168 if (--tr->stop_count) {
2169 if (tr->stop_count < 0) {
2170 /* Someone screwed up their debugging */
2171 WARN_ON_ONCE(1);
2172 tr->stop_count = 0;
2173 }
2174 goto out;
2175 }
2176
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002177 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002178 if (buffer)
2179 ring_buffer_record_enable(buffer);
2180
2181 out:
2182 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002183}
2184
2185/**
2186 * tracing_stop - quick stop of the tracer
2187 *
2188 * Light weight way to stop tracing. Use in conjunction with
2189 * tracing_start.
2190 */
2191void tracing_stop(void)
2192{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002193 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002194 unsigned long flags;
2195
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002196 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2197 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002198 goto out;
2199
Steven Rostedta2f80712010-03-12 19:56:00 -05002200 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002201 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002202
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002203 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002204 if (buffer)
2205 ring_buffer_record_disable(buffer);
2206
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002207#ifdef CONFIG_TRACER_MAX_TRACE
2208 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002209 if (buffer)
2210 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002211#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002212
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002213 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002214
Steven Rostedt0f048702008-11-05 16:05:44 -05002215 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002216 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2217}
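/*
 * Usage note (editor's addition): tracing_stop()/tracing_start() nest
 * via stop_count, so paired callers compose safely. A hypothetical
 * sketch, kept in #if 0 so it does not affect the build:
 */
#if 0
static void example_critical_dump(void)
{
	tracing_stop();		/* freeze buffers while we inspect them */
	/* ... read or dump the trace here ... */
	tracing_start();	/* recording resumes once count drops to 0 */
}
#endif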
2218
2219static void tracing_stop_tr(struct trace_array *tr)
2220{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002221 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002222 unsigned long flags;
2223
2224 /* If global, we need to also stop the max tracer */
2225 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2226 return tracing_stop();
2227
2228 raw_spin_lock_irqsave(&tr->start_lock, flags);
2229 if (tr->stop_count++)
2230 goto out;
2231
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002232 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002233 if (buffer)
2234 ring_buffer_record_disable(buffer);
2235
2236 out:
2237 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002238}
2239
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002240static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002241{
Carsten Emdea635cf02009-03-18 09:00:41 +01002242 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002243
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002244 /* treat recording of idle task as a success */
2245 if (!tsk->pid)
2246 return 1;
2247
2248 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002249 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002250
2251 /*
2252 * It's not the end of the world if we don't get
2253 * the lock, but we also don't want to spin
2254 * nor do we want to disable interrupts,
2255 * so if we miss here, then better luck next time.
2256 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002257 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002258 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002259
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002260 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002261 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002262 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002263
Carsten Emdea635cf02009-03-18 09:00:41 +01002264 /*
2265 * Check whether the cmdline buffer at idx has a pid
2266 * mapped. We are going to overwrite that entry so we
2267 * need to clear the map_pid_to_cmdline. Otherwise we
2268 * would read the new comm for the old pid.
2269 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002270 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002271 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002272 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002273
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002274 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2275 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002276
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002277 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 }
2279
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002280 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002281
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002282 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002283
2284 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002285}
2286
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002287static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002288{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002289 unsigned map;
2290
Steven Rostedt4ca530852009-03-16 19:20:15 -04002291 if (!pid) {
2292 strcpy(comm, "<idle>");
2293 return;
2294 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002295
Steven Rostedt74bf4072010-01-25 15:11:53 -05002296 if (WARN_ON_ONCE(pid < 0)) {
2297 strcpy(comm, "<XXX>");
2298 return;
2299 }
2300
Steven Rostedt4ca530852009-03-16 19:20:15 -04002301 if (pid > PID_MAX_DEFAULT) {
2302 strcpy(comm, "<...>");
2303 return;
2304 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002306 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002307 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302308 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002309 else
2310 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002311}
2312
2313void trace_find_cmdline(int pid, char comm[])
2314{
2315 preempt_disable();
2316 arch_spin_lock(&trace_cmdline_lock);
2317
2318 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002319
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002320 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002321 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002322}
2323
Joel Fernandesd914ba32017-06-26 19:01:55 -07002324int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002325{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002326 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2327 return 0;
2328
2329 return tgid_map[pid];
2330}
2331
2332static int trace_save_tgid(struct task_struct *tsk)
2333{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002334 /* treat recording of idle task as a success */
2335 if (!tsk->pid)
2336 return 1;
2337
2338 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002339 return 0;
2340
2341 tgid_map[tsk->pid] = tsk->tgid;
2342 return 1;
2343}
2344
2345static bool tracing_record_taskinfo_skip(int flags)
2346{
2347 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2348 return true;
2349 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2350 return true;
2351 if (!__this_cpu_read(trace_taskinfo_save))
2352 return true;
2353 return false;
2354}
2355
2356/**
2357 * tracing_record_taskinfo - record the task info of a task
2358 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002359 * @task: task to record
2360 * @flags: TRACE_RECORD_CMDLINE for recording comm
2361 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002362 */
2363void tracing_record_taskinfo(struct task_struct *task, int flags)
2364{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002365 bool done;
2366
Joel Fernandesd914ba32017-06-26 19:01:55 -07002367 if (tracing_record_taskinfo_skip(flags))
2368 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002369
2370 /*
2371 * Record as much task information as possible. If some fail, continue
2372 * to try to record the others.
2373 */
2374 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2375 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2376
2377 /* If recording any information failed, retry again soon. */
2378 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002379 return;
2380
Joel Fernandesd914ba32017-06-26 19:01:55 -07002381 __this_cpu_write(trace_taskinfo_save, false);
2382}
2383
2384/**
2385 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2386 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002387 * @prev: previous task during sched_switch
2388 * @next: next task during sched_switch
2389 * @flags: TRACE_RECORD_CMDLINE for recording comm
2390 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002391 */
2392void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2393 struct task_struct *next, int flags)
2394{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002395 bool done;
2396
Joel Fernandesd914ba32017-06-26 19:01:55 -07002397 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002398 return;
2399
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002400 /*
2401 * Record as much task information as possible. If some fail, continue
2402 * to try to record the others.
2403 */
2404 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2405 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2406 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2407 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002408
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002409 /* If recording any information failed, retry again soon. */
2410 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002411 return;
2412
2413 __this_cpu_write(trace_taskinfo_save, false);
2414}
2415
2416/* Helpers to record a specific task information */
2417void tracing_record_cmdline(struct task_struct *task)
2418{
2419 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2420}
2421
2422void tracing_record_tgid(struct task_struct *task)
2423{
2424 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425}
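/*
 * Usage note (editor's addition): tracers call these helpers before (or
 * while) writing events so that later output can map a pid back to a
 * comm/tgid; __update_max_tr() above does exactly this via
 * tracing_record_cmdline(tsk). The lookup side is trace_find_cmdline()
 * and trace_find_tgid().
 */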
2426
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002427/*
2428 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2429 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2430 * simplifies those functions and keeps them in sync.
2431 */
2432enum print_line_t trace_handle_return(struct trace_seq *s)
2433{
2434 return trace_seq_has_overflowed(s) ?
2435 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2436}
2437EXPORT_SYMBOL_GPL(trace_handle_return);
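/*
 * Illustrative sketch (editor's addition): the usual shape of an event's
 * output callback, ending with trace_handle_return() so an overflowed
 * trace_seq is reported as TRACE_TYPE_PARTIAL_LINE. The event type is
 * hypothetical; the block is kept in #if 0 so it does not affect the
 * build.
 */
#if 0
static enum print_line_t
example_event_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event on cpu %d\n", iter->cpu);
	return trace_handle_return(s);
}
#endif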
2438
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002439void
Cong Wang46710f32019-05-25 09:57:59 -07002440tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2441 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442{
2443 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002444
Steven Rostedt777e2082008-09-29 23:02:42 -04002445 entry->preempt_count = pc & 0xff;
2446 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002447 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002448 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002449#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002450 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002451#else
2452 TRACE_FLAG_IRQS_NOSUPPORT |
2453#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002454 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302456 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002457 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2458 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002460EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002461
Steven Rostedte77405a2009-09-02 14:17:06 -04002462struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002463trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002464 int type,
2465 unsigned long len,
2466 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002467{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002468 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002469}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002470
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002471DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2472DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2473static int trace_buffered_event_ref;
2474
2475/**
2476 * trace_buffered_event_enable - enable buffering events
2477 *
2478 * When events are being filtered, it is quicker to use a temporary
2479 * buffer to write the event data into if there's a likely chance
2480 * that it will not be committed. The discard of the ring buffer
2481 * is not as fast as committing, and is much slower than copying
2482 * a commit.
2483 *
2484 * When an event is to be filtered, allocate per cpu buffers to
2485 * write the event data into, and if the event is filtered and discarded
2486 * it is simply dropped, otherwise, the entire data is to be committed
2487 * in one shot.
2488 */
2489void trace_buffered_event_enable(void)
2490{
2491 struct ring_buffer_event *event;
2492 struct page *page;
2493 int cpu;
2494
2495 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2496
2497 if (trace_buffered_event_ref++)
2498 return;
2499
2500 for_each_tracing_cpu(cpu) {
2501 page = alloc_pages_node(cpu_to_node(cpu),
2502 GFP_KERNEL | __GFP_NORETRY, 0);
2503 if (!page)
2504 goto failed;
2505
2506 event = page_address(page);
2507 memset(event, 0, sizeof(*event));
2508
2509 per_cpu(trace_buffered_event, cpu) = event;
2510
2511 preempt_disable();
2512 if (cpu == smp_processor_id() &&
2513 this_cpu_read(trace_buffered_event) !=
2514 per_cpu(trace_buffered_event, cpu))
2515 WARN_ON_ONCE(1);
2516 preempt_enable();
2517 }
2518
2519 return;
2520 failed:
2521 trace_buffered_event_disable();
2522}
2523
2524static void enable_trace_buffered_event(void *data)
2525{
2526 /* Probably not needed, but do it anyway */
2527 smp_rmb();
2528 this_cpu_dec(trace_buffered_event_cnt);
2529}
2530
2531static void disable_trace_buffered_event(void *data)
2532{
2533 this_cpu_inc(trace_buffered_event_cnt);
2534}
2535
2536/**
2537 * trace_buffered_event_disable - disable buffering events
2538 *
2539 * When a filter is removed, it is faster to not use the buffered
2540 * events, and to commit directly into the ring buffer. Free up
2541 * the temp buffers when there are no more users. This requires
2542 * special synchronization with current events.
2543 */
2544void trace_buffered_event_disable(void)
2545{
2546 int cpu;
2547
2548 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2549
2550 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2551 return;
2552
2553 if (--trace_buffered_event_ref)
2554 return;
2555
2556 preempt_disable();
2557 /* For each CPU, set the buffer as used. */
2558 smp_call_function_many(tracing_buffer_mask,
2559 disable_trace_buffered_event, NULL, 1);
2560 preempt_enable();
2561
2562 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002563 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002564
2565 for_each_tracing_cpu(cpu) {
2566 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2567 per_cpu(trace_buffered_event, cpu) = NULL;
2568 }
2569 /*
2570 * Make sure trace_buffered_event is NULL before clearing
2571 * trace_buffered_event_cnt.
2572 */
2573 smp_wmb();
2574
2575 preempt_disable();
2576 /* Do the work on each cpu */
2577 smp_call_function_many(tracing_buffer_mask,
2578 enable_trace_buffered_event, NULL, 1);
2579 preempt_enable();
2580}
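/*
 * Usage note (editor's addition): enable/disable are refcounted and must
 * be called under event_mutex (both WARN otherwise); the event filter
 * code pairs them around a filter's lifetime, roughly:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();	 // when a filter is attached
 *	...
 *	trace_buffered_event_disable();	 // when the filter is removed
 *	mutex_unlock(&event_mutex);
 */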
2581
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002582static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002583
Steven Rostedtef5580d2009-02-27 19:38:04 -05002584struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002585trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002586 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002587 int type, unsigned long len,
2588 unsigned long flags, int pc)
2589{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002590 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002591 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002592
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002593 *current_rb = trace_file->tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002594
Tom Zanussi00b41452018-01-15 20:51:39 -06002595 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002596 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2597 (entry = this_cpu_read(trace_buffered_event))) {
2598 /* Try to use the per cpu buffer first */
2599 val = this_cpu_inc_return(trace_buffered_event_cnt);
2600 if (val == 1) {
2601 trace_event_setup(entry, type, flags, pc);
2602 entry->array[0] = len;
2603 return entry;
2604 }
2605 this_cpu_dec(trace_buffered_event_cnt);
2606 }
2607
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002608 entry = __trace_buffer_lock_reserve(*current_rb,
2609 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002610 /*
2611 * If tracing is off, but we have triggers enabled
2612 * we still need to look at the event data. Use the temp_buffer
 2613	 * to store the trace event for the trigger to use. It's recursive
2614 * safe and will not be recorded anywhere.
2615 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002616 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002617 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002618 entry = __trace_buffer_lock_reserve(*current_rb,
2619 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002620 }
2621 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002622}
2623EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
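/*
 * A minimal sketch of the usual caller pattern; the real callers are the
 * generated trace_event_raw_event_*() functions, and this helper and its
 * names are hypothetical, not part of this file:
 */
static __maybe_unused void
example_emit_event(struct trace_event_file *trace_file, int type,
		   unsigned long flags, int pc)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
						sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in the payload past the common trace_entry header ... */
	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    flags, pc);
}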
2624
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002625static DEFINE_SPINLOCK(tracepoint_iter_lock);
2626static DEFINE_MUTEX(tracepoint_printk_mutex);
2627
2628static void output_printk(struct trace_event_buffer *fbuffer)
2629{
2630 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002631 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002632 struct trace_event *event;
2633 unsigned long flags;
2634 struct trace_iterator *iter = tracepoint_print_iter;
2635
2636 /* We should never get here if iter is NULL */
2637 if (WARN_ON_ONCE(!iter))
2638 return;
2639
2640 event_call = fbuffer->trace_file->event_call;
2641 if (!event_call || !event_call->event.funcs ||
2642 !event_call->event.funcs->trace)
2643 return;
2644
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002645 file = fbuffer->trace_file;
2646 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2647 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2648 !filter_match_preds(file->filter, fbuffer->entry)))
2649 return;
2650
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002651 event = &fbuffer->trace_file->event_call->event;
2652
2653 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2654 trace_seq_init(&iter->seq);
2655 iter->ent = fbuffer->entry;
2656 event_call->event.funcs->trace(iter, 0, event);
2657 trace_seq_putc(&iter->seq, 0);
2658 printk("%s", iter->seq.buffer);
2659
2660 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2661}
2662
2663int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2664 void __user *buffer, size_t *lenp,
2665 loff_t *ppos)
2666{
2667 int save_tracepoint_printk;
2668 int ret;
2669
2670 mutex_lock(&tracepoint_printk_mutex);
2671 save_tracepoint_printk = tracepoint_printk;
2672
2673 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2674
2675 /*
2676 * This will force exiting early, as tracepoint_printk
 2677	 * is always zero when tracepoint_print_iter is not allocated
2678 */
2679 if (!tracepoint_print_iter)
2680 tracepoint_printk = 0;
2681
2682 if (save_tracepoint_printk == tracepoint_printk)
2683 goto out;
2684
2685 if (tracepoint_printk)
2686 static_key_enable(&tracepoint_printk_key.key);
2687 else
2688 static_key_disable(&tracepoint_printk_key.key);
2689
2690 out:
2691 mutex_unlock(&tracepoint_printk_mutex);
2692
2693 return ret;
2694}
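/*
 * The handler above backs the kernel.tracepoint_printk sysctl. From
 * userspace (illustrative):
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *
 * Toggling the static key here keeps trace_event_buffer_commit()
 * free of overhead while the option is off.
 */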
2695
2696void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2697{
2698 if (static_key_false(&tracepoint_printk_key.key))
2699 output_printk(fbuffer);
2700
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002701 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002702 fbuffer->event, fbuffer->entry,
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002703 fbuffer->flags, fbuffer->pc, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002704}
2705EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2706
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002707/*
2708 * Skip 3:
2709 *
2710 * trace_buffer_unlock_commit_regs()
2711 * trace_event_buffer_commit()
2712 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302713 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002714# define STACK_SKIP 3
2715
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002716void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002717 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002718 struct ring_buffer_event *event,
2719 unsigned long flags, int pc,
2720 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002721{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002722 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002723
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002724 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002725 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002726 * Note, we can still get here via blktrace, wakeup tracer
2727 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002728 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002729 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002730 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002731 ftrace_trace_userstack(buffer, flags, pc);
2732}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002733
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002734/*
2735 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2736 */
2737void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002738trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002739 struct ring_buffer_event *event)
2740{
2741 __buffer_unlock_commit(buffer, event);
2742}
2743
Chunyan Zhang478409d2016-11-21 15:57:18 +08002744static void
2745trace_process_export(struct trace_export *export,
2746 struct ring_buffer_event *event)
2747{
2748 struct trace_entry *entry;
2749 unsigned int size = 0;
2750
2751 entry = ring_buffer_event_data(event);
2752 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002753 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002754}
2755
2756static DEFINE_MUTEX(ftrace_export_lock);
2757
2758static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2759
2760static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2761
2762static inline void ftrace_exports_enable(void)
2763{
2764 static_branch_enable(&ftrace_exports_enabled);
2765}
2766
2767static inline void ftrace_exports_disable(void)
2768{
2769 static_branch_disable(&ftrace_exports_enabled);
2770}
2771
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002772static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002773{
2774 struct trace_export *export;
2775
2776 preempt_disable_notrace();
2777
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002778 export = rcu_dereference_raw_check(ftrace_exports_list);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002779 while (export) {
2780 trace_process_export(export, event);
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002781 export = rcu_dereference_raw_check(export->next);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002782 }
2783
2784 preempt_enable_notrace();
2785}
2786
2787static inline void
2788add_trace_export(struct trace_export **list, struct trace_export *export)
2789{
2790 rcu_assign_pointer(export->next, *list);
2791 /*
 2792	 * We are inserting export into the list, but another
 2793	 * CPU might be walking that list. We need to make sure
 2794	 * the export->next pointer is valid before another CPU sees
 2795	 * the export pointer in the list.
2796 */
2797 rcu_assign_pointer(*list, export);
2798}
2799
2800static inline int
2801rm_trace_export(struct trace_export **list, struct trace_export *export)
2802{
2803 struct trace_export **p;
2804
2805 for (p = list; *p != NULL; p = &(*p)->next)
2806 if (*p == export)
2807 break;
2808
2809 if (*p != export)
2810 return -1;
2811
2812 rcu_assign_pointer(*p, (*p)->next);
2813
2814 return 0;
2815}
2816
2817static inline void
2818add_ftrace_export(struct trace_export **list, struct trace_export *export)
2819{
2820 if (*list == NULL)
2821 ftrace_exports_enable();
2822
2823 add_trace_export(list, export);
2824}
2825
2826static inline int
2827rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2828{
2829 int ret;
2830
2831 ret = rm_trace_export(list, export);
2832 if (*list == NULL)
2833 ftrace_exports_disable();
2834
2835 return ret;
2836}
2837
2838int register_ftrace_export(struct trace_export *export)
2839{
2840 if (WARN_ON_ONCE(!export->write))
2841 return -1;
2842
2843 mutex_lock(&ftrace_export_lock);
2844
2845 add_ftrace_export(&ftrace_exports_list, export);
2846
2847 mutex_unlock(&ftrace_export_lock);
2848
2849 return 0;
2850}
2851EXPORT_SYMBOL_GPL(register_ftrace_export);
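/*
 * Minimal illustrative exporter (hypothetical names, a sketch only):
 * a real user fills in ->write() to forward each binary trace entry to
 * its own sink and then registers the struct.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward the raw entry (a binary trace event blob) somewhere */
}

static struct trace_export example_export __maybe_unused = {
	.write	= example_export_write,
};

/* A module would then call register_ftrace_export(&example_export). */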
2852
2853int unregister_ftrace_export(struct trace_export *export)
2854{
2855 int ret;
2856
2857 mutex_lock(&ftrace_export_lock);
2858
2859 ret = rm_ftrace_export(&ftrace_exports_list, export);
2860
2861 mutex_unlock(&ftrace_export_lock);
2862
2863 return ret;
2864}
2865EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2866
Ingo Molnare309b412008-05-12 21:20:51 +02002867void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002868trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002869 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2870 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002871{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002872 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002873 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002874 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002875 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002876
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002877 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2878 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002879 if (!event)
2880 return;
2881 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002882 entry->ip = ip;
2883 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002884
Chunyan Zhang478409d2016-11-21 15:57:18 +08002885 if (!call_filter_check_discard(call, entry, buffer, event)) {
2886 if (static_branch_unlikely(&ftrace_exports_enabled))
2887 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002888 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002889 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002890}
2891
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002892#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002893
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002894/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2895#define FTRACE_KSTACK_NESTING 4
2896
2897#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2898
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002899struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002900 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002901};
2902
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002903
2904struct ftrace_stacks {
2905 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2906};
2907
2908static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002909static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2910
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002911static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002912 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002913 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002914{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002915 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002916 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002917 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002918 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002919 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002920 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002921
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002922 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002923	 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002924 * If regs is set, then these functions will not be in the way.
2925 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002926#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002927 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002928 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002929#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002930
2931 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002932 * Since events can happen in NMIs there's no safe way to
2933 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2934 * or NMI comes in, it will just have to use the default
 2935	 * FTRACE_KSTACK_ENTRIES.
2936 */
2937 preempt_disable_notrace();
2938
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002939 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2940
2941 /* This should never happen. If it does, yell once and skip */
 2942	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2943 goto out;
2944
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002945 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002946 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2947 * interrupt will either see the value pre increment or post
2948 * increment. If the interrupt happens pre increment it will have
2949 * restored the counter when it returns. We just need a barrier to
2950 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002951 */
2952 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002953
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002954 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002955 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002956
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002957 if (regs) {
2958 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2959 size, skip);
2960 } else {
2961 nr_entries = stack_trace_save(fstack->calls, size, skip);
2962 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002963
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002964 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002965 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2966 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002967 if (!event)
2968 goto out;
2969 entry = ring_buffer_event_data(event);
2970
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002971 memcpy(&entry->caller, fstack->calls, size);
2972 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002973
Tom Zanussif306cc82013-10-24 08:34:17 -05002974 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002975 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002976
2977 out:
2978 /* Again, don't let gcc optimize things here */
2979 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002980 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002981 preempt_enable_notrace();
2982
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002983}
2984
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002985static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002986 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002987 unsigned long flags,
2988 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002989{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002990 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002991 return;
2992
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002993 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002994}
2995
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002996void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2997 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002998{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002999 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003000
3001 if (rcu_is_watching()) {
3002 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3003 return;
3004 }
3005
3006 /*
3007 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3008 * but if the above rcu_is_watching() failed, then the NMI
3009 * triggered someplace critical, and rcu_irq_enter() should
3010 * not be called from NMI.
3011 */
3012 if (unlikely(in_nmi()))
3013 return;
3014
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003015 rcu_irq_enter_irqson();
3016 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3017 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003018}
3019
Steven Rostedt03889382009-12-11 09:48:22 -05003020/**
3021 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003022 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003023 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003024void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003025{
3026 unsigned long flags;
3027
3028 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003029 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003030
3031 local_save_flags(flags);
3032
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003033#ifndef CONFIG_UNWINDER_ORC
3034 /* Skip 1 to skip this function. */
3035 skip++;
3036#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003037 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003038 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003039}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003040EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05003041
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003042#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003043static DEFINE_PER_CPU(int, user_stack_count);
3044
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003045static void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003046ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003047{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003048 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003049 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003050 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003051
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003052 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003053 return;
3054
Steven Rostedtb6345872010-03-12 20:03:30 -05003055 /*
 3056	 * NMIs cannot handle page faults, even with fixups.
 3057	 * Saving the user stack can (and often does) fault.
3058 */
3059 if (unlikely(in_nmi()))
3060 return;
3061
Steven Rostedt91e86e52010-11-10 12:56:12 +01003062 /*
3063 * prevent recursion, since the user stack tracing may
3064 * trigger other kernel events.
3065 */
3066 preempt_disable();
3067 if (__this_cpu_read(user_stack_count))
3068 goto out;
3069
3070 __this_cpu_inc(user_stack_count);
3071
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003072 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3073 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003074 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003075 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003076 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003077
Steven Rostedt48659d32009-09-11 11:36:23 -04003078 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003079 memset(&entry->caller, 0, sizeof(entry->caller));
3080
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003081 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003082 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003083 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003084
Li Zefan1dbd1952010-12-09 15:47:56 +08003085 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003086 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003087 out:
3088 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003089}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003090#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003091static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003092 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003093{
Török Edwin02b67512008-11-22 13:28:47 +02003094}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003095#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003096
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003097#endif /* CONFIG_STACKTRACE */
3098
Steven Rostedt07d777f2011-09-22 14:01:55 -04003099/* created for use with alloc_percpu */
3100struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003101 int nesting;
3102 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003103};
3104
3105static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003106
3107/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003108 * This allows for lockless recording. If we're nested too deeply, then
3109 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003110 */
3111static char *get_trace_buf(void)
3112{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003113 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003114
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003115 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003116 return NULL;
3117
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003118 buffer->nesting++;
3119
3120 /* Interrupts must see nesting incremented before we use the buffer */
3121 barrier();
3122 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003123}
3124
3125static void put_trace_buf(void)
3126{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003127 /* Don't let the decrement of nesting leak before this */
3128 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003129 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003130}
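/*
 * Expected pairing (a sketch; trace_vbprintk() below is a real user):
 *
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format up to TRACE_BUF_SIZE bytes into tbuffer ...
 *		put_trace_buf();
 *	}
 *
 * Normal, softirq, irq and NMI context each get their own slot, so up
 * to four levels may nest before get_trace_buf() returns NULL.
 */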
3131
3132static int alloc_percpu_trace_buffer(void)
3133{
3134 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003135
3136 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003137 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003138 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003139
3140 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003141 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003142}
3143
Steven Rostedt81698832012-10-11 10:15:05 -04003144static int buffers_allocated;
3145
Steven Rostedt07d777f2011-09-22 14:01:55 -04003146void trace_printk_init_buffers(void)
3147{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003148 if (buffers_allocated)
3149 return;
3150
3151 if (alloc_percpu_trace_buffer())
3152 return;
3153
Steven Rostedt2184db42014-05-28 13:14:40 -04003154 /* trace_printk() is for debug use only. Don't use it in production. */
3155
Joe Perchesa395d6a2016-03-22 14:28:09 -07003156 pr_warn("\n");
3157 pr_warn("**********************************************************\n");
3158 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3159 pr_warn("** **\n");
3160 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3161 pr_warn("** **\n");
3162 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3163 pr_warn("** unsafe for production use. **\n");
3164 pr_warn("** **\n");
3165 pr_warn("** If you see this message and you are not debugging **\n");
3166 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3167 pr_warn("** **\n");
3168 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3169 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003170
Steven Rostedtb382ede62012-10-10 21:44:34 -04003171 /* Expand the buffers to set size */
3172 tracing_update_buffers();
3173
Steven Rostedt07d777f2011-09-22 14:01:55 -04003174 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003175
3176 /*
3177 * trace_printk_init_buffers() can be called by modules.
3178 * If that happens, then we need to start cmdline recording
 3179	 * directly here. If the global_trace.array_buffer.buffer is already
3180 * allocated here, then this was called by module code.
3181 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003182 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003183 tracing_start_cmdline_record();
3184}
Divya Indif45d1222019-03-20 11:28:51 -07003185EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003186
3187void trace_printk_start_comm(void)
3188{
3189 /* Start tracing comms if trace printk is set */
3190 if (!buffers_allocated)
3191 return;
3192 tracing_start_cmdline_record();
3193}
3194
3195static void trace_printk_start_stop_comm(int enabled)
3196{
3197 if (!buffers_allocated)
3198 return;
3199
3200 if (enabled)
3201 tracing_start_cmdline_record();
3202 else
3203 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003204}
3205
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003206/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003207 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003208 * @ip: The address of the caller
3209 * @fmt: The string format to write to the buffer
3210 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003211 */
Steven Rostedt40ce74f12009-03-19 14:03:53 -04003212int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003213{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003214 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003215 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003216 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003217 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003218 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003219 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003220 char *tbuffer;
3221 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003222
3223 if (unlikely(tracing_selftest_running || tracing_disabled))
3224 return 0;
3225
3226 /* Don't pollute graph traces with trace_vprintk internals */
3227 pause_graph_tracing();
3228
3229 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003230 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003231
Steven Rostedt07d777f2011-09-22 14:01:55 -04003232 tbuffer = get_trace_buf();
3233 if (!tbuffer) {
3234 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003235 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003236 }
3237
3238 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3239
3240 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003241 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003242
Steven Rostedt07d777f2011-09-22 14:01:55 -04003243 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003244 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003245 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003246 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003247 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3248 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003249 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003250 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003251 entry = ring_buffer_event_data(event);
3252 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003253 entry->fmt = fmt;
3254
Steven Rostedt07d777f2011-09-22 14:01:55 -04003255 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003256 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003257 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003258 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003259 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003260
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003261out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003262 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003263out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003264 put_trace_buf();
3265
3266out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003267 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003268 unpause_graph_tracing();
3269
3270 return len;
3271}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003272EXPORT_SYMBOL_GPL(trace_vbprintk);
3273
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003274__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003275static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003276__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003277 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003278{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003279 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003280 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003281 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003282 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003283 unsigned long flags;
3284 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003285
3286 if (tracing_disabled || tracing_selftest_running)
3287 return 0;
3288
Steven Rostedt07d777f2011-09-22 14:01:55 -04003289 /* Don't pollute graph traces with trace_vprintk internals */
3290 pause_graph_tracing();
3291
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003292 pc = preempt_count();
3293 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003294
Steven Rostedt07d777f2011-09-22 14:01:55 -04003295
3296 tbuffer = get_trace_buf();
3297 if (!tbuffer) {
3298 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003299 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003300 }
3301
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003302 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003303
Steven Rostedt07d777f2011-09-22 14:01:55 -04003304 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003305 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003306 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003307 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3308 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003309 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003310 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003311 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003312 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003313
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003314 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003315 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003316 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003317 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003318 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003319
3320out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003321 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003322 put_trace_buf();
3323
3324out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003325 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003326 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003327
3328 return len;
3329}
Steven Rostedt659372d2009-09-03 19:11:07 -04003330
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003331__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003332int trace_array_vprintk(struct trace_array *tr,
3333 unsigned long ip, const char *fmt, va_list args)
3334{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003335 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003336}
3337
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003338__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003339int trace_array_printk(struct trace_array *tr,
3340 unsigned long ip, const char *fmt, ...)
3341{
3342 int ret;
3343 va_list ap;
3344
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003345 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003346 return 0;
3347
Divya Indi953ae452019-08-14 10:55:25 -07003348 if (!tr)
3349 return -ENOENT;
3350
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003351 va_start(ap, fmt);
3352 ret = trace_array_vprintk(tr, ip, fmt, ap);
3353 va_end(ap);
3354 return ret;
3355}
Divya Indif45d1222019-03-20 11:28:51 -07003356EXPORT_SYMBOL_GPL(trace_array_printk);
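/*
 * Illustrative use from a module (the instance name is hypothetical;
 * assumes a handle obtained via trace_array_get_by_name()):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr)
 *		trace_array_printk(tr, _THIS_IP_, "value=%d\n", value);
 */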
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003357
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003358__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003359int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003360 unsigned long ip, const char *fmt, ...)
3361{
3362 int ret;
3363 va_list ap;
3364
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003365 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003366 return 0;
3367
3368 va_start(ap, fmt);
3369 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3370 va_end(ap);
3371 return ret;
3372}
3373
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003374__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003375int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3376{
Steven Rostedta813a152009-10-09 01:41:35 -04003377 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003378}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003379EXPORT_SYMBOL_GPL(trace_vprintk);
3380
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003381static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003382{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003383 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3384
Steven Rostedt5a90f572008-09-03 17:42:51 -04003385 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003386 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003387 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003388}
3389
Ingo Molnare309b412008-05-12 21:20:51 +02003390static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003391peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3392 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003393{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003394 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003395 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003396
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003397 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003398 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003399 if (lost_events)
3400 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3401 (unsigned long)-1 : 0;
3402 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003403 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003404 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003405 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003406
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003407 if (event) {
3408 iter->ent_size = ring_buffer_event_length(event);
3409 return ring_buffer_event_data(event);
3410 }
3411 iter->ent_size = 0;
3412 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003413}
Steven Rostedtd7690412008-10-01 00:29:53 -04003414
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003415static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003416__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3417 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003418{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003419 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003420 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003421 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003422 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003423 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003425 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003426 int cpu;
3427
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003428 /*
 3429	 * If we are in a per_cpu trace file, don't bother iterating over
 3430	 * all the CPUs; just peek directly.
3431 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003432 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003433 if (ring_buffer_empty_cpu(buffer, cpu_file))
3434 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003435 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003436 if (ent_cpu)
3437 *ent_cpu = cpu_file;
3438
3439 return ent;
3440 }
3441
Steven Rostedtab464282008-05-12 21:21:00 +02003442 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003443
3444 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003446
Steven Rostedtbc21b472010-03-31 19:49:26 -04003447 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003448
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003449 /*
3450 * Pick the entry with the smallest timestamp:
3451 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003452 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453 next = ent;
3454 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003455 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003456 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003457 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003458 }
3459 }
3460
Steven Rostedt12b5da32012-03-27 10:43:28 -04003461 iter->ent_size = next_size;
3462
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003463 if (ent_cpu)
3464 *ent_cpu = next_cpu;
3465
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003466 if (ent_ts)
3467 *ent_ts = next_ts;
3468
Steven Rostedtbc21b472010-03-31 19:49:26 -04003469 if (missing_events)
3470 *missing_events = next_lost;
3471
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003472 return next;
3473}
3474
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003475#define STATIC_TEMP_BUF_SIZE 128
3476static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3477
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003478/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003479struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3480 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003481{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003482 /* __find_next_entry will reset ent_size */
3483 int ent_size = iter->ent_size;
3484 struct trace_entry *entry;
3485
3486 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003487 * If called from ftrace_dump(), then the iter->temp buffer
3488 * will be the static_temp_buf and not created from kmalloc.
 3489	 * If the entry size is greater than the buffer, we cannot
 3490	 * save it. Just return NULL in that case. This is only
 3491	 * used to add markers when two consecutive events' time
 3492	 * stamps have a large delta. See trace_print_lat_context().
3493 */
3494 if (iter->temp == static_temp_buf &&
3495 STATIC_TEMP_BUF_SIZE < ent_size)
3496 return NULL;
3497
3498 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003499	 * __find_next_entry() may call peek_next_entry(), which may
 3500	 * call ring_buffer_peek() and leave the contents of iter->ent
 3501	 * undefined. Need to copy iter->ent now.
3502 */
3503 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003504 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3505 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003506 kfree(iter->temp);
3507 iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
3508 if (!iter->temp)
3509 return NULL;
3510 }
3511 memcpy(iter->temp, iter->ent, iter->ent_size);
3512 iter->temp_size = iter->ent_size;
3513 iter->ent = iter->temp;
3514 }
3515 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3516 /* Put back the original ent_size */
3517 iter->ent_size = ent_size;
3518
3519 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003520}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003521
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003522/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003523void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003524{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003525 iter->ent = __find_next_entry(iter, &iter->cpu,
3526 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003527
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003528 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003529 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003530
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003531 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003532}
3533
Ingo Molnare309b412008-05-12 21:20:51 +02003534static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003535{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003536 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003537 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538}
3539
Ingo Molnare309b412008-05-12 21:20:51 +02003540static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541{
3542 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003543 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003544 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003545
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003546 WARN_ON_ONCE(iter->leftover);
3547
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003548 (*pos)++;
3549
3550 /* can't go backwards */
3551 if (iter->idx > i)
3552 return NULL;
3553
3554 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003555 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003556 else
3557 ent = iter;
3558
3559 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003560 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003561
3562 iter->pos = *pos;
3563
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003564 return ent;
3565}
3566
Jason Wessel955b61e2010-08-05 09:22:23 -05003567void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003568{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003569 struct ring_buffer_event *event;
3570 struct ring_buffer_iter *buf_iter;
3571 unsigned long entries = 0;
3572 u64 ts;
3573
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003574 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003575
Steven Rostedt6d158a82012-06-27 20:46:14 -04003576 buf_iter = trace_buffer_iter(iter, cpu);
3577 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003578 return;
3579
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003580 ring_buffer_iter_reset(buf_iter);
3581
3582 /*
 3583	 * With the max latency tracers, a reset may never have taken
 3584	 * place on a CPU. This shows up as a timestamp that is before
 3585	 * the start of the buffer.
3586 */
3587 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003588 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003589 break;
3590 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003591 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003592 }
3593
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003594 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003595}
3596
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003597/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003598 * The current tracer is copied to avoid global locking
3599 * all around.
3600 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003601static void *s_start(struct seq_file *m, loff_t *pos)
3602{
3603 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003604 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003605 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003606 void *p = NULL;
3607 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003608 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003609
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003610 /*
 3611	 * Copy the tracer to avoid using a global lock all around.
 3612	 * iter->trace is a copy of current_trace; the name pointer can
 3613	 * be compared instead of using strcmp(), as iter->trace->name
 3614	 * will point to the same string as current_trace->name.
3615 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003616 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003617 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3618 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003619 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003620
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003621#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003622 if (iter->snapshot && iter->trace->use_max_tr)
3623 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003624#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003625
3626 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003627 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003628
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003629 if (*pos != iter->pos) {
3630 iter->ent = NULL;
3631 iter->cpu = 0;
3632 iter->idx = -1;
3633
Steven Rostedtae3b5092013-01-23 15:22:59 -05003634 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003635 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003636 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003637 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003638 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003639
Lai Jiangshanac91d852010-03-02 17:54:50 +08003640 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003641 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3642 ;
3643
3644 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003645 /*
3646 * If we overflowed the seq_file before, then we want
3647 * to just reuse the trace_seq buffer again.
3648 */
3649 if (iter->leftover)
3650 p = iter;
3651 else {
3652 l = *pos - 1;
3653 p = s_next(m, p, &l);
3654 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003655 }
3656
Lai Jiangshan4f535962009-05-18 19:35:34 +08003657 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003658 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003659 return p;
3660}
3661
3662static void s_stop(struct seq_file *m, void *p)
3663{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003664 struct trace_iterator *iter = m->private;
3665
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003666#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003667 if (iter->snapshot && iter->trace->use_max_tr)
3668 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003669#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003670
3671 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003672 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003673
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003674 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003675 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003676}
3677
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003678static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003679get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003680 unsigned long *entries, int cpu)
3681{
3682 unsigned long count;
3683
3684 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3685 /*
3686 * If this buffer has skipped entries, then we hold all
3687 * entries for the trace and we need to ignore the
3688 * ones before the time stamp.
3689 */
3690 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3691 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3692 /* total is the same as the entries */
3693 *total = count;
3694 } else
3695 *total = count +
3696 ring_buffer_overrun_cpu(buf->buffer, cpu);
3697 *entries = count;
3698}
3699
3700static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003701get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003702 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003703{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003704 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003705 int cpu;
3706
3707 *total = 0;
3708 *entries = 0;
3709
3710 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003711 get_total_entries_cpu(buf, &t, &e, cpu);
3712 *total += t;
3713 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003714 }
3715}
3716
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003717unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3718{
3719 unsigned long total, entries;
3720
3721 if (!tr)
3722 tr = &global_trace;
3723
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003724 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003725
3726 return entries;
3727}
3728
3729unsigned long trace_total_entries(struct trace_array *tr)
3730{
3731 unsigned long total, entries;
3732
3733 if (!tr)
3734 tr = &global_trace;
3735
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003736 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003737
3738 return entries;
3739}
3740
Ingo Molnare309b412008-05-12 21:20:51 +02003741static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003742{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003743 seq_puts(m, "# _------=> CPU# \n"
3744 "# / _-----=> irqs-off \n"
3745 "# | / _----=> need-resched \n"
3746 "# || / _---=> hardirq/softirq \n"
3747 "# ||| / _--=> preempt-depth \n"
3748 "# |||| / delay \n"
3749 "# cmd pid ||||| time | caller \n"
3750 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003751}
3752
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003753static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003754{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003755 unsigned long total;
3756 unsigned long entries;
3757
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003758 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003759 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3760 entries, total, num_online_cpus());
3761 seq_puts(m, "#\n");
3762}
3763
static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
	seq_printf(m, "#              | |     %s    |       |         |\n", tgid ? "  |      " : "");
}

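/*
 * Print the column header used when irq-info is enabled. When TGIDs are
 * recorded, "prec" widens the layout to make room for the extra column.
 */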
static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char *space = "          ";
	int prec = tgid ? 10 : 2;

	print_event_info(buf, m);

	seq_printf(m, "# %.*s  _-----=> irqs-off\n", prec, space);
	seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
	seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
	seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
	seq_printf(m, "# %.*s||| /     delay\n", prec, space);
	seq_printf(m, "#           TASK-PID %.*sCPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
	seq_printf(m, "#              | |   %.*s  |   ||||      |         |\n", prec, "     |    ");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct array_buffer *buf = iter->array_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#elif defined(CONFIG_PREEMPT_RT)
		   "preempt_rt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

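/*
 * Annotate the output with a "CPU N buffer started" marker the first
 * time entries from a given CPU appear, when the annotate option is set.
 */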
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

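/* Format one trace entry in the default human-readable output format */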
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

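/* Return 1 if every buffer the iterator covers has been fully consumed */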
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		if (iter->lost_events == (unsigned long)-1)
			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
					 iter->cpu);
		else
			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
					 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->array_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->array_buffer, m,
						       trace_flags);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

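/* seq_file show handler for the trace file: headers first, then one entry per call */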
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

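/*
 * Set up a trace iterator for reading the trace file: allocate the
 * iterator, take a private copy of the current tracer, and prepare a
 * ring buffer iterator for each CPU being read.
 */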
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * trace_find_next_entry() may need to save off iter->ent.
	 * It will place it into the iter->temp buffer. As most
	 * events are less than 128, allocate a buffer of that size.
	 * If one is greater, then trace_find_next_entry() will
	 * allocate a new buffer to adjust for the bigger iter->ent.
	 * It's not critical if it fails to get allocated here.
	 */
	iter->temp = kmalloc(128, GFP_KERNEL);
	if (iter->temp)
		iter->temp_size = 128;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->array_buffer = &tr->max_buffer;
	else
#endif
		iter->array_buffer = &tr->array_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/*
	 * If pause-on-trace is enabled, then stop the trace while
	 * dumping, unless this is the "snapshot" file
	 */
	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->array_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->array_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->temp);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot && tr->stop_count)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->temp);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

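/*
 * Open handler for the trace file. Opening for write with O_TRUNC
 * erases the buffer contents; opening for read sets up an iterator.
 */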
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct array_buffer *trace_buf = &tr->array_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset_cpu(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret) {
		trace_array_put(tr);
		return ret;
	}

	m = file->private_data;
	m->private = tr;

	return 0;
}

static int show_traces_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return seq_release(inode, file);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

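/*
 * Readers seek through the seq_file; an lseek on a write-only handle
 * just resets the position, since writes do not go through seq_file.
 */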
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= show_traces_release,
};

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

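/*
 * Apply a new tracing cpumask: CPUs leaving the mask get their ring
 * buffer recording disabled, CPUs entering it get recording re-enabled.
 */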
int tracing_set_cpumask(struct trace_array *tr,
			cpumask_var_t tracing_cpumask_new)
{
	int cpu;

	if (!tr)
		return -EINVAL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	return 0;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_free;

	err = tracing_set_cpumask(tr, tracing_cpumask_new);
	if (err)
		goto err_free;

	free_cpumask_var(tracing_cpumask_new);

	return count;

err_free:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

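/*
 * Set or clear one trace option flag on a trace array, giving the
 * current tracer a chance to veto the change and propagating side
 * effects (cmdline/tgid recording, fork following, overwrite mode).
 */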
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004797int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004798{
Prateek Sood3a53acf2019-12-10 09:15:16 +00004799 if ((mask == TRACE_ITER_RECORD_TGID) ||
4800 (mask == TRACE_ITER_RECORD_CMD))
4801 lockdep_assert_held(&event_mutex);
4802
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004803 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004804 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004805 return 0;
4806
4807 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004808 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004809 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004810 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004811
4812 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004813 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004814 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004815 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004816
4817 if (mask == TRACE_ITER_RECORD_CMD)
4818 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004819
Joel Fernandesd914ba32017-06-26 19:01:55 -07004820 if (mask == TRACE_ITER_RECORD_TGID) {
4821 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004822 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004823 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004824 GFP_KERNEL);
4825 if (!tgid_map) {
4826 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4827 return -ENOMEM;
4828 }
4829
4830 trace_event_enable_tgid_record(enabled);
4831 }
4832
Steven Rostedtc37775d2016-04-13 16:59:18 -04004833 if (mask == TRACE_ITER_EVENT_FORK)
4834 trace_event_follow_fork(tr, enabled);
4835
Namhyung Kim1e104862017-04-17 11:44:28 +09004836 if (mask == TRACE_ITER_FUNC_FORK)
4837 ftrace_pid_follow_fork(tr, enabled);
4838
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004839 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004840 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004841#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004842 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004843#endif
4844 }
Steven Rostedt81698832012-10-11 10:15:05 -04004845
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004846 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004847 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004848 trace_printk_control(enabled);
4849 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004850
4851 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004852}

int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could be set, test the specific tracer options */
	if (ret < 0)
		ret = set_tracer_option(tr, cmp, neg);
	else
		ret = set_tracer_flag(tr, 1 << ret, !neg);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
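/*
 * A minimal in-kernel caller sketch (illustrative only): the option
 * string must be writable because strstrip() above may modify it in
 * place, so a stack buffer is used rather than a string literal.
 *
 *	char opt[] = "nooverwrite";
 *
 *	if (trace_set_options(&global_trace, opt) < 0)
 *		pr_warn("failed to clear the overwrite trace option\n");
 */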

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
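/*
 * Boot-time example (assuming the standard trace_options= kernel
 * parameter, which fills trace_boot_options_buf): a command line such as
 *
 *   trace_options=sym-offset,nooverwrite
 *
 * is split on commas above and each token is applied through
 * trace_set_options() before user space is up.
 */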

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer, write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" error_log\t- error log for failed commands (that support it)\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n timestamp_mode\t- view the mode used to timestamp events\n"
	" delta: Delta difference against a buffer-wide timestamp\n"
	" absolute: Absolute (standalone) timestamp\n"
	"\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	"\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by prefixing 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name or glob-matching-pattern\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do_trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
	" set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_DYNAMIC_EVENTS
	" dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_KPROBE_EVENTS
	" kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	" uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t accepts: event-definitions (one definition per line)\n"
	"\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t s:[synthetic/]<event> <field> [<field>]\n"
#endif
	"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"\t place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#else
	"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
	"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
	"\t <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t field: <stype> <name>;\n"
	"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
	"\t [unsigned] char/int/long\n"
#endif
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
5143 "\t To remove a trigger without a count:\n"
5144 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5145 "\t To remove a trigger with a count:\n"
5146 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5147 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005148#ifdef CONFIG_HIST_TRIGGERS
5149 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005150 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005151 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005152 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005153 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005154 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005155 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005156 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005157 "\t [if <filter>]\n\n"
5158 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005159 "\t table using the key(s) and value(s) named, and the value of a\n"
5160 "\t sum called 'hitcount' is incremented. Keys and values\n"
5161 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005162 "\t can be any field, or the special string 'stacktrace'.\n"
5163 "\t Compound keys consisting of up to two fields can be specified\n"
5164 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5165 "\t fields. Sort keys consisting of up to two fields can be\n"
5166 "\t specified using the 'sort' keyword. The sort direction can\n"
5167 "\t be modified by appending '.descending' or '.ascending' to a\n"
5168 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005169 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5170 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5171 "\t its histogram data will be shared with other triggers of the\n"
5172 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005173 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005174 "\t table in its entirety to stdout. If there are multiple hist\n"
5175 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005176 "\t trigger in the output. The table displayed for a named\n"
5177 "\t trigger will be the same as any other instance having the\n"
5178 "\t same name. The default format used to display a given field\n"
5179 "\t can be modified by appending any of the following modifiers\n"
5180 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005181 "\t .hex display a number as a hex value\n"
5182 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005183 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005184 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005185 "\t .syscall display a syscall id as a syscall name\n"
5186 "\t .log2 display log2 value rather than raw number\n"
5187 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005188 "\t The 'pause' parameter can be used to pause an existing hist\n"
5189 "\t trigger or to start a hist trigger but not log any events\n"
5190 "\t until told to do so. 'continue' can be used to start or\n"
5191 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005192 "\t The 'clear' parameter will clear the contents of a running\n"
5193 "\t hist trigger and leave its current paused/active state\n"
5194 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005195 "\t The enable_hist and disable_hist triggers can be used to\n"
5196 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005197 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005198 "\t the enable_event and disable_event triggers.\n\n"
5199 "\t Hist trigger handlers and actions are executed whenever a\n"
5200 "\t a histogram entry is added or updated. They take the form:\n\n"
5201 "\t <handler>.<action>\n\n"
5202 "\t The available handlers are:\n\n"
5203 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005204 "\t onmax(var) - invoke if var exceeds current max\n"
5205 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005206 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005207 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005208 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005209#ifdef CONFIG_TRACER_SNAPSHOT
5210 "\t snapshot() - snapshot the trace buffer\n"
5211#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005212#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005213;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
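/*
 * The text above is what userspace sees when it reads the README file in
 * tracefs, e.g. (path assumes the usual mount point):
 *
 *   cat /sys/kernel/tracing/README
 */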

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}


static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
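/*
 * Usage sketch: the pid-to-tgid table is only populated while the
 * record-tgid option is set, so a typical session looks like this
 * (paths assume tracefs at /sys/kernel/tracing):
 *
 *   echo record-tgid > trace_options
 *   cat saved_tgids          # prints "<pid> <tgid>" per line
 */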

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
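/*
 * Usage sketch: saved_cmdlines exposes the pid-to-comm cache that the
 * tracer uses to resolve task names after the fact:
 *
 *   cat /sys/kernel/tracing/saved_cmdlines   # "<pid> <comm>" per line
 */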

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
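/*
 * Usage sketch: growing the cache trades memory for fewer "<...>"
 * entries in the trace output when many tasks are running:
 *
 *   echo 8192 > /sys/kernel/tracing/saved_cmdlines_size
 *   cat /sys/kernel/tracing/saved_cmdlines_size    # reports 8192
 */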

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	(*pos)++;
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
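/*
 * Usage sketch: with CONFIG_TRACE_EVAL_MAP_FILE enabled, the enum and
 * sizeof() values that were folded into event format strings can be
 * inspected; eval_map_show() above prints one "NAME VALUE (system)" line
 * per entry:
 *
 *   cat /sys/kernel/tracing/eval_map
 */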

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
			 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}
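/*
 * Usage sketch: this resize path is what services writes to
 * buffer_size_kb, either globally or per CPU (paths assume tracefs at
 * /sys/kernel/tracing):
 *
 *   echo 4096 > buffer_size_kb              # all CPUs
 *   echo 4096 > per_cpu/cpu0/buffer_size_kb # just CPU 0
 */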

ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				   unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a call to synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
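/*
 * Usage sketch: this is the handler behind the current_tracer file, so
 * switching tracers from userspace is just:
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   echo nop > /sys/kernel/tracing/current_tracer   # turn it back off
 */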
5940
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005941static ssize_t
5942tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5943 size_t cnt, loff_t *ppos)
5944{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005945 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005946 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005947 int i;
5948 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005949 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005950
Steven Rostedt60063a62008-10-28 10:44:24 -04005951 ret = cnt;
5952
Li Zefanee6c2c12009-09-18 14:06:47 +08005953 if (cnt > MAX_TRACER_SIZE)
5954 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005955
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005956 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005957 return -EFAULT;
5958
5959 buf[cnt] = 0;
5960
5961	/* strip trailing whitespace. */
5962 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5963 buf[i] = 0;
5964
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005965 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005966 if (err)
5967 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005968
Jiri Olsacf8517c2009-10-23 19:36:16 -04005969 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005970
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005971 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005972}
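
/*
 * Example (editor's sketch, not part of the kernel source): from user
 * space, selecting a tracer is a single write to the "current_tracer"
 * tracefs file; the handler above strips trailing whitespace, so the
 * newline that echo appends is harmless. The mount point below is an
 * assumption (tracefs is commonly mounted at /sys/kernel/tracing).
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_tracer(const char *name)
 *	{
 *		int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, name, strlen(name)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;		// e.g. set_tracer("function")
 *	}
 */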
5973
5974static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005975tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5976 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005977{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005978 char buf[64];
5979 int r;
5980
Steven Rostedtcffae432008-05-12 21:21:00 +02005981 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005982 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005983 if (r > sizeof(buf))
5984 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005986}
5987
5988static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005989tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5990 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005991{
Hannes Eder5e398412009-02-10 19:44:34 +01005992 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005993 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005994
Peter Huewe22fe9b52011-06-07 21:58:27 +02005995 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5996 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005997 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005998
5999 *ptr = val * 1000;
6000
6001 return cnt;
6002}
6003
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006004static ssize_t
6005tracing_thresh_read(struct file *filp, char __user *ubuf,
6006 size_t cnt, loff_t *ppos)
6007{
6008 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6009}
6010
6011static ssize_t
6012tracing_thresh_write(struct file *filp, const char __user *ubuf,
6013 size_t cnt, loff_t *ppos)
6014{
6015 struct trace_array *tr = filp->private_data;
6016 int ret;
6017
6018 mutex_lock(&trace_types_lock);
6019 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6020 if (ret < 0)
6021 goto out;
6022
6023 if (tr->current_trace->update_thresh) {
6024 ret = tr->current_trace->update_thresh(tr);
6025 if (ret < 0)
6026 goto out;
6027 }
6028
6029 ret = cnt;
6030out:
6031 mutex_unlock(&trace_types_lock);
6032
6033 return ret;
6034}
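
/*
 * Example (editor's sketch): tracing_thresh is exchanged with user
 * space in microseconds but stored in nanoseconds -- note the
 * "val * 1000" in tracing_nsecs_write() above. Writing "100" therefore
 * arms a 100 usec (100000 ns) threshold. The path is an assumption:
 *
 *	int fd = open("/sys/kernel/tracing/tracing_thresh", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "100", 3);	// threshold = 100 usecs
 *		close(fd);
 *	}
 */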
6035
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006036#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006037
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006038static ssize_t
6039tracing_max_lat_read(struct file *filp, char __user *ubuf,
6040 size_t cnt, loff_t *ppos)
6041{
6042 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6043}
6044
6045static ssize_t
6046tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6047 size_t cnt, loff_t *ppos)
6048{
6049 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6050}
6051
Chen Gange428abb2015-11-10 05:15:15 +08006052#endif
6053
Steven Rostedtb3806b42008-05-12 21:20:46 +02006054static int tracing_open_pipe(struct inode *inode, struct file *filp)
6055{
Oleg Nesterov15544202013-07-23 17:25:57 +02006056 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006057 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006058 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006059
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006060 ret = tracing_check_open_get_tr(tr);
6061 if (ret)
6062 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006063
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006064 mutex_lock(&trace_types_lock);
6065
Steven Rostedtb3806b42008-05-12 21:20:46 +02006066 /* create a buffer to store the information to pass to userspace */
6067 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006068 if (!iter) {
6069 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006070 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006071 goto out;
6072 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006073
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006074 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006075 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006076
6077 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6078 ret = -ENOMEM;
6079 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306080 }
6081
Steven Rostedta3097202008-11-07 22:36:02 -05006082 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306083 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006084
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006085 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006086 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6087
David Sharp8be07092012-11-13 12:18:22 -08006088 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006089 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006090 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6091
Oleg Nesterov15544202013-07-23 17:25:57 +02006092 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006093 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006094 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006095 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006096 filp->private_data = iter;
6097
Steven Rostedt107bad82008-05-12 21:21:01 +02006098 if (iter->trace->pipe_open)
6099 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006100
Arnd Bergmannb4447862010-07-07 23:40:11 +02006101 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006102
6103 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006104out:
6105 mutex_unlock(&trace_types_lock);
6106 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006107
6108fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006109 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006110 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006111 mutex_unlock(&trace_types_lock);
6112 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006113}
6114
6115static int tracing_release_pipe(struct inode *inode, struct file *file)
6116{
6117 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006118 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006119
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006120 mutex_lock(&trace_types_lock);
6121
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006122 tr->current_trace->ref--;
6123
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006124 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006125 iter->trace->pipe_close(iter);
6126
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006127 mutex_unlock(&trace_types_lock);
6128
Rusty Russell44623442009-01-01 10:12:23 +10306129 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006130 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006131 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006132
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006133 trace_array_put(tr);
6134
Steven Rostedtb3806b42008-05-12 21:20:46 +02006135 return 0;
6136}
6137
Al Viro9dd95742017-07-03 00:42:43 -04006138static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006139trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006140{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006141 struct trace_array *tr = iter->tr;
6142
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006143	/* Iterators are static; they should be filled or empty */
6144 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006145 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006146
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006147 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006148 /*
6149 * Always select as readable when in blocking mode
6150 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006151 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006152 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006153 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006154 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006155}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006156
Al Viro9dd95742017-07-03 00:42:43 -04006157static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006158tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6159{
6160 struct trace_iterator *iter = filp->private_data;
6161
6162 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006163}
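
/*
 * Example (editor's sketch): a reader can wait for trace_pipe data with
 * poll(2). Per trace_poll() above, EPOLLIN is reported when entries are
 * available (or unconditionally when the "block" trace option is set).
 * Here fd is assumed to be an open trace_pipe descriptor:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));	// data is ready
 */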
6164
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006165/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006166static int tracing_wait_pipe(struct file *filp)
6167{
6168 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006169 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006170
6171 while (trace_empty(iter)) {
6172
6173 if ((filp->f_flags & O_NONBLOCK)) {
6174 return -EAGAIN;
6175 }
6176
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006177 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006178	 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006179	 * We still block if tracing is disabled, but we have never
6180	 * read anything. This allows a user to cat this file, and
6181	 * then enable tracing. But after we have read something,
6182	 * we give an EOF when tracing is disabled again.
6183 *
6184 * iter->pos will be 0 if we haven't read anything.
6185 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006186 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006187 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006188
6189 mutex_unlock(&iter->mutex);
6190
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006191 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006192
6193 mutex_lock(&iter->mutex);
6194
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006195 if (ret)
6196 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006197 }
6198
6199 return 1;
6200}
6201
Steven Rostedtb3806b42008-05-12 21:20:46 +02006202/*
6203 * Consumer reader.
6204 */
6205static ssize_t
6206tracing_read_pipe(struct file *filp, char __user *ubuf,
6207 size_t cnt, loff_t *ppos)
6208{
6209 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006210 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006211
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006212 /*
6213	 * Avoid more than one consumer on a single file descriptor.
6214	 * This is just a matter of trace coherency; the ring buffer itself
6215	 * is protected.
6216 */
6217 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006218
6219 /* return any leftover data */
6220 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6221 if (sret != -EBUSY)
6222 goto out;
6223
6224 trace_seq_init(&iter->seq);
6225
Steven Rostedt107bad82008-05-12 21:21:01 +02006226 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006227 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6228 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006229 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006230 }
6231
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006232waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006233 sret = tracing_wait_pipe(filp);
6234 if (sret <= 0)
6235 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006236
6237 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006238 if (trace_empty(iter)) {
6239 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006240 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006241 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006242
6243 if (cnt >= PAGE_SIZE)
6244 cnt = PAGE_SIZE - 1;
6245
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006246 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006247 memset(&iter->seq, 0,
6248 sizeof(struct trace_iterator) -
6249 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006250 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006251 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006252 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006253
Lai Jiangshan4f535962009-05-18 19:35:34 +08006254 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006255 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006256 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006257 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006258 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006259
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006260 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006261 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006262 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006263 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006264 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006265 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006266 if (ret != TRACE_TYPE_NO_CONSUME)
6267 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006268
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006269 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006270 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006271
6272 /*
6273	 * Setting the full flag means we reached the trace_seq buffer
6274	 * size and should have left via the partial-output check above;
6275	 * one of the trace_seq_* functions is not being used properly.
6276 */
6277 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6278 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006279 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006280 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006281 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006282
Steven Rostedtb3806b42008-05-12 21:20:46 +02006283 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006284 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006285 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006286 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006287
6288 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006289	 * If there was nothing to send to the user, despite consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006290	 * entries, go back to wait for more entries.
6291 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006292 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006293 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006294
Steven Rostedt107bad82008-05-12 21:21:01 +02006295out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006296 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006297
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006298 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006299}
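
/*
 * Example (editor's sketch): trace_pipe is a consuming reader, so each
 * entry is handed out exactly once. A minimal drain loop, assuming fd
 * is an open trace_pipe descriptor:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *
 * Matching tracing_wait_pipe() above, read() returns 0 (EOF) only once
 * tracing has been disabled and at least one entry was read.
 */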
6300
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006301static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6302 unsigned int idx)
6303{
6304 __free_page(spd->pages[idx]);
6305}
6306
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006307static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006308 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006309 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006310 .steal = generic_pipe_buf_steal,
6311 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006312};
6313
Steven Rostedt34cd4992009-02-09 12:06:29 -05006314static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006315tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006316{
6317 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006318 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006319 int ret;
6320
6321 /* Seq buffer is page-sized, exactly what we need. */
6322 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006323 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006324 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006325
6326 if (trace_seq_has_overflowed(&iter->seq)) {
6327 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006328 break;
6329 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006330
6331 /*
6332 * This should not be hit, because it should only
6333 * be set if the iter->seq overflowed. But check it
6334 * anyway to be safe.
6335 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006336 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006337 iter->seq.seq.len = save_len;
6338 break;
6339 }
6340
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006341 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006342 if (rem < count) {
6343 rem = 0;
6344 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006345 break;
6346 }
6347
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006348 if (ret != TRACE_TYPE_NO_CONSUME)
6349 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006350 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006351 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006352 rem = 0;
6353 iter->ent = NULL;
6354 break;
6355 }
6356 }
6357
6358 return rem;
6359}
6360
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006361static ssize_t tracing_splice_read_pipe(struct file *filp,
6362 loff_t *ppos,
6363 struct pipe_inode_info *pipe,
6364 size_t len,
6365 unsigned int flags)
6366{
Jens Axboe35f3d142010-05-20 10:43:18 +02006367 struct page *pages_def[PIPE_DEF_BUFFERS];
6368 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006369 struct trace_iterator *iter = filp->private_data;
6370 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006371 .pages = pages_def,
6372 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006373 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006374 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006375 .ops = &tracing_pipe_buf_ops,
6376 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006377 };
6378 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006379 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006380 unsigned int i;
6381
Jens Axboe35f3d142010-05-20 10:43:18 +02006382 if (splice_grow_spd(pipe, &spd))
6383 return -ENOMEM;
6384
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006385 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006386
6387 if (iter->trace->splice_read) {
6388 ret = iter->trace->splice_read(iter, filp,
6389 ppos, pipe, len, flags);
6390 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006391 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006392 }
6393
6394 ret = tracing_wait_pipe(filp);
6395 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006396 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006397
Jason Wessel955b61e2010-08-05 09:22:23 -05006398 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006399 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006400 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006401 }
6402
Lai Jiangshan4f535962009-05-18 19:35:34 +08006403 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006404 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006405
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006406 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006407 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006408 spd.pages[i] = alloc_page(GFP_KERNEL);
6409 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006410 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006411
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006412 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006413
6414 /* Copy the data into the page, so we can start over. */
6415 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006416 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006417 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006418 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006419 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006420 break;
6421 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006422 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006423 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006424
Steven Rostedtf9520752009-03-02 14:04:40 -05006425 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006426 }
6427
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006428 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006429 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006430 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006431
6432 spd.nr_pages = i;
6433
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006434 if (i)
6435 ret = splice_to_pipe(pipe, &spd);
6436 else
6437 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006438out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006439 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006440 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006441
Steven Rostedt34cd4992009-02-09 12:06:29 -05006442out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006443 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006444 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006445}
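
/*
 * Example (editor's sketch): the splice path above moves trace data
 * into a pipe without an extra user-space copy. Assuming trace_fd is
 * an open trace_pipe descriptor and out_fd a writable file (needs
 * _GNU_SOURCE and <fcntl.h>):
 *
 *	int pfd[2];
 *
 *	if (pipe(pfd) == 0) {
 *		ssize_t n = splice(trace_fd, NULL, pfd[1], NULL,
 *				   65536, SPLICE_F_MOVE);
 *		if (n > 0)
 *			splice(pfd[0], NULL, out_fd, NULL, n,
 *			       SPLICE_F_MOVE);
 *	}
 */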
6446
Steven Rostedta98a3c32008-05-12 21:20:59 +02006447static ssize_t
6448tracing_entries_read(struct file *filp, char __user *ubuf,
6449 size_t cnt, loff_t *ppos)
6450{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006451 struct inode *inode = file_inode(filp);
6452 struct trace_array *tr = inode->i_private;
6453 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006454 char buf[64];
6455 int r = 0;
6456 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006457
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006458 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006459
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006460 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006461 int cpu, buf_size_same;
6462 unsigned long size;
6463
6464 size = 0;
6465 buf_size_same = 1;
6466	 /* check if all per-cpu buffer sizes are the same */
6467 for_each_tracing_cpu(cpu) {
6468 /* fill in the size from first enabled cpu */
6469 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006470 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6471 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006472 buf_size_same = 0;
6473 break;
6474 }
6475 }
6476
6477 if (buf_size_same) {
6478 if (!ring_buffer_expanded)
6479 r = sprintf(buf, "%lu (expanded: %lu)\n",
6480 size >> 10,
6481 trace_buf_size >> 10);
6482 else
6483 r = sprintf(buf, "%lu\n", size >> 10);
6484 } else
6485 r = sprintf(buf, "X\n");
6486 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006487 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006488
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006489 mutex_unlock(&trace_types_lock);
6490
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006491 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6492 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006493}
6494
6495static ssize_t
6496tracing_entries_write(struct file *filp, const char __user *ubuf,
6497 size_t cnt, loff_t *ppos)
6498{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006499 struct inode *inode = file_inode(filp);
6500 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006501 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006502 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006503
Peter Huewe22fe9b52011-06-07 21:58:27 +02006504 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6505 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006506 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006507
6508 /* must have at least 1 entry */
6509 if (!val)
6510 return -EINVAL;
6511
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006512 /* value is in KB */
6513 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006514 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006515 if (ret < 0)
6516 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006517
Jiri Olsacf8517c2009-10-23 19:36:16 -04006518 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006519
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006520 return cnt;
6521}
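
/*
 * Example (editor's sketch): buffer sizes are exchanged in KB -- note
 * the "val <<= 10" above. Writing to the top-level buffer_size_kb file
 * resizes every CPU's buffer, while the per_cpu/cpuN/buffer_size_kb
 * files resize a single CPU. Paths are assumptions:
 *
 *	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "4096", 4);	// 4096 KB per CPU
 *		close(fd);
 *	}
 */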
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006522
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006523static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006524tracing_total_entries_read(struct file *filp, char __user *ubuf,
6525 size_t cnt, loff_t *ppos)
6526{
6527 struct trace_array *tr = filp->private_data;
6528 char buf[64];
6529 int r, cpu;
6530 unsigned long size = 0, expanded_size = 0;
6531
6532 mutex_lock(&trace_types_lock);
6533 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006534 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006535 if (!ring_buffer_expanded)
6536 expanded_size += trace_buf_size >> 10;
6537 }
6538 if (ring_buffer_expanded)
6539 r = sprintf(buf, "%lu\n", size);
6540 else
6541 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6542 mutex_unlock(&trace_types_lock);
6543
6544 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6545}
6546
6547static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006548tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6549 size_t cnt, loff_t *ppos)
6550{
6551 /*
6552	 * There is no need to read what the user has written; this function
6553	 * only exists so that "echo" to this file does not fail.
6554 */
6555
6556 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006557
6558 return cnt;
6559}
6560
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006561static int
6562tracing_free_buffer_release(struct inode *inode, struct file *filp)
6563{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006564 struct trace_array *tr = inode->i_private;
6565
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006566	/* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006567 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006568 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006569 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006570 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006571
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006572 trace_array_put(tr);
6573
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006574 return 0;
6575}
6576
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006577static ssize_t
6578tracing_mark_write(struct file *filp, const char __user *ubuf,
6579 size_t cnt, loff_t *fpos)
6580{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006581 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006582 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006583 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006584 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006585 struct print_entry *entry;
6586 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006587 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006588 int size;
6589 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006590
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006591/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006592#define FAULTED_STR "<faulted>"
6593#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006594
Steven Rostedtc76f0692008-11-07 22:36:02 -05006595 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006596 return -EINVAL;
6597
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006598 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006599 return -EINVAL;
6600
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006601 if (cnt > TRACE_BUF_SIZE)
6602 cnt = TRACE_BUF_SIZE;
6603
Steven Rostedtd696b582011-09-22 11:50:27 -04006604 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006605
Steven Rostedtd696b582011-09-22 11:50:27 -04006606 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006607 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6608
6609 /* If less than "<faulted>", then make sure we can still add that */
6610 if (cnt < FAULTED_SIZE)
6611 size += FAULTED_SIZE - cnt;
6612
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006613 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006614 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6615 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006616 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006617 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006618 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006619
6620 entry = ring_buffer_event_data(event);
6621 entry->ip = _THIS_IP_;
6622
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006623 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6624 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006625 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006626 cnt = FAULTED_SIZE;
6627 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006628 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006629 written = cnt;
6630 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006631
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006632 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6633 /* do not add \n before testing triggers, but add \0 */
6634 entry->buf[cnt] = '\0';
6635 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6636 }
6637
Steven Rostedtd696b582011-09-22 11:50:27 -04006638 if (entry->buf[cnt - 1] != '\n') {
6639 entry->buf[cnt] = '\n';
6640 entry->buf[cnt + 1] = '\0';
6641 } else
6642 entry->buf[cnt] = '\0';
6643
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006644 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006645
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006646 if (tt)
6647 event_triggers_post_call(tr->trace_marker_file, tt);
6648
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006649 if (written > 0)
6650 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006651
Steven Rostedtfa32e852016-07-06 15:25:08 -04006652 return written;
6653}
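
/*
 * Example (editor's sketch): user space annotates the trace by writing
 * free-form text to trace_marker; the handler above appends a newline
 * when one is missing and truncates at TRACE_BUF_SIZE. The path is an
 * assumption:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "frame start", 11);
 *		close(fd);
 *	}
 */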
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006654
Steven Rostedtfa32e852016-07-06 15:25:08 -04006655/* Limit it for now to 3K (including tag) */
6656#define RAW_DATA_MAX_SIZE (1024*3)
6657
6658static ssize_t
6659tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6660 size_t cnt, loff_t *fpos)
6661{
6662 struct trace_array *tr = filp->private_data;
6663 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006664 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006665 struct raw_data_entry *entry;
6666 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006667 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006668 int size;
6669 int len;
6670
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006671#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6672
Steven Rostedtfa32e852016-07-06 15:25:08 -04006673 if (tracing_disabled)
6674 return -EINVAL;
6675
6676 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6677 return -EINVAL;
6678
6679 /* The marker must at least have a tag id */
6680 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6681 return -EINVAL;
6682
6683 if (cnt > TRACE_BUF_SIZE)
6684 cnt = TRACE_BUF_SIZE;
6685
6686 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6687
Steven Rostedtfa32e852016-07-06 15:25:08 -04006688 local_save_flags(irq_flags);
6689 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006690 if (cnt < FAULT_SIZE_ID)
6691 size += FAULT_SIZE_ID - cnt;
6692
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006693 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006694 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6695 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006696 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006697 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006698 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006699
6700 entry = ring_buffer_event_data(event);
6701
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006702 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6703 if (len) {
6704 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006705 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006706 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006707 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006708 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006709
6710 __buffer_unlock_commit(buffer, event);
6711
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006712 if (written > 0)
6713 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006714
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006715 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006716}
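
/*
 * Example (editor's sketch): the raw marker expects a leading 32-bit
 * tag followed by payload bytes, matching the sizeof(unsigned int)
 * check above. The tag value and layout below are illustrative only;
 * fd is assumed to be an open trace_marker_raw descriptor:
 *
 *	struct {
 *		unsigned int id;	// copied into entry->id above
 *		char payload[32];
 *	} msg = { .id = 0x1234 };
 *
 *	memcpy(msg.payload, "raw data", 8);
 *	write(fd, &msg, sizeof(unsigned int) + 8);
 */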
6717
Li Zefan13f16d22009-12-08 11:16:11 +08006718static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006719{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006720 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006721 int i;
6722
6723 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006724 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006725 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006726 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6727 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006728 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006729
Li Zefan13f16d22009-12-08 11:16:11 +08006730 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006731}
6732
Tom Zanussid71bd342018-01-15 20:52:07 -06006733int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006734{
Zhaolei5079f322009-08-25 16:12:56 +08006735 int i;
6736
Zhaolei5079f322009-08-25 16:12:56 +08006737 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6738 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6739 break;
6740 }
6741 if (i == ARRAY_SIZE(trace_clocks))
6742 return -EINVAL;
6743
Zhaolei5079f322009-08-25 16:12:56 +08006744 mutex_lock(&trace_types_lock);
6745
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006746 tr->clock_id = i;
6747
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006748 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006749
David Sharp60303ed2012-10-11 16:27:52 -07006750 /*
6751 * New clock may not be consistent with the previous clock.
6752 * Reset the buffer so that it doesn't have incomparable timestamps.
6753 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006754 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006755
6756#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006757 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006758 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006759 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006760#endif
David Sharp60303ed2012-10-11 16:27:52 -07006761
Zhaolei5079f322009-08-25 16:12:56 +08006762 mutex_unlock(&trace_types_lock);
6763
Steven Rostedte1e232c2014-02-10 23:38:46 -05006764 return 0;
6765}
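
/*
 * Example (editor's sketch): the clock is switched by writing one of
 * the names listed when reading this file (e.g. "local", "global",
 * "mono"); as noted above, the buffers are reset because timestamps
 * from different clocks are not comparable. The path is an assumption:
 *
 *	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "mono", 4);
 *		close(fd);
 *	}
 */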
6766
6767static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6768 size_t cnt, loff_t *fpos)
6769{
6770 struct seq_file *m = filp->private_data;
6771 struct trace_array *tr = m->private;
6772 char buf[64];
6773 const char *clockstr;
6774 int ret;
6775
6776 if (cnt >= sizeof(buf))
6777 return -EINVAL;
6778
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006779 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006780 return -EFAULT;
6781
6782 buf[cnt] = 0;
6783
6784 clockstr = strstrip(buf);
6785
6786 ret = tracing_set_clock(tr, clockstr);
6787 if (ret)
6788 return ret;
6789
Zhaolei5079f322009-08-25 16:12:56 +08006790 *fpos += cnt;
6791
6792 return cnt;
6793}
6794
Li Zefan13f16d22009-12-08 11:16:11 +08006795static int tracing_clock_open(struct inode *inode, struct file *file)
6796{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006797 struct trace_array *tr = inode->i_private;
6798 int ret;
6799
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006800 ret = tracing_check_open_get_tr(tr);
6801 if (ret)
6802 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006803
6804 ret = single_open(file, tracing_clock_show, inode->i_private);
6805 if (ret < 0)
6806 trace_array_put(tr);
6807
6808 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006809}
6810
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006811static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6812{
6813 struct trace_array *tr = m->private;
6814
6815 mutex_lock(&trace_types_lock);
6816
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006817 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006818 seq_puts(m, "delta [absolute]\n");
6819 else
6820 seq_puts(m, "[delta] absolute\n");
6821
6822 mutex_unlock(&trace_types_lock);
6823
6824 return 0;
6825}
6826
6827static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6828{
6829 struct trace_array *tr = inode->i_private;
6830 int ret;
6831
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006832 ret = tracing_check_open_get_tr(tr);
6833 if (ret)
6834 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006835
6836 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6837 if (ret < 0)
6838 trace_array_put(tr);
6839
6840 return ret;
6841}
6842
Tom Zanussi00b41452018-01-15 20:51:39 -06006843int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6844{
6845 int ret = 0;
6846
6847 mutex_lock(&trace_types_lock);
6848
6849 if (abs && tr->time_stamp_abs_ref++)
6850 goto out;
6851
6852 if (!abs) {
6853 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6854 ret = -EINVAL;
6855 goto out;
6856 }
6857
6858 if (--tr->time_stamp_abs_ref)
6859 goto out;
6860 }
6861
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006862 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
Tom Zanussi00b41452018-01-15 20:51:39 -06006863
6864#ifdef CONFIG_TRACER_MAX_TRACE
6865 if (tr->max_buffer.buffer)
6866 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6867#endif
6868 out:
6869 mutex_unlock(&trace_types_lock);
6870
6871 return ret;
6872}
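
/*
 * Behavior sketch (editor's addition): tracing_set_time_stamp_abs() is
 * refcounted, so in-kernel users must pair their calls. Only the first
 * "true" switches the buffer to absolute timestamps and only the last
 * "false" switches it back:
 *
 *	tracing_set_time_stamp_abs(tr, true);	// ref 0->1: absolute on
 *	tracing_set_time_stamp_abs(tr, true);	// ref 1->2: no change
 *	tracing_set_time_stamp_abs(tr, false);	// ref 2->1: still absolute
 *	tracing_set_time_stamp_abs(tr, false);	// ref 1->0: back to delta
 */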
6873
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006874struct ftrace_buffer_info {
6875 struct trace_iterator iter;
6876 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006877 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006878 unsigned int read;
6879};
6880
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006881#ifdef CONFIG_TRACER_SNAPSHOT
6882static int tracing_snapshot_open(struct inode *inode, struct file *file)
6883{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006884 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006885 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006886 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006887 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006888
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006889 ret = tracing_check_open_get_tr(tr);
6890 if (ret)
6891 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006892
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006893 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006894 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006895 if (IS_ERR(iter))
6896 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006897 } else {
6898 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006899 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006900 m = kzalloc(sizeof(*m), GFP_KERNEL);
6901 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006902 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006903 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6904 if (!iter) {
6905 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006906 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006907 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006908 ret = 0;
6909
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006910 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006911 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006912 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006913 m->private = iter;
6914 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006915 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006916out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006917 if (ret < 0)
6918 trace_array_put(tr);
6919
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006920 return ret;
6921}
6922
6923static ssize_t
6924tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6925 loff_t *ppos)
6926{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006927 struct seq_file *m = filp->private_data;
6928 struct trace_iterator *iter = m->private;
6929 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006930 unsigned long val;
6931 int ret;
6932
6933 ret = tracing_update_buffers();
6934 if (ret < 0)
6935 return ret;
6936
6937 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6938 if (ret)
6939 return ret;
6940
6941 mutex_lock(&trace_types_lock);
6942
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006943 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006944 ret = -EBUSY;
6945 goto out;
6946 }
6947
Tom Zanussia35873a2019-02-13 17:42:45 -06006948 arch_spin_lock(&tr->max_lock);
6949 if (tr->cond_snapshot)
6950 ret = -EBUSY;
6951 arch_spin_unlock(&tr->max_lock);
6952 if (ret)
6953 goto out;
6954
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006955 switch (val) {
6956 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006957 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6958 ret = -EINVAL;
6959 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006960 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006961 if (tr->allocated_snapshot)
6962 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006963 break;
6964 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006965/* Only allow per-cpu swap if the ring buffer supports it */
6966#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6967 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6968 ret = -EINVAL;
6969 break;
6970 }
6971#endif
Eiichi Tsukata46cc0b442019-06-25 10:29:10 +09006972 if (tr->allocated_snapshot)
6973 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006974 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b442019-06-25 10:29:10 +09006975 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006976 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b442019-06-25 10:29:10 +09006977 if (ret < 0)
6978 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006979 local_irq_disable();
6980 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006981 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06006982 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006983 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006984 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006985 local_irq_enable();
6986 break;
6987 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006988 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006989 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6990 tracing_reset_online_cpus(&tr->max_buffer);
6991 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04006992 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006993 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006994 break;
6995 }
6996
6997 if (ret >= 0) {
6998 *ppos += cnt;
6999 ret = cnt;
7000 }
7001out:
7002 mutex_unlock(&trace_types_lock);
7003 return ret;
7004}
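
/*
 * Example (editor's sketch): the snapshot file accepts the values
 * handled by the switch above: "0" frees the snapshot buffer, "1"
 * allocates it if needed and swaps it with the live buffer, and any
 * other number just clears the snapshot contents. The path is an
 * assumption:
 *
 *	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// take a snapshot now
 *		close(fd);
 *	}
 */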
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007005
7006static int tracing_snapshot_release(struct inode *inode, struct file *file)
7007{
7008 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007009 int ret;
7010
7011 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007012
7013 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007014 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007015
7016 /* If write only, the seq_file is just a stub */
7017 if (m)
7018 kfree(m->private);
7019 kfree(m);
7020
7021 return 0;
7022}
7023
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007024static int tracing_buffers_open(struct inode *inode, struct file *filp);
7025static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7026 size_t count, loff_t *ppos);
7027static int tracing_buffers_release(struct inode *inode, struct file *file);
7028static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7029 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7030
7031static int snapshot_raw_open(struct inode *inode, struct file *filp)
7032{
7033 struct ftrace_buffer_info *info;
7034 int ret;
7035
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007036 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007037 ret = tracing_buffers_open(inode, filp);
7038 if (ret < 0)
7039 return ret;
7040
7041 info = filp->private_data;
7042
7043 if (info->iter.trace->use_max_tr) {
7044 tracing_buffers_release(inode, filp);
7045 return -EBUSY;
7046 }
7047
7048 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007049 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007050
7051 return ret;
7052}
7053
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007054#endif /* CONFIG_TRACER_SNAPSHOT */
7055
7056
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007057static const struct file_operations tracing_thresh_fops = {
7058 .open = tracing_open_generic,
7059 .read = tracing_thresh_read,
7060 .write = tracing_thresh_write,
7061 .llseek = generic_file_llseek,
7062};
7063
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007064#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007065static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007066 .open = tracing_open_generic,
7067 .read = tracing_max_lat_read,
7068 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007069 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007070};
Chen Gange428abb2015-11-10 05:15:15 +08007071#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007072
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007073static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007074 .open = tracing_open_generic,
7075 .read = tracing_set_trace_read,
7076 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007077 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007078};
7079
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007080static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007081 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007082 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007083 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007084 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007085 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007086 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007087};
7088
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007089static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007090 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007091 .read = tracing_entries_read,
7092 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007093 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007094 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007095};
7096
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007097static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007098 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007099 .read = tracing_total_entries_read,
7100 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007101 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007102};
7103
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007104static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007105 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007106 .write = tracing_free_buffer_write,
7107 .release = tracing_free_buffer_release,
7108};
7109
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007110static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007111 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007112 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007113 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007114 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007115};
7116
Steven Rostedtfa32e852016-07-06 15:25:08 -04007117static const struct file_operations tracing_mark_raw_fops = {
7118 .open = tracing_open_generic_tr,
7119 .write = tracing_mark_raw_write,
7120 .llseek = generic_file_llseek,
7121 .release = tracing_release_generic_tr,
7122};
7123
Zhaolei5079f322009-08-25 16:12:56 +08007124static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007125 .open = tracing_clock_open,
7126 .read = seq_read,
7127 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007128 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007129 .write = tracing_clock_write,
7130};
7131
static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

#define TRACING_LOG_ERRS_MAX	8
#define TRACING_LOG_LOC_MAX	128

#define CMD_PREFIX "  Command: "

struct err_info {
	const char	**errs;	/* ptr to loc-specific array of err strings */
	u8		type;	/* index into errs -> specific err string */
	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
	u64		ts;
};

struct tracing_log_err {
	struct list_head	list;
	struct err_info		info;
	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
	struct tracing_log_err *err;

	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
		err = kzalloc(sizeof(*err), GFP_KERNEL);
		if (!err)
			err = ERR_PTR(-ENOMEM);
		tr->n_err_log_entries++;

		return err;
	}

	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
	list_del(&err->list);

	return err;
}

/**
 * err_pos - find the position of a string within a command for error careting
 * @cmd: The tracing command that caused the error
 * @str: The string to position the caret at within @cmd
 *
 * Finds the position of the first occurrence of @str within @cmd.  The
 * return value can be passed to tracing_log_err() for caret placement
 * within @cmd.
 *
 * Returns the index within @cmd of the first occurrence of @str or 0
 * if @str was not found.
 */
unsigned int err_pos(char *cmd, const char *str)
{
	char *found;

	if (WARN_ON(!strlen(cmd)))
		return 0;

	found = strstr(cmd, str);
	if (found)
		return found - cmd;

	return 0;
}

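/*
 * Editor's sketch of a typical call site for err_pos() together with
 * tracing_log_err() below.  The "hist_errs" table and the zero error
 * index are hypothetical names for illustration only; see the real
 * callers in the histogram code for the actual tables.
 */
#if 0
	static const char *hist_errs[] = { "field not found", };
	char *cmd = "hist:keys=bad_field";

	tracing_log_err(tr, "hist", cmd, hist_errs,
			0, err_pos(cmd, "bad_field"));
#endif
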
/**
 * tracing_log_err - write an error to the tracing error log
 * @tr: The associated trace array for the error (NULL for top level array)
 * @loc: A string describing where the error occurred
 * @cmd: The tracing command that caused the error
 * @errs: The array of loc-specific static error strings
 * @type: The index into errs[], which produces the specific static err string
 * @pos: The position the caret should be placed in the cmd
 *
 * Writes an error into tracing/error_log of the form:
 *
 * <loc>: error: <text>
 *   Command: <cmd>
 *              ^
 *
 * tracing/error_log is a small log file containing the last
 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 * unless there has been a tracing error, and the error log can be
 * cleared and have its memory freed by writing the empty string in
 * truncation mode to it i.e. echo > tracing/error_log.
 *
 * NOTE: the @errs array along with the @type param are used to
 * produce a static error string - this string is not copied and saved
 * when the error is logged - only a pointer to it is saved.  See
 * existing callers for examples of how static strings are typically
 * defined for use with tracing_log_err().
 */
void tracing_log_err(struct trace_array *tr,
		     const char *loc, const char *cmd,
		     const char **errs, u8 type, u8 pos)
{
	struct tracing_log_err *err;

	if (!tr)
		tr = &global_trace;

	mutex_lock(&tracing_err_log_lock);
	err = get_tracing_log_err(tr);
	if (PTR_ERR(err) == -ENOMEM) {
		mutex_unlock(&tracing_err_log_lock);
		return;
	}

	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);

	err->info.errs = errs;
	err->info.type = type;
	err->info.pos = pos;
	err->info.ts = local_clock();

	list_add_tail(&err->list, &tr->err_log);
	mutex_unlock(&tracing_err_log_lock);
}

static void clear_tracing_err_log(struct trace_array *tr)
{
	struct tracing_log_err *err, *next;

	mutex_lock(&tracing_err_log_lock);
	list_for_each_entry_safe(err, next, &tr->err_log, list) {
		list_del(&err->list);
		kfree(err);
	}

	tr->n_err_log_entries = 0;
	mutex_unlock(&tracing_err_log_lock);
}

static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;

	mutex_lock(&tracing_err_log_lock);

	return seq_list_start(&tr->err_log, *pos);
}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	return seq_list_next(v, &tr->err_log, pos);
}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tracing_err_log_lock);
}

static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
{
	u8 i;

	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
		seq_putc(m, ' ');
	for (i = 0; i < pos; i++)
		seq_putc(m, ' ');
	seq_puts(m, "^\n");
}

static int tracing_err_log_seq_show(struct seq_file *m, void *v)
{
	struct tracing_log_err *err = v;

	if (err) {
		const char *err_text = err->info.errs[err->info.type];
		u64 sec = err->info.ts;
		u32 nsec;

		nsec = do_div(sec, NSEC_PER_SEC);
		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
			   err->loc, err_text);
		seq_printf(m, "%s", err->cmd);
		tracing_err_log_show_pos(m, err->info.pos);
	}

	return 0;
}

static const struct seq_operations tracing_err_log_seq_ops = {
	.start  = tracing_err_log_seq_start,
	.next   = tracing_err_log_seq_next,
	.stop   = tracing_err_log_seq_stop,
	.show   = tracing_err_log_seq_show
};

static int tracing_err_log_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		clear_tracing_err_log(tr);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &tracing_err_log_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = tr;
		} else {
			trace_array_put(tr);
		}
	}
	return ret;
}

static ssize_t tracing_err_log_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *ppos)
{
	return count;
}

static int tracing_err_log_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations tracing_err_log_fops = {
	.open           = tracing_err_log_open,
	.write		= tracing_err_log_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = tracing_err_log_release,
};

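/*
 * Editor's note: a userspace sketch (not part of this file) of the
 * error_log semantics implemented above - reading dumps the logged
 * errors, and opening for write with O_TRUNC frees them, matching
 * tracing_err_log_open().  The tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/error_log", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* "[ts] <loc>: error: ..." */
	close(fd);

	/* Equivalent of "echo > tracing/error_log": clear the log. */
	fd = open("/sys/kernel/tracing/error_log", O_WRONLY | O_TRUNC);
	if (fd >= 0)
		close(fd);
	return 0;
}
#endif
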
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.array_buffer = &tr->array_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->array_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->array_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct trace_buffer	*buffer;
	void			*page;
	int			cpu;
	refcount_t		refcount;
};

static void buffer_ref_release(struct buffer_ref *ref)
{
	if (!refcount_dec_and_test(&ref->refcount))
		return;
	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
}

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	buffer_ref_release(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (refcount_read(&ref->refcount) > INT_MAX/2)
		return false;

	refcount_inc(&ref->refcount);
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_nosteal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->array_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

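/*
 * Editor's note: a userspace sketch (not part of this file) of a
 * consumer for per_cpu/cpuN/trace_pipe_raw, which tracing_buffers_fops
 * above implements.  Reads return whole ring-buffer pages, so the
 * buffer must be at least one page; 4096 and the path are assumptions.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* assumes PAGE_SIZE == 4096 */
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	/* Drain whatever is currently available on CPU 0. */
	while (read(fd, page, sizeof(page)) > 0)
		;	/* hand each binary page to a decoder, e.g. trace-cmd */
	close(fd);
	return 0;
}
#endif
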
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct array_buffer *trace_buf = &tr->array_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

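/*
 * Editor's note: the stats file written by tracing_stats_read() above
 * is plain text; a short sketch (not part of this file) of reading it
 * for CPU 0.  The path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/stats", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* "entries: ...", "overrun: ...", ... */
	}
	close(fd);
	return 0;
}
#endif
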
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	char *buf;
	int r;

	/* 256 should be plenty to hold the amount needed */
	buf = kmalloc(256, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
		      ftrace_update_tot_cnt,
		      ftrace_number_of_pages,
		      ftrace_number_of_groups);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return ret;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

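/*
 * Editor's note: the "snapshot" command registered above is driven
 * through set_ftrace_filter.  A hedged userspace sketch (not part of
 * this file); the function name "schedule" and the path are examples.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Take one snapshot the next time schedule() is entered. */
	const char cmd[] = "schedule:snapshot:1";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, cmd, sizeof(cmd) - 1);
	close(fd);
	/* "!schedule:snapshot:1" written the same way removes the probe. */
	return 0;
}
#endif
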
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	MEM_FAIL(!tr->percpu_dir,
		 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

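/*
 * Editor's illustration (not part of this file): the index-array trick
 * documented above, reduced to a self-contained userspace program so
 * the pointer arithmetic can be checked in isolation.  All names here
 * are made up for the demo.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_tr {
	int id;
	unsigned char flags_index[4];
};

int main(void)
{
	struct demo_tr tr = { .id = 42, .flags_index = { 0, 1, 2, 3 } };
	/* A callback would only receive this opaque pointer: */
	void *data = &tr.flags_index[2];
	unsigned int idx = *(unsigned char *)data;	/* idx = 2 */
	/* ptr - idx == &flags_index[0]; container_of recovers the struct */
	struct demo_tr *back = demo_container_of((unsigned char *)data - idx,
						 struct demo_tr, flags_index);

	printf("index=%u id=%d\n", idx, back->id);	/* index=2 id=42 */
	return 0;
}
#endif
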
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

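/*
 * Editor's note: each core flag above becomes a file under
 * tracing/options/.  A sketch (not part of this file) of flipping one;
 * "print-parent" is one of the core flag names, path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/options/print-parent", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "0", 1);	/* only "0" or "1" are accepted */
	close(fd);
	return 0;
}
#endif
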
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008217struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008218 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008219 struct dentry *parent,
8220 void *data,
8221 const struct file_operations *fops)
8222{
8223 struct dentry *ret;
8224
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008225 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008226 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008227 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008228
8229 return ret;
8230}
8231
8232
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008233static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008234{
8235 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008236
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008237 if (tr->options)
8238 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008239
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008240 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008241 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008242 return NULL;
8243
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008244 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008245 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008246 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008247 return NULL;
8248 }
8249
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008250 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008251}
8252
Steven Rostedt577b7852009-02-26 23:43:05 -05008253static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008254create_trace_option_file(struct trace_array *tr,
8255 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008256 struct tracer_flags *flags,
8257 struct tracer_opt *opt)
8258{
8259 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008260
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008261 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008262 if (!t_options)
8263 return;
8264
8265 topt->flags = flags;
8266 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008267 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008268
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008269 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008270 &trace_options_fops);
8271
Steven Rostedt577b7852009-02-26 23:43:05 -05008272}
8273
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008274static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008275create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008276{
8277 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008278 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008279 struct tracer_flags *flags;
8280 struct tracer_opt *opts;
8281 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008282 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008283
8284 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008285 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008286
8287 flags = tracer->flags;
8288
8289 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008290 return;
8291
8292 /*
8293 * If this is an instance, only create flags for tracers
8294 * the instance may have.
8295 */
8296 if (!trace_ok_for_array(tracer, tr))
8297 return;
8298
8299 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008300 /* Make sure there's no duplicate flags. */
8301 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008302 return;
8303 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008304
8305 opts = flags->opts;
8306
8307 for (cnt = 0; opts[cnt].name; cnt++)
8308 ;
8309
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008310 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008311 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008312 return;
8313
8314 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8315 GFP_KERNEL);
8316 if (!tr_topts) {
8317 kfree(topts);
8318 return;
8319 }
8320
8321 tr->topts = tr_topts;
8322 tr->topts[tr->nr_topts].tracer = tracer;
8323 tr->topts[tr->nr_topts].topts = topts;
8324 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008325
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008326 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008327 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008328 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008329 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008330 "Failed to create trace option: %s",
8331 opts[cnt].name);
8332 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008333}
8334
Steven Rostedta8259072009-02-26 22:19:12 -05008335static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008336create_trace_option_core_file(struct trace_array *tr,
8337 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008338{
8339 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008340
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008341 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008342 if (!t_options)
8343 return NULL;
8344
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008345 return trace_create_file(option, 0644, t_options,
8346 (void *)&tr->trace_flags_index[index],
8347 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008348}
8349
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008350static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008351{
8352 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008353 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008354 int i;
8355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008356 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008357 if (!t_options)
8358 return;
8359
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008360 for (i = 0; trace_options[i]; i++) {
8361 if (top_level ||
8362 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8363 create_trace_option_core_file(tr, trace_options[i], i);
8364 }
Steven Rostedta8259072009-02-26 22:19:12 -05008365}
8366
Steven Rostedt499e5472012-02-22 15:50:28 -05008367static ssize_t
8368rb_simple_read(struct file *filp, char __user *ubuf,
8369 size_t cnt, loff_t *ppos)
8370{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008371 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008372 char buf[64];
8373 int r;
8374
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008375 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008376 r = sprintf(buf, "%d\n", r);
8377
8378 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8379}
8380
8381static ssize_t
8382rb_simple_write(struct file *filp, const char __user *ubuf,
8383 size_t cnt, loff_t *ppos)
8384{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008385 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008386 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008387 unsigned long val;
8388 int ret;
8389
8390 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8391 if (ret)
8392 return ret;
8393
8394 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008395 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008396 if (!!val == tracer_tracing_is_on(tr)) {
8397 val = 0; /* do nothing */
8398 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008399 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008400 if (tr->current_trace->start)
8401 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008402 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008403 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008404 if (tr->current_trace->stop)
8405 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008406 }
8407 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008408 }
8409
8410 (*ppos)++;
8411
8412 return cnt;
8413}
8414
8415static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008416 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008417 .read = rb_simple_read,
8418 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008419 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008420 .llseek = default_llseek,
8421};
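/*
 * rb_simple_fops backs the per-instance "tracing_on" file.  A minimal
 * user-space sketch of the round trip through rb_simple_write() and
 * rb_simple_read(); the usual tracefs mount point is assumed.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int fd = open("/sys/kernel/tracing/tracing_on", O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, "0", 1);			/* rb_simple_write() -> tracer_tracing_off() */
	pread(fd, buf, sizeof(buf) - 1, 0);	/* rb_simple_read() returns "0\n" */
	write(fd, "1", 1);			/* rb_simple_write() -> tracer_tracing_on() */
	close(fd);
	return 0;
}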
8422
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008423static ssize_t
8424buffer_percent_read(struct file *filp, char __user *ubuf,
8425 size_t cnt, loff_t *ppos)
8426{
8427 struct trace_array *tr = filp->private_data;
8428 char buf[64];
8429 int r;
8430
8431 r = tr->buffer_percent;
8432 r = sprintf(buf, "%d\n", r);
8433
8434 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8435}
8436
8437static ssize_t
8438buffer_percent_write(struct file *filp, const char __user *ubuf,
8439 size_t cnt, loff_t *ppos)
8440{
8441 struct trace_array *tr = filp->private_data;
8442 unsigned long val;
8443 int ret;
8444
8445 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8446 if (ret)
8447 return ret;
8448
8449 if (val > 100)
8450 return -EINVAL;
8451
8452 if (!val)
8453 val = 1;
8454
8455 tr->buffer_percent = val;
8456
8457 (*ppos)++;
8458
8459 return cnt;
8460}
8461
8462static const struct file_operations buffer_percent_fops = {
8463 .open = tracing_open_generic_tr,
8464 .read = buffer_percent_read,
8465 .write = buffer_percent_write,
8466 .release = tracing_release_generic_tr,
8467 .llseek = default_llseek,
8468};
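/*
 * A short user-space sketch for the "buffer_percent" file above.
 * buffer_percent_write() rejects values over 100 with -EINVAL and
 * bumps 0 up to 1; the instance path argument here is only an example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_buffer_percent(const char *instance_dir, const char *pct)
{
	char path[256];
	int fd, ret;

	snprintf(path, sizeof(path), "%s/buffer_percent", instance_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, pct, strlen(pct));	/* e.g. "50" */
	close(fd);
	return ret < 0 ? -1 : 0;
}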
8469
YueHaibingff585c52019-06-14 23:32:10 +08008470static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008471
8472static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008473init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008474
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008475static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008476allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008477{
8478 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008479
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008480 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008481
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008482 buf->tr = tr;
8483
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008484 buf->buffer = ring_buffer_alloc(size, rb_flags);
8485 if (!buf->buffer)
8486 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008487
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008488 buf->data = alloc_percpu(struct trace_array_cpu);
8489 if (!buf->data) {
8490 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008491 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008492 return -ENOMEM;
8493 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008494
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008495 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008496 set_buffer_entries(&tr->array_buffer,
8497 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008498
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008499 return 0;
8500}
8501
8502static int allocate_trace_buffers(struct trace_array *tr, int size)
8503{
8504 int ret;
8505
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008506 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008507 if (ret)
8508 return ret;
8509
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008510#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008511 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8512 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008513 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008514 ring_buffer_free(tr->array_buffer.buffer);
8515 tr->array_buffer.buffer = NULL;
8516 free_percpu(tr->array_buffer.data);
8517 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008518 return -ENOMEM;
8519 }
8520 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008521
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008522 /*
8523 * Only the top level trace array gets its snapshot allocated
8524 * from the kernel command line.
8525 */
8526 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008527#endif
8528 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008529}
8530
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008531static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008532{
8533 if (buf->buffer) {
8534 ring_buffer_free(buf->buffer);
8535 buf->buffer = NULL;
8536 free_percpu(buf->data);
8537 buf->data = NULL;
8538 }
8539}
8540
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008541static void free_trace_buffers(struct trace_array *tr)
8542{
8543 if (!tr)
8544 return;
8545
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008546 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008547
8548#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008549 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008550#endif
8551}
8552
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008553static void init_trace_flags_index(struct trace_array *tr)
8554{
8555 int i;
8556
8557 /* Used by the trace options files */
8558 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8559 tr->trace_flags_index[i] = i;
8560}
8561
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008562static void __update_tracer_options(struct trace_array *tr)
8563{
8564 struct tracer *t;
8565
8566 for (t = trace_types; t; t = t->next)
8567 add_tracer_options(tr, t);
8568}
8569
8570static void update_tracer_options(struct trace_array *tr)
8571{
8572 mutex_lock(&trace_types_lock);
8573 __update_tracer_options(tr);
8574 mutex_unlock(&trace_types_lock);
8575}
8576
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008577/* Must have trace_types_lock held */
8578struct trace_array *trace_array_find(const char *instance)
8579{
8580 struct trace_array *tr, *found = NULL;
8581
8582 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8583 if (tr->name && strcmp(tr->name, instance) == 0) {
8584 found = tr;
8585 break;
8586 }
8587 }
8588
8589 return found;
8590}
8591
8592struct trace_array *trace_array_find_get(const char *instance)
8593{
8594 struct trace_array *tr;
8595
8596 mutex_lock(&trace_types_lock);
8597 tr = trace_array_find(instance);
8598 if (tr)
8599 tr->ref++;
8600 mutex_unlock(&trace_types_lock);
8601
8602 return tr;
8603}
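/*
 * Sketch of the intended pairing for the helpers above: a caller that
 * needs a short-lived reference takes it with trace_array_find_get()
 * and drops it with trace_array_put().  The instance name is
 * hypothetical.
 */
static void demo_poke_instance(void)
{
	struct trace_array *tr;

	tr = trace_array_find_get("demo");
	if (!tr)
		return;			/* no such instance */

	/* ... use tr; the reference keeps it from being freed ... */

	trace_array_put(tr);
}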
8604
Divya Indi28879782019-11-20 11:08:38 -08008605static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008606{
Steven Rostedt277ba042012-08-03 16:10:49 -04008607 struct trace_array *tr;
8608 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008609
Steven Rostedt277ba042012-08-03 16:10:49 -04008610 ret = -ENOMEM;
8611 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8612 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008613 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008614
8615 tr->name = kstrdup(name, GFP_KERNEL);
8616 if (!tr->name)
8617 goto out_free_tr;
8618
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008619 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8620 goto out_free_tr;
8621
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008622 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008623
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008624 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8625
Steven Rostedt277ba042012-08-03 16:10:49 -04008626 raw_spin_lock_init(&tr->start_lock);
8627
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008628 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8629
Steven Rostedt277ba042012-08-03 16:10:49 -04008630 tr->current_trace = &nop_trace;
8631
8632 INIT_LIST_HEAD(&tr->systems);
8633 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008634 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008635 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008636
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008637 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008638 goto out_free_tr;
8639
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008640 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008641 if (!tr->dir)
8642 goto out_free_tr;
8643
8644 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008645 if (ret) {
Al Viroa3d1e7e2019-11-18 09:43:10 -05008646 tracefs_remove(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008647 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008648 }
Steven Rostedt277ba042012-08-03 16:10:49 -04008649
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008650 ftrace_init_trace_array(tr);
8651
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008652 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008653 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008654 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008655
8656 list_add(&tr->list, &ftrace_trace_arrays);
8657
Divya Indi28879782019-11-20 11:08:38 -08008658 tr->ref++;
 8659
Divya Indif45d1222019-03-20 11:28:51 -07008661 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008662
8663 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008664 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008665 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008666 kfree(tr->name);
8667 kfree(tr);
8668
Divya Indif45d1222019-03-20 11:28:51 -07008669 return ERR_PTR(ret);
8670}
Steven Rostedt277ba042012-08-03 16:10:49 -04008671
Divya Indif45d1222019-03-20 11:28:51 -07008672static int instance_mkdir(const char *name)
8673{
Divya Indi28879782019-11-20 11:08:38 -08008674 struct trace_array *tr;
8675 int ret;
8676
8677 mutex_lock(&event_mutex);
8678 mutex_lock(&trace_types_lock);
8679
8680 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008681 if (trace_array_find(name))
8682 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08008683
8684 tr = trace_array_create(name);
8685
8686 ret = PTR_ERR_OR_ZERO(tr);
8687
8688out_unlock:
8689 mutex_unlock(&trace_types_lock);
8690 mutex_unlock(&event_mutex);
8691 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008692}
8693
Divya Indi28879782019-11-20 11:08:38 -08008694/**
8695 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8696 * @name: The name of the trace array to be looked up/created.
8697 *
 8698 * Returns a pointer to the trace array with the given name, or
 8699 * NULL if it cannot be created.
8700 *
8701 * NOTE: This function increments the reference counter associated with the
8702 * trace array returned. This makes sure it cannot be freed while in use.
8703 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05008704 * If the trace_array is to be freed, trace_array_destroy() needs to
8705 * be called after the trace_array_put(), or simply let user space delete
8706 * it from the tracefs instances directory. But until the
 8707 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08008708 *
8709 */
8710struct trace_array *trace_array_get_by_name(const char *name)
8711{
8712 struct trace_array *tr;
8713
8714 mutex_lock(&event_mutex);
8715 mutex_lock(&trace_types_lock);
8716
8717 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8718 if (tr->name && strcmp(tr->name, name) == 0)
8719 goto out_unlock;
8720 }
8721
8722 tr = trace_array_create(name);
8723
8724 if (IS_ERR(tr))
8725 tr = NULL;
8726out_unlock:
8727 if (tr)
8728 tr->ref++;
8729
8730 mutex_unlock(&trace_types_lock);
8731 mutex_unlock(&event_mutex);
8732 return tr;
8733}
8734EXPORT_SYMBOL_GPL(trace_array_get_by_name);
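/*
 * A hedged sketch of the lifecycle described in the comment above, as
 * an in-kernel user might follow it.  The instance name and message
 * are illustrative; trace_array_printk(), trace_array_put() and
 * trace_array_destroy() are the exported counterparts.
 */
static struct trace_array *demo_tr;

static int __init demo_init(void)
{
	demo_tr = trace_array_get_by_name("demo");	/* reference held for us */
	if (!demo_tr)
		return -ENOMEM;

	trace_array_printk(demo_tr, _THIS_IP_, "demo instance up\n");
	return 0;
}

static void __exit demo_exit(void)
{
	trace_array_put(demo_tr);	/* drop our reference first... */
	trace_array_destroy(demo_tr);	/* ...then tear the instance down */
}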
8735
Divya Indif45d1222019-03-20 11:28:51 -07008736static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008737{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008738 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008739
Divya Indi28879782019-11-20 11:08:38 -08008740 /* Reference counter for a newly created trace array = 1. */
8741 if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
Divya Indif45d1222019-03-20 11:28:51 -07008742 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008743
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008744 list_del(&tr->list);
8745
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008746 /* Disable all the flags that were enabled coming in */
8747 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8748 if ((1 << i) & ZEROED_TRACE_FLAGS)
8749 set_tracer_flag(tr, 1 << i, 0);
8750 }
8751
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008752 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308753 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008754 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008755 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008756 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05008757 tracefs_remove(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008758 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008759
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008760 for (i = 0; i < tr->nr_topts; i++) {
8761 kfree(tr->topts[i].topts);
8762 }
8763 kfree(tr->topts);
8764
Chunyu Hudb9108e02017-07-20 18:36:09 +08008765 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008766 kfree(tr->name);
8767 kfree(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008768 tr = NULL;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008769
Divya Indif45d1222019-03-20 11:28:51 -07008770 return 0;
8771}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008772
Divya Indie585e642019-08-14 10:55:24 -07008773int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008774{
Divya Indie585e642019-08-14 10:55:24 -07008775 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008776 int ret;
8777
Divya Indie585e642019-08-14 10:55:24 -07008778 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008779 return -EINVAL;
8780
8781 mutex_lock(&event_mutex);
8782 mutex_lock(&trace_types_lock);
8783
Divya Indie585e642019-08-14 10:55:24 -07008784 ret = -ENODEV;
8785
 8786	/* Make sure the trace array exists before destroying it. */
8787 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8788 if (tr == this_tr) {
8789 ret = __remove_instance(tr);
8790 break;
8791 }
8792 }
Divya Indif45d1222019-03-20 11:28:51 -07008793
8794 mutex_unlock(&trace_types_lock);
8795 mutex_unlock(&event_mutex);
8796
8797 return ret;
8798}
8799EXPORT_SYMBOL_GPL(trace_array_destroy);
8800
8801static int instance_rmdir(const char *name)
8802{
8803 struct trace_array *tr;
8804 int ret;
8805
8806 mutex_lock(&event_mutex);
8807 mutex_lock(&trace_types_lock);
8808
8809 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008810 tr = trace_array_find(name);
8811 if (tr)
8812 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008813
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008814 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008815 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008816
8817 return ret;
8818}
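/*
 * instance_mkdir()/instance_rmdir() are what give plain mkdir(2) and
 * rmdir(2) their meaning inside the tracefs "instances" directory.  A
 * minimal user-space sketch (the instance name is arbitrary):
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/kernel/tracing/instances/demo";

	if (mkdir(dir, 0755) < 0)	/* ends up in instance_mkdir("demo") */
		return 1;
	/* ... use the per-instance trace, trace_pipe, events/ ... */
	if (rmdir(dir) < 0)		/* instance_rmdir(); -EBUSY while in use */
		return 1;
	return 0;
}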
8819
Steven Rostedt277ba042012-08-03 16:10:49 -04008820static __init void create_trace_instances(struct dentry *d_tracer)
8821{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008822 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8823 instance_mkdir,
8824 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008825 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04008826 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04008827}
8828
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008829static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008830init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008831{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008832 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008833 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008834
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008835 trace_create_file("available_tracers", 0444, d_tracer,
8836 tr, &show_traces_fops);
8837
8838 trace_create_file("current_tracer", 0644, d_tracer,
8839 tr, &set_tracer_fops);
8840
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008841 trace_create_file("tracing_cpumask", 0644, d_tracer,
8842 tr, &tracing_cpumask_fops);
8843
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008844 trace_create_file("trace_options", 0644, d_tracer,
8845 tr, &tracing_iter_fops);
8846
8847 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008848 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008849
8850 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008851 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008852
8853 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008854 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008855
8856 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8857 tr, &tracing_total_entries_fops);
8858
Wang YanQing238ae932013-05-26 16:52:01 +08008859 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008860 tr, &tracing_free_buffer_fops);
8861
8862 trace_create_file("trace_marker", 0220, d_tracer,
8863 tr, &tracing_mark_fops);
8864
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008865 file = __find_event_file(tr, "ftrace", "print");
8866 if (file && file->dir)
8867 trace_create_file("trigger", 0644, file->dir, file,
8868 &event_trigger_fops);
8869 tr->trace_marker_file = file;
8870
Steven Rostedtfa32e852016-07-06 15:25:08 -04008871 trace_create_file("trace_marker_raw", 0220, d_tracer,
8872 tr, &tracing_mark_raw_fops);
8873
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008874 trace_create_file("trace_clock", 0644, d_tracer, tr,
8875 &trace_clock_fops);
8876
8877 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008878 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008879
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008880 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8881 &trace_time_stamp_mode_fops);
8882
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008883 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008884
8885 trace_create_file("buffer_percent", 0444, d_tracer,
8886 tr, &buffer_percent_fops);
8887
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008888 create_trace_options_dir(tr);
8889
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008890#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008891 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008892#endif
8893
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008894 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008895 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008896
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008897#ifdef CONFIG_TRACER_SNAPSHOT
8898 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008899 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008900#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008901
Tom Zanussi8a062902019-03-31 18:48:15 -05008902 trace_create_file("error_log", 0644, d_tracer,
8903 tr, &tracing_err_log_fops);
8904
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008905 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008906 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008907
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04008908 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008909}
8910
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008911static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008912{
8913 struct vfsmount *mnt;
8914 struct file_system_type *type;
8915
8916 /*
8917 * To maintain backward compatibility for tools that mount
8918 * debugfs to get to the tracing facility, tracefs is automatically
8919 * mounted to the debugfs/tracing directory.
8920 */
8921 type = get_fs_type("tracefs");
8922 if (!type)
8923 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008924 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008925 put_filesystem(type);
8926 if (IS_ERR(mnt))
8927 return NULL;
8928 mntget(mnt);
8929
8930 return mnt;
8931}
8932
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008933/**
8934 * tracing_init_dentry - initialize top level trace array
8935 *
8936 * This is called when creating files or directories in the tracing
 8937 * directory. It is called via fs_initcall() by the boot-up code and
 8938 * is expected to return the dentry of the top-level tracing directory.
8939 */
8940struct dentry *tracing_init_dentry(void)
8941{
8942 struct trace_array *tr = &global_trace;
8943
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05008944 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11008945 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05008946 return ERR_PTR(-EPERM);
8947 }
8948
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008949 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008950 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008951 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008952
Jiaxing Wang8b129192015-11-06 16:04:16 +08008953 if (WARN_ON(!tracefs_initialized()) ||
8954 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8955 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008956 return ERR_PTR(-ENODEV);
8957
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008958 /*
8959 * As there may still be users that expect the tracing
8960 * files to exist in debugfs/tracing, we must automount
8961 * the tracefs file system there, so older tools still
 8962	 * work with the newer kernel.
8963 */
8964 tr->dir = debugfs_create_automount("tracing", NULL,
8965 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008966
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008967 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008968}
8969
Jeremy Linton00f4b652017-05-31 16:56:43 -05008970extern struct trace_eval_map *__start_ftrace_eval_maps[];
8971extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008972
Jeremy Linton5f60b352017-05-31 16:56:47 -05008973static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008974{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008975 int len;
8976
Jeremy Linton02fd7f62017-05-31 16:56:42 -05008977 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008978 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008979}
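/*
 * The eval maps inserted above are emitted by TRACE_DEFINE_ENUM() (and
 * TRACE_DEFINE_SIZEOF()) in trace event headers; each use drops an
 * entry into the __start/__stop_ftrace_eval_maps section.  A sketch of
 * the header side, with a made-up enum, so the names resolve in event
 * print formats:
 *
 *	enum demo_state { DEMO_IDLE, DEMO_RUNNING };
 *
 *	TRACE_DEFINE_ENUM(DEMO_IDLE);
 *	TRACE_DEFINE_ENUM(DEMO_RUNNING);
 *
 *	__print_symbolic(__entry->state,
 *			 { DEMO_IDLE, "idle" },
 *			 { DEMO_RUNNING, "running" })
 */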
8980
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008981#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008982static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008983{
Jeremy Linton99be6472017-05-31 16:56:44 -05008984 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008985 return;
8986
8987 /*
8988 * Modules with bad taint do not have events created, do
8989 * not bother with enums either.
8990 */
8991 if (trace_module_has_bad_taint(mod))
8992 return;
8993
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008994 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008995}
8996
Jeremy Linton681bec02017-05-31 16:56:53 -05008997#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008998static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008999{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009000 union trace_eval_map_item *map;
9001 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009002
Jeremy Linton99be6472017-05-31 16:56:44 -05009003 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009004 return;
9005
Jeremy Linton1793ed92017-05-31 16:56:46 -05009006 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009007
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009008 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009009
9010 while (map) {
9011 if (map->head.mod == mod)
9012 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009013 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009014 last = &map->tail.next;
9015 map = map->tail.next;
9016 }
9017 if (!map)
9018 goto out;
9019
Jeremy Linton5f60b352017-05-31 16:56:47 -05009020 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009021 kfree(map);
9022 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009023 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009024}
9025#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009026static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009027#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009028
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009029static int trace_module_notify(struct notifier_block *self,
9030 unsigned long val, void *data)
9031{
9032 struct module *mod = data;
9033
9034 switch (val) {
9035 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009036 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009037 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009038 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009039 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009040 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009041 }
9042
9043 return 0;
9044}
9045
9046static struct notifier_block trace_module_nb = {
9047 .notifier_call = trace_module_notify,
9048 .priority = 0,
9049};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009050#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009051
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009052static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009053{
9054 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009055
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009056 trace_access_lock_init();
9057
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009058 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05009059 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009060 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009061
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009062 event_trace_init();
9063
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009064 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04009065 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009066
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009067 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009068 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009069
Li Zefan339ae5d2009-04-17 10:34:30 +08009070 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009071 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009072
Avadh Patel69abe6a2009-04-10 16:04:48 -04009073 trace_create_file("saved_cmdlines", 0444, d_tracer,
9074 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009075
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009076 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9077 NULL, &tracing_saved_cmdlines_size_fops);
9078
Michael Sartain99c621d2017-07-05 22:07:15 -06009079 trace_create_file("saved_tgids", 0444, d_tracer,
9080 NULL, &tracing_saved_tgids_fops);
9081
Jeremy Linton5f60b352017-05-31 16:56:47 -05009082 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009083
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009084 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009085
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009086#ifdef CONFIG_MODULES
9087 register_module_notifier(&trace_module_nb);
9088#endif
9089
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009090#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009091 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009092 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009093#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009094
Steven Rostedt277ba042012-08-03 16:10:49 -04009095 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009096
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009097 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009098
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009099 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009100}
9101
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009102static int trace_panic_handler(struct notifier_block *this,
9103 unsigned long event, void *unused)
9104{
Steven Rostedt944ac422008-10-23 19:26:08 -04009105 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009106 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009107 return NOTIFY_OK;
9108}
9109
9110static struct notifier_block trace_panic_notifier = {
9111 .notifier_call = trace_panic_handler,
9112 .next = NULL,
9113 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9114};
9115
9116static int trace_die_handler(struct notifier_block *self,
9117 unsigned long val,
9118 void *data)
9119{
9120 switch (val) {
9121 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009122 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009123 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009124 break;
9125 default:
9126 break;
9127 }
9128 return NOTIFY_OK;
9129}
9130
9131static struct notifier_block trace_die_notifier = {
9132 .notifier_call = trace_die_handler,
9133 .priority = 200
9134};
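/*
 * Both notifiers above key off ftrace_dump_on_oops, which can be set
 * on the kernel command line ("ftrace_dump_on_oops[=orig_cpu]") or at
 * run time through the sysctl.  A small user-space sketch of the
 * run-time knob:
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* dump all CPU buffers on oops/panic */
	close(fd);
	return 0;
}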
9135
9136/*
 9137 * printk is set to a max of 1024; we really don't need it that big.
9138 * Nothing should be printing 1000 characters anyway.
9139 */
9140#define TRACE_MAX_PRINT 1000
9141
9142/*
9143 * Define here KERN_TRACE so that we have one place to modify
9144 * it if we decide to change what log level the ftrace dump
9145 * should be at.
9146 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009147#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009148
Jason Wessel955b61e2010-08-05 09:22:23 -05009149void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009150trace_printk_seq(struct trace_seq *s)
9151{
9152 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009153 if (s->seq.len >= TRACE_MAX_PRINT)
9154 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009155
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009156 /*
9157 * More paranoid code. Although the buffer size is set to
9158 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9159 * an extra layer of protection.
9160 */
9161 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9162 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009163
 9164	/* Should be NUL-terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009165 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009166
9167 printk(KERN_TRACE "%s", s->buffer);
9168
Steven Rostedtf9520752009-03-02 14:04:40 -05009169 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009170}
9171
Jason Wessel955b61e2010-08-05 09:22:23 -05009172void trace_init_global_iter(struct trace_iterator *iter)
9173{
9174 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009175 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009176 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009177 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009178
9179 if (iter->trace && iter->trace->open)
9180 iter->trace->open(iter);
9181
9182 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009183 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009184 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9185
9186 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9187 if (trace_clocks[iter->tr->clock_id].in_ns)
9188 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009189}
9190
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009191void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009192{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009193 /* use static because iter can be a bit big for the stack */
9194 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009195 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009196 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009197 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009198 unsigned long flags;
9199 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009200
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009201 /* Only allow one dump user at a time. */
9202 if (atomic_inc_return(&dump_running) != 1) {
9203 atomic_dec(&dump_running);
9204 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009205 }
9206
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009207 /*
9208 * Always turn off tracing when we dump.
9209 * We don't need to show trace output of what happens
9210 * between multiple crashes.
9211 *
9212 * If the user does a sysrq-z, then they can re-enable
9213 * tracing with echo 1 > tracing_on.
9214 */
9215 tracing_off();
9216
9217 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009218 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009219
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009220 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009221 trace_init_global_iter(&iter);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009222	/* Cannot use kmalloc for iter.temp */
9223 iter.temp = static_temp_buf;
9224 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009225
Steven Rostedtd7690412008-10-01 00:29:53 -04009226 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009227 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009228 }
9229
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009230 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009231
Török Edwinb54d3de2008-11-22 13:28:48 +02009232 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009233 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009234
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009235 switch (oops_dump_mode) {
9236 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009237 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009238 break;
9239 case DUMP_ORIG:
9240 iter.cpu_file = raw_smp_processor_id();
9241 break;
9242 case DUMP_NONE:
9243 goto out_enable;
9244 default:
9245 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009246 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009247 }
9248
9249 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009250
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009251 /* Did function tracer already get disabled? */
9252 if (ftrace_is_dead()) {
9253 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9254 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9255 }
9256
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009257 /*
 9258	 * We need to stop all tracing on all CPUs to read
 9259	 * the next buffer. This is a bit expensive, but is
 9260	 * not done often. We read everything we can,
9261 * and then release the locks again.
9262 */
9263
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009264 while (!trace_empty(&iter)) {
9265
9266 if (!cnt)
9267 printk(KERN_TRACE "---------------------------------\n");
9268
9269 cnt++;
9270
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009271 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009272 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009273
Jason Wessel955b61e2010-08-05 09:22:23 -05009274 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009275 int ret;
9276
9277 ret = print_trace_line(&iter);
9278 if (ret != TRACE_TYPE_NO_CONSUME)
9279 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009280 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009281 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009282
9283 trace_printk_seq(&iter.seq);
9284 }
9285
9286 if (!cnt)
9287 printk(KERN_TRACE " (ftrace buffer empty)\n");
9288 else
9289 printk(KERN_TRACE "---------------------------------\n");
9290
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009291 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009292 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009293
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009294 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009295 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009296 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009297 atomic_dec(&dump_running);
9298 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009299 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009300}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009301EXPORT_SYMBOL_GPL(ftrace_dump);
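/*
 * A hedged in-kernel usage sketch for ftrace_dump(): the function is
 * built to be callable from crash-like context (static iterator,
 * tracing forced off first).  The watchdog hook below is hypothetical.
 */
static void demo_watchdog_fired(void)
{
	/* Spill only the current CPU's ring buffer to the console... */
	ftrace_dump(DUMP_ORIG);

	/* ...or every CPU's buffer: ftrace_dump(DUMP_ALL); */
}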
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009302
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009303int trace_run_command(const char *buf, int (*createfn)(int, char **))
9304{
9305 char **argv;
9306 int argc, ret;
9307
9308 argc = 0;
9309 ret = 0;
9310 argv = argv_split(GFP_KERNEL, buf, &argc);
9311 if (!argv)
9312 return -ENOMEM;
9313
9314 if (argc)
9315 ret = createfn(argc, argv);
9316
9317 argv_free(argv);
9318
9319 return ret;
9320}
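/*
 * Sketch of the callback contract: trace_run_command() splits the line
 * into an argv[] and hands it to createfn.  A toy createfn (not a real
 * tracer hook) that only validates its argument count:
 */
static int demo_createfn(int argc, char **argv)
{
	if (argc < 2)
		return -EINVAL;
	/* argv[0] is the command name, argv[1..argc-1] its arguments */
	return 0;
}

/* e.g.: trace_run_command("demo_cmd some_arg", demo_createfn); */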
9321
9322#define WRITE_BUFSIZE 4096
9323
9324ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9325 size_t count, loff_t *ppos,
9326 int (*createfn)(int, char **))
9327{
9328 char *kbuf, *buf, *tmp;
9329 int ret = 0;
9330 size_t done = 0;
9331 size_t size;
9332
9333 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9334 if (!kbuf)
9335 return -ENOMEM;
9336
9337 while (done < count) {
9338 size = count - done;
9339
9340 if (size >= WRITE_BUFSIZE)
9341 size = WRITE_BUFSIZE - 1;
9342
9343 if (copy_from_user(kbuf, buffer + done, size)) {
9344 ret = -EFAULT;
9345 goto out;
9346 }
9347 kbuf[size] = '\0';
9348 buf = kbuf;
9349 do {
9350 tmp = strchr(buf, '\n');
9351 if (tmp) {
9352 *tmp = '\0';
9353 size = tmp - buf + 1;
9354 } else {
9355 size = strlen(buf);
9356 if (done + size < count) {
9357 if (buf != kbuf)
9358 break;
9359 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9360 pr_warn("Line length is too long: Should be less than %d\n",
9361 WRITE_BUFSIZE - 2);
9362 ret = -EINVAL;
9363 goto out;
9364 }
9365 }
9366 done += size;
9367
9368 /* Remove comments */
9369 tmp = strchr(buf, '#');
9370
9371 if (tmp)
9372 *tmp = '\0';
9373
9374 ret = trace_run_command(buf, createfn);
9375 if (ret)
9376 goto out;
9377 buf += size;
9378
9379 } while (done < count);
9380 }
9381 ret = done;
9382
9383out:
9384 kfree(kbuf);
9385
9386 return ret;
9387}
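/*
 * This helper is the write-side engine behind control files such as
 * kprobe_events: a file's .write hook simply forwards to it, line by
 * line.  A sketch using the toy demo_createfn() above (modeled on, but
 * not copied from, the kprobe code):
 */
static ssize_t demo_probes_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       demo_createfn);
}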
9388
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009389__init static int tracer_alloc_buffers(void)
9390{
Steven Rostedt73c51622009-03-11 13:42:01 -04009391 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309392 int ret = -ENOMEM;
9393
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009394
9395 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009396 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009397 return -EPERM;
9398 }
9399
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009400 /*
 9401	 * Make sure we don't accidentally add more trace options
9402 * than we have bits for.
9403 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009404 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009405
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309406 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9407 goto out;
9408
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009409 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309410 goto out_free_buffer_mask;
9411
Steven Rostedt07d777f2011-09-22 14:01:55 -04009412 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009413 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009414 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009415 trace_printk_init_buffers();
9416
Steven Rostedt73c51622009-03-11 13:42:01 -04009417 /* To save memory, keep the ring buffer size to its minimum */
9418 if (ring_buffer_expanded)
9419 ring_buf_size = trace_buf_size;
9420 else
9421 ring_buf_size = 1;
9422
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309423 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009424 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009425
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009426 raw_spin_lock_init(&global_trace.start_lock);
9427
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009428 /*
 9429	 * The prepare callback allocates some memory for the ring buffer. We
 9430	 * don't free the buffer if the CPU goes down. If we were to free
9431 * the buffer, then the user would lose any trace that was in the
9432 * buffer. The memory will be removed once the "instance" is removed.
9433 */
9434 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9435 "trace/RB:preapre", trace_rb_cpu_prepare,
9436 NULL);
9437 if (ret < 0)
9438 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009439 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009440 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009441 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9442 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009443 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009444
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009445 if (trace_create_savedcmd() < 0)
9446 goto out_free_temp_buffer;
9447
Steven Rostedtab464282008-05-12 21:21:00 +02009448 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009449 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009450 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009451 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009452 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009453
Steven Rostedt499e5472012-02-22 15:50:28 -05009454 if (global_trace.buffer_disabled)
9455 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009456
Steven Rostedte1e232c2014-02-10 23:38:46 -05009457 if (trace_boot_clock) {
9458 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9459 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009460 pr_warn("Trace clock %s not defined, going back to default\n",
9461 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009462 }
9463
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009464 /*
9465 * register_tracer() might reference current_trace, so it
9466 * needs to be set before we register anything. This is
9467 * just a bootstrap of current_trace anyway.
9468 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009469 global_trace.current_trace = &nop_trace;
9470
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009471 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9472
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009473 ftrace_init_global_array_ops(&global_trace);
9474
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009475 init_trace_flags_index(&global_trace);
9476
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009477 register_tracer(&nop_trace);
9478
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009479 /* Function tracing may start here (via kernel command line) */
9480 init_function_trace();
9481
Steven Rostedt60a11772008-05-12 21:20:44 +02009482 /* All seems OK, enable tracing */
9483 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009484
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009485 atomic_notifier_chain_register(&panic_notifier_list,
9486 &trace_panic_notifier);
9487
9488 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009489
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009490 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9491
9492 INIT_LIST_HEAD(&global_trace.systems);
9493 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009494 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009495 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009496 list_add(&global_trace.list, &ftrace_trace_arrays);
9497
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08009498 apply_trace_boot_options();
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04009499
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04009500 register_snapshot_cmd();
9501
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009502 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009503
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009504out_free_savedcmd:
9505 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009506out_free_temp_buffer:
9507 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009508out_rm_hp_state:
9509 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309510out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009511 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309512out_free_buffer_mask:
9513 free_cpumask_var(tracing_buffer_mask);
9514out:
9515 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009516}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009517
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009518void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009519{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009520 if (tracepoint_printk) {
9521 tracepoint_print_iter =
9522 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009523 if (MEM_FAIL(!tracepoint_print_iter,
9524 "Failed to allocate trace iterator\n"))
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009525 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05009526 else
9527 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009528 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009529 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009530}
9531
9532void __init trace_init(void)
9533{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009534 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009535}
9536
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009537__init static int clear_boot_tracer(void)
9538{
9539 /*
 9540	 * The name of the default bootup tracer is stored in an init section.
 9541	 * This function is called from a late initcall. If the boot
 9542	 * tracer was never found and registered, clear the pointer out to
 9543	 * prevent later registration from accessing the buffer that is
 9544	 * about to be freed.
9545 */
9546 if (!default_bootup_tracer)
9547 return 0;
9548
9549 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9550 default_bootup_tracer);
9551 default_bootup_tracer = NULL;
9552
9553 return 0;
9554}
9555
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009556fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04009557late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01009558
9559#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9560__init static int tracing_set_default_clock(void)
9561{
9562 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01009563 if (!trace_boot_clock && !sched_clock_stable()) {
Masami Ichikawabf24daa2020-01-16 22:12:36 +09009564 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9565 pr_warn("Can not set tracing clock due to lockdown\n");
9566 return -EPERM;
9567 }
9568
Chris Wilson3fd49c92018-03-30 16:01:31 +01009569 printk(KERN_WARNING
9570 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9571 "If you want to keep using the local clock, then add:\n"
9572 " \"trace_clock=local\"\n"
9573 "on the kernel command line\n");
9574 tracing_set_clock(&global_trace, "global");
9575 }
9576
9577 return 0;
9578}
9579late_initcall_sync(tracing_set_default_clock);
9580#endif