blob: 6c75410f96988fa887d10a2bbb933890211fcde5 [file] [log] [blame]
Steven Rostedt (VMware)bcea3f92018-08-16 11:23:53 -04001// SPDX-License-Identifier: GPL-2.0
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002/*
3 * ring buffer based function tracer
4 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010013 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020014 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050015#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020016#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050017#include <linux/stacktrace.h>
18#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020019#include <linux/kallsyms.h>
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -040020#include <linux/security.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020021#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040022#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050023#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050025#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020026#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020027#include <linux/hardirq.h>
28#include <linux/linkage.h>
29#include <linux/uaccess.h>
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -040030#include <linux/vmalloc.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020031#include <linux/ftrace.h>
32#include <linux/module.h>
33#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050034#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040035#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010036#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050037#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080038#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090039#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/ctype.h>
41#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020042#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050043#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020044#include <linux/fs.h>
Chunyan Zhang478409d2016-11-21 15:57:18 +080045#include <linux/trace.h>
Chris Wilson3fd49c92018-03-30 16:01:31 +010046#include <linux/sched/clock.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060047#include <linux/sched/rt.h>
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +020048#include <linux/fsnotify.h>
49#include <linux/irq_work.h>
50#include <linux/workqueue.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020051
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020052#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050053#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020054
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010055/*
Steven Rostedt73c51622009-03-11 13:42:01 -040056 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
58 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050059bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040060
61/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010062 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010063 * A selftest will lurk into the ring-buffer to count the
64 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010065 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010066 * at the same time, giving false positive or negative results.
67 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010068static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010069
Steven Rostedtb2821ae2009-02-02 21:38:32 -050070/*
71 * If a tracer is running, we do not want to run SELFTEST.
72 */
Li Zefan020e5f82009-07-01 10:47:05 +080073bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050074
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050075/* Pipe tracepoints to printk */
76struct trace_iterator *tracepoint_print_iter;
77int tracepoint_printk;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -050078static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050079
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010080/* For tracers that don't implement custom flags */
81static struct tracer_opt dummy_tracer_opt[] = {
82 { }
83};
84
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050085static int
86dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010087{
88 return 0;
89}
Steven Rostedt0f048702008-11-05 16:05:44 -050090
91/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040092 * To prevent the comm cache from being overwritten when no
93 * tracing is active, only save the comm when a trace event
94 * occurred.
95 */
Joel Fernandesd914ba32017-06-26 19:01:55 -070096static DEFINE_PER_CPU(bool, trace_taskinfo_save);
Steven Rostedt7ffbd482012-10-11 12:14:25 -040097
98/*
Steven Rostedt0f048702008-11-05 16:05:44 -050099 * Kill all tracing for good (never come back).
100 * It is initialized to 1 but will turn to zero if the initialization
101 * of the tracer is successful. But that is the only place that sets
102 * this back to zero.
103 */
Hannes Eder4fd27352009-02-10 19:44:12 +0100104static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500105
Jason Wessel955b61e2010-08-05 09:22:23 -0500106cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200107
Steven Rostedt944ac422008-10-23 19:26:08 -0400108/*
109 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
110 *
111 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112 * is set, then ftrace_dump is called. This will output the contents
113 * of the ftrace buffers to the console. This is very useful for
114 * capturing traces that lead to crashes and outputing it to a
115 * serial console.
116 *
117 * It is default off, but you can enable it with either specifying
118 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200119 * /proc/sys/kernel/ftrace_dump_on_oops
120 * Set 1 if you want to dump buffers of all CPUs
121 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400122 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200123
124enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400125
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400126/* When set, tracing will stop when a WARN*() is hit */
127int __disable_trace_on_warning;
128
Jeremy Linton681bec02017-05-31 16:56:53 -0500129#ifdef CONFIG_TRACE_EVAL_MAP_FILE
130/* Map of enums to their values, for "eval_map" file */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500131struct trace_eval_map_head {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400132 struct module *mod;
133 unsigned long length;
134};
135
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500136union trace_eval_map_item;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400137
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500138struct trace_eval_map_tail {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400139 /*
140 * "end" is first and points to NULL as it must be different
Jeremy Linton00f4b652017-05-31 16:56:43 -0500141 * than "mod" or "eval_string"
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400142 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500143 union trace_eval_map_item *next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400144 const char *end; /* points to NULL */
145};
146
Jeremy Linton1793ed92017-05-31 16:56:46 -0500147static DEFINE_MUTEX(trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400148
149/*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500150 * The trace_eval_maps are saved in an array with two extra elements,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400151 * one at the beginning, and one at the end. The beginning item contains
152 * the count of the saved maps (head.length), and the module they
153 * belong to if not built in (head.mod). The ending item contains a
Jeremy Linton681bec02017-05-31 16:56:53 -0500154 * pointer to the next array of saved eval_map items.
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400155 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500156union trace_eval_map_item {
Jeremy Linton00f4b652017-05-31 16:56:43 -0500157 struct trace_eval_map map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500158 struct trace_eval_map_head head;
159 struct trace_eval_map_tail tail;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400160};
161
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500162static union trace_eval_map_item *trace_eval_maps;
Jeremy Linton681bec02017-05-31 16:56:53 -0500163#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400164
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500165static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Thomas Gleixnerc438f142019-04-25 11:45:15 +0200166static void ftrace_trace_userstack(struct ring_buffer *buffer,
167 unsigned long flags, int pc);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500168
Li Zefanee6c2c12009-09-18 14:06:47 +0800169#define MAX_TRACER_SIZE 100
170static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500171static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100172
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500173static bool allocate_snapshot;
174
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200175static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100176{
Chen Gang67012ab2013-04-08 12:06:44 +0800177 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500178 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400179 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500180 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100181 return 1;
182}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200183__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100184
Steven Rostedt944ac422008-10-23 19:26:08 -0400185static int __init set_ftrace_dump_on_oops(char *str)
186{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200187 if (*str++ != '=' || !*str) {
188 ftrace_dump_on_oops = DUMP_ALL;
189 return 1;
190 }
191
192 if (!strcmp("orig_cpu", str)) {
193 ftrace_dump_on_oops = DUMP_ORIG;
194 return 1;
195 }
196
197 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400198}
199__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200200
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400201static int __init stop_trace_on_warning(char *str)
202{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200203 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
204 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400205 return 1;
206}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200207__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400208
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400209static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500210{
211 allocate_snapshot = true;
212 /* We also need the main ring buffer expanded */
213 ring_buffer_expanded = true;
214 return 1;
215}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400216__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500217
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -0400218
219static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -0400220
221static int __init set_trace_boot_options(char *str)
222{
Chen Gang67012ab2013-04-08 12:06:44 +0800223 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -0400224 return 0;
225}
226__setup("trace_options=", set_trace_boot_options);
227
Steven Rostedte1e232c2014-02-10 23:38:46 -0500228static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
229static char *trace_boot_clock __initdata;
230
231static int __init set_trace_boot_clock(char *str)
232{
233 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
234 trace_boot_clock = trace_boot_clock_buf;
235 return 0;
236}
237__setup("trace_clock=", set_trace_boot_clock);
238
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500239static int __init set_tracepoint_printk(char *str)
240{
241 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
242 tracepoint_printk = 1;
243 return 1;
244}
245__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400246
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100247unsigned long long ns2usecs(u64 nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200248{
249 nsec += 500;
250 do_div(nsec, 1000);
251 return nsec;
252}
253
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400254/* trace_flags holds trace_options default values */
255#define TRACE_DEFAULT_FLAGS \
256 (FUNCTION_DEFAULT_FLAGS | \
257 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
258 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
259 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
260 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
261
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400262/* trace_options that are only supported by global_trace */
263#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
264 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
265
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400266/* trace_flags that are default zero for instances */
267#define ZEROED_TRACE_FLAGS \
Namhyung Kim1e104862017-04-17 11:44:28 +0900268 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400269
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200270/*
Joel Fernandes67d04bb2017-02-16 20:10:58 -0800271 * The global_trace is the descriptor that holds the top-level tracing
272 * buffers for the live tracing.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200273 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400274static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
276};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200277
Steven Rostedtae63b31e2012-05-03 23:09:03 -0400278LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200279
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400280int trace_array_get(struct trace_array *this_tr)
281{
282 struct trace_array *tr;
283 int ret = -ENODEV;
284
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 if (tr == this_tr) {
288 tr->ref++;
289 ret = 0;
290 break;
291 }
292 }
293 mutex_unlock(&trace_types_lock);
294
295 return ret;
296}
297
298static void __trace_array_put(struct trace_array *this_tr)
299{
300 WARN_ON(!this_tr->ref);
301 this_tr->ref--;
302}
303
Divya Indi28879782019-11-20 11:08:38 -0800304/**
305 * trace_array_put - Decrement the reference counter for this trace array.
306 *
307 * NOTE: Use this when we no longer need the trace array returned by
308 * trace_array_get_by_name(). This ensures the trace array can be later
309 * destroyed.
310 *
311 */
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400312void trace_array_put(struct trace_array *this_tr)
313{
Divya Indi28879782019-11-20 11:08:38 -0800314 if (!this_tr)
315 return;
316
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400317 mutex_lock(&trace_types_lock);
318 __trace_array_put(this_tr);
319 mutex_unlock(&trace_types_lock);
320}
Divya Indi28879782019-11-20 11:08:38 -0800321EXPORT_SYMBOL_GPL(trace_array_put);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400322
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -0400323int tracing_check_open_get_tr(struct trace_array *tr)
324{
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -0400325 int ret;
326
327 ret = security_locked_down(LOCKDOWN_TRACEFS);
328 if (ret)
329 return ret;
330
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -0400331 if (tracing_disabled)
332 return -ENODEV;
333
334 if (tr && trace_array_get(tr) < 0)
335 return -ENODEV;
336
337 return 0;
338}
339
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400340int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500341 struct ring_buffer *buffer,
342 struct ring_buffer_event *event)
343{
344 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
345 !filter_match_preds(call->filter, rec)) {
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -0400346 __trace_event_discard_commit(buffer, event);
Tom Zanussif306cc82013-10-24 08:34:17 -0500347 return 1;
348 }
349
350 return 0;
351}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500352
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400353void trace_free_pid_list(struct trace_pid_list *pid_list)
354{
355 vfree(pid_list->pids);
356 kfree(pid_list);
357}
358
Steven Rostedtd8275c42016-04-14 12:15:22 -0400359/**
360 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
361 * @filtered_pids: The list of pids to check
362 * @search_pid: The PID to find in @filtered_pids
363 *
364 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
365 */
366bool
367trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
368{
369 /*
370 * If pid_max changed after filtered_pids was created, we
371 * by default ignore all pids greater than the previous pid_max.
372 */
373 if (search_pid >= filtered_pids->pid_max)
374 return false;
375
376 return test_bit(search_pid, filtered_pids->pids);
377}
378
379/**
380 * trace_ignore_this_task - should a task be ignored for tracing
381 * @filtered_pids: The list of pids to check
382 * @task: The task that should be ignored if not filtered
383 *
384 * Checks if @task should be traced or not from @filtered_pids.
385 * Returns true if @task should *NOT* be traced.
386 * Returns false if @task should be traced.
387 */
388bool
389trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
390{
391 /*
392 * Return false, because if filtered_pids does not exist,
393 * all pids are good to trace.
394 */
395 if (!filtered_pids)
396 return false;
397
398 return !trace_find_filtered_pid(filtered_pids, task->pid);
399}
400
401/**
Matthias Kaehlckef08367b2019-05-23 12:26:28 -0700402 * trace_filter_add_remove_task - Add or remove a task from a pid_list
Steven Rostedtd8275c42016-04-14 12:15:22 -0400403 * @pid_list: The list to modify
404 * @self: The current task for fork or NULL for exit
405 * @task: The task to add or remove
406 *
407 * If adding a task, if @self is defined, the task is only added if @self
408 * is also included in @pid_list. This happens on fork and tasks should
409 * only be added when the parent is listed. If @self is NULL, then the
410 * @task pid will be removed from the list, which would happen on exit
411 * of a task.
412 */
413void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
414 struct task_struct *self,
415 struct task_struct *task)
416{
417 if (!pid_list)
418 return;
419
420 /* For forks, we only add if the forking task is listed */
421 if (self) {
422 if (!trace_find_filtered_pid(pid_list, self->pid))
423 return;
424 }
425
426 /* Sorry, but we don't support pid_max changing after setting */
427 if (task->pid >= pid_list->pid_max)
428 return;
429
430 /* "self" is set for forks, and NULL for exits */
431 if (self)
432 set_bit(task->pid, pid_list->pids);
433 else
434 clear_bit(task->pid, pid_list->pids);
435}
436
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400437/**
438 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
439 * @pid_list: The pid list to show
440 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
441 * @pos: The position of the file
442 *
443 * This is used by the seq_file "next" operation to iterate the pids
444 * listed in a trace_pid_list structure.
445 *
446 * Returns the pid+1 as we want to display pid of zero, but NULL would
447 * stop the iteration.
448 */
449void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
450{
451 unsigned long pid = (unsigned long)v;
452
453 (*pos)++;
454
455 /* pid already is +1 of the actual prevous bit */
456 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
457
458 /* Return pid + 1 to allow zero to be represented */
459 if (pid < pid_list->pid_max)
460 return (void *)(pid + 1);
461
462 return NULL;
463}
464
465/**
466 * trace_pid_start - Used for seq_file to start reading pid lists
467 * @pid_list: The pid list to show
468 * @pos: The position of the file
469 *
470 * This is used by seq_file "start" operation to start the iteration
471 * of listing pids.
472 *
473 * Returns the pid+1 as we want to display pid of zero, but NULL would
474 * stop the iteration.
475 */
476void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
477{
478 unsigned long pid;
479 loff_t l = 0;
480
481 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
482 if (pid >= pid_list->pid_max)
483 return NULL;
484
485 /* Return pid + 1 so that zero can be the exit value */
486 for (pid++; pid && l < *pos;
487 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
488 ;
489 return (void *)pid;
490}
491
492/**
493 * trace_pid_show - show the current pid in seq_file processing
494 * @m: The seq_file structure to write into
495 * @v: A void pointer of the pid (+1) value to display
496 *
497 * Can be directly used by seq_file operations to display the current
498 * pid value.
499 */
500int trace_pid_show(struct seq_file *m, void *v)
501{
502 unsigned long pid = (unsigned long)v - 1;
503
504 seq_printf(m, "%lu\n", pid);
505 return 0;
506}
507
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400508/* 128 should be much more than enough */
509#define PID_BUF_SIZE 127
510
511int trace_pid_write(struct trace_pid_list *filtered_pids,
512 struct trace_pid_list **new_pid_list,
513 const char __user *ubuf, size_t cnt)
514{
515 struct trace_pid_list *pid_list;
516 struct trace_parser parser;
517 unsigned long val;
518 int nr_pids = 0;
519 ssize_t read = 0;
520 ssize_t ret = 0;
521 loff_t pos;
522 pid_t pid;
523
524 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
525 return -ENOMEM;
526
527 /*
528 * Always recreate a new array. The write is an all or nothing
529 * operation. Always create a new array when adding new pids by
530 * the user. If the operation fails, then the current list is
531 * not modified.
532 */
533 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
Wenwen Wang91862cc2019-04-19 21:22:59 -0500534 if (!pid_list) {
535 trace_parser_put(&parser);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400536 return -ENOMEM;
Wenwen Wang91862cc2019-04-19 21:22:59 -0500537 }
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400538
539 pid_list->pid_max = READ_ONCE(pid_max);
540
541 /* Only truncating will shrink pid_max */
542 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
543 pid_list->pid_max = filtered_pids->pid_max;
544
545 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
546 if (!pid_list->pids) {
Wenwen Wang91862cc2019-04-19 21:22:59 -0500547 trace_parser_put(&parser);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400548 kfree(pid_list);
549 return -ENOMEM;
550 }
551
552 if (filtered_pids) {
553 /* copy the current bits to the new max */
Wei Yongjun67f20b02016-07-04 15:10:04 +0000554 for_each_set_bit(pid, filtered_pids->pids,
555 filtered_pids->pid_max) {
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400556 set_bit(pid, pid_list->pids);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400557 nr_pids++;
558 }
559 }
560
561 while (cnt > 0) {
562
563 pos = 0;
564
565 ret = trace_get_user(&parser, ubuf, cnt, &pos);
566 if (ret < 0 || !trace_parser_loaded(&parser))
567 break;
568
569 read += ret;
570 ubuf += ret;
571 cnt -= ret;
572
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400573 ret = -EINVAL;
574 if (kstrtoul(parser.buffer, 0, &val))
575 break;
576 if (val >= pid_list->pid_max)
577 break;
578
579 pid = (pid_t)val;
580
581 set_bit(pid, pid_list->pids);
582 nr_pids++;
583
584 trace_parser_clear(&parser);
585 ret = 0;
586 }
587 trace_parser_put(&parser);
588
589 if (ret < 0) {
590 trace_free_pid_list(pid_list);
591 return ret;
592 }
593
594 if (!nr_pids) {
595 /* Cleared the list of pids */
596 trace_free_pid_list(pid_list);
597 read = ret;
598 pid_list = NULL;
599 }
600
601 *new_pid_list = pid_list;
602
603 return read;
604}
605
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100606static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400607{
608 u64 ts;
609
610 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700611 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400612 return trace_clock_local();
613
Alexander Z Lam94571582013-08-02 18:36:16 -0700614 ts = ring_buffer_time_stamp(buf->buffer, cpu);
615 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400616
617 return ts;
618}
619
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100620u64 ftrace_now(int cpu)
Alexander Z Lam94571582013-08-02 18:36:16 -0700621{
622 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
623}
624
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400625/**
626 * tracing_is_enabled - Show if global_trace has been disabled
627 *
628 * Shows if the global trace has been enabled or not. It uses the
629 * mirror flag "buffer_disabled" to be used in fast paths such as for
630 * the irqsoff tracer. But it may be inaccurate due to races. If you
631 * need to know the accurate state, use tracing_is_on() which is a little
632 * slower, but accurate.
633 */
Steven Rostedt90369902008-11-05 16:05:44 -0500634int tracing_is_enabled(void)
635{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400636 /*
637 * For quick access (irqsoff uses this in fast path), just
638 * return the mirror variable of the state of the ring buffer.
639 * It's a little racy, but we don't really care.
640 */
641 smp_rmb();
642 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500643}
644
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200645/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400646 * trace_buf_size is the size in bytes that is allocated
647 * for a buffer. Note, the number of bytes is always rounded
648 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400649 *
650 * This number is purposely set to a low number of 16384.
651 * If the dump on oops happens, it will be much appreciated
652 * to not have to wait for all that output. Anyway this can be
653 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200654 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400655#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400656
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400657static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200658
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200659/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200660static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200661
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200662/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200663 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200664 */
Alexander Z Lama82274152013-07-01 19:37:54 -0700665DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200666
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800667/*
668 * serialize the access of the ring buffer
669 *
670 * ring buffer serializes readers, but it is low level protection.
671 * The validity of the events (which returns by ring_buffer_peek() ..etc)
672 * are not protected by ring buffer.
673 *
674 * The content of events may become garbage if we allow other process consumes
675 * these events concurrently:
676 * A) the page of the consumed events may become a normal page
677 * (not reader page) in ring buffer, and this page will be rewrited
678 * by events producer.
679 * B) The page of the consumed events may become a page for splice_read,
680 * and this page will be returned to system.
681 *
682 * These primitives allow multi process access to different cpu ring buffer
683 * concurrently.
684 *
685 * These primitives don't distinguish read-only and read-consume access.
686 * Multi read-only access are also serialized.
687 */
688
689#ifdef CONFIG_SMP
690static DECLARE_RWSEM(all_cpu_access_lock);
691static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500695 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800696 /* gain it for accessing the whole ring buffer. */
697 down_write(&all_cpu_access_lock);
698 } else {
699 /* gain it for accessing a cpu ring buffer. */
700
Steven Rostedtae3b5092013-01-23 15:22:59 -0500701 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800702 down_read(&all_cpu_access_lock);
703
704 /* Secondly block other access to this @cpu ring buffer. */
705 mutex_lock(&per_cpu(cpu_access_lock, cpu));
706 }
707}
708
709static inline void trace_access_unlock(int cpu)
710{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500711 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800712 up_write(&all_cpu_access_lock);
713 } else {
714 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
715 up_read(&all_cpu_access_lock);
716 }
717}
718
719static inline void trace_access_lock_init(void)
720{
721 int cpu;
722
723 for_each_possible_cpu(cpu)
724 mutex_init(&per_cpu(cpu_access_lock, cpu));
725}
726
727#else
728
729static DEFINE_MUTEX(access_lock);
730
731static inline void trace_access_lock(int cpu)
732{
733 (void)cpu;
734 mutex_lock(&access_lock);
735}
736
737static inline void trace_access_unlock(int cpu)
738{
739 (void)cpu;
740 mutex_unlock(&access_lock);
741}
742
743static inline void trace_access_lock_init(void)
744{
745}
746
747#endif
748
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400749#ifdef CONFIG_STACKTRACE
750static void __ftrace_trace_stack(struct ring_buffer *buffer,
751 unsigned long flags,
752 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400753static inline void ftrace_trace_stack(struct trace_array *tr,
754 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400755 unsigned long flags,
756 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400757
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400758#else
759static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
760 unsigned long flags,
761 int skip, int pc, struct pt_regs *regs)
762{
763}
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400764static inline void ftrace_trace_stack(struct trace_array *tr,
765 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400766 unsigned long flags,
767 int skip, int pc, struct pt_regs *regs)
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400768{
769}
770
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400771#endif
772
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500773static __always_inline void
774trace_event_setup(struct ring_buffer_event *event,
775 int type, unsigned long flags, int pc)
776{
777 struct trace_entry *ent = ring_buffer_event_data(event);
778
Cong Wang46710f32019-05-25 09:57:59 -0700779 tracing_generic_entry_update(ent, type, flags, pc);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500780}
781
782static __always_inline struct ring_buffer_event *
783__trace_buffer_lock_reserve(struct ring_buffer *buffer,
784 int type,
785 unsigned long len,
786 unsigned long flags, int pc)
787{
788 struct ring_buffer_event *event;
789
790 event = ring_buffer_lock_reserve(buffer, len);
791 if (event != NULL)
792 trace_event_setup(event, type, flags, pc);
793
794 return event;
795}
796
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400797void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400798{
799 if (tr->trace_buffer.buffer)
800 ring_buffer_record_on(tr->trace_buffer.buffer);
801 /*
802 * This flag is looked at when buffers haven't been allocated
803 * yet, or by some tracers (like irqsoff), that just want to
804 * know if the ring buffer has been disabled, but it can handle
805 * races of where it gets disabled but we still do a record.
806 * As the check is in the fast path of the tracers, it is more
807 * important to be fast than accurate.
808 */
809 tr->buffer_disabled = 0;
810 /* Make the flag seen by readers */
811 smp_wmb();
812}
813
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200814/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500815 * tracing_on - enable tracing buffers
816 *
817 * This function enables tracing buffers that may have been
818 * disabled with tracing_off.
819 */
820void tracing_on(void)
821{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400822 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500823}
824EXPORT_SYMBOL_GPL(tracing_on);
825
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500826
827static __always_inline void
828__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
829{
Joel Fernandesd914ba32017-06-26 19:01:55 -0700830 __this_cpu_write(trace_taskinfo_save, true);
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500831
832 /* If this is the temp buffer, we need to commit fully */
833 if (this_cpu_read(trace_buffered_event) == event) {
834 /* Length is in event->array[0] */
835 ring_buffer_write(buffer, event->array[0], &event->array[1]);
836 /* Release the temp buffer */
837 this_cpu_dec(trace_buffered_event_cnt);
838 } else
839 ring_buffer_unlock_commit(buffer, event);
840}
841
Steven Rostedt499e5472012-02-22 15:50:28 -0500842/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500843 * __trace_puts - write a constant string into the trace buffer.
844 * @ip: The address of the caller
845 * @str: The constant string to write
846 * @size: The size of the string.
847 */
848int __trace_puts(unsigned long ip, const char *str, int size)
849{
850 struct ring_buffer_event *event;
851 struct ring_buffer *buffer;
852 struct print_entry *entry;
853 unsigned long irq_flags;
854 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800855 int pc;
856
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400857 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800858 return 0;
859
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800860 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500861
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500862 if (unlikely(tracing_selftest_running || tracing_disabled))
863 return 0;
864
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500865 alloc = sizeof(*entry) + size + 2; /* possible \n added */
866
867 local_save_flags(irq_flags);
868 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500869 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
870 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500871 if (!event)
872 return 0;
873
874 entry = ring_buffer_event_data(event);
875 entry->ip = ip;
876
877 memcpy(&entry->buf, str, size);
878
879 /* Add a newline if necessary */
880 if (entry->buf[size - 1] != '\n') {
881 entry->buf[size] = '\n';
882 entry->buf[size + 1] = '\0';
883 } else
884 entry->buf[size] = '\0';
885
886 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400887 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500888
889 return size;
890}
891EXPORT_SYMBOL_GPL(__trace_puts);
892
893/**
894 * __trace_bputs - write the pointer to a constant string into trace buffer
895 * @ip: The address of the caller
896 * @str: The constant string to write to the buffer to
897 */
898int __trace_bputs(unsigned long ip, const char *str)
899{
900 struct ring_buffer_event *event;
901 struct ring_buffer *buffer;
902 struct bputs_entry *entry;
903 unsigned long irq_flags;
904 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800905 int pc;
906
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400907 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800908 return 0;
909
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800910 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500911
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500912 if (unlikely(tracing_selftest_running || tracing_disabled))
913 return 0;
914
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500915 local_save_flags(irq_flags);
916 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500917 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
918 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500919 if (!event)
920 return 0;
921
922 entry = ring_buffer_event_data(event);
923 entry->ip = ip;
924 entry->str = str;
925
926 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400927 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500928
929 return 1;
930}
931EXPORT_SYMBOL_GPL(__trace_bputs);
932
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500933#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussia35873a2019-02-13 17:42:45 -0600934void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500935{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500936 struct tracer *tracer = tr->current_trace;
937 unsigned long flags;
938
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500939 if (in_nmi()) {
940 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
941 internal_trace_puts("*** snapshot is being ignored ***\n");
942 return;
943 }
944
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500945 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500946 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
947 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500948 tracing_off();
949 return;
950 }
951
952 /* Note, snapshot can not be used when the tracer uses it */
953 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500954 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
955 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500956 return;
957 }
958
959 local_irq_save(flags);
Tom Zanussia35873a2019-02-13 17:42:45 -0600960 update_max_tr(tr, current, smp_processor_id(), cond_data);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500961 local_irq_restore(flags);
962}
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -0400963
Tom Zanussia35873a2019-02-13 17:42:45 -0600964void tracing_snapshot_instance(struct trace_array *tr)
965{
966 tracing_snapshot_instance_cond(tr, NULL);
967}
968
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -0400969/**
Chunyu Hu5a93bae22017-10-19 14:32:33 +0800970 * tracing_snapshot - take a snapshot of the current buffer.
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -0400971 *
972 * This causes a swap between the snapshot buffer and the current live
973 * tracing buffer. You can use this to take snapshots of the live
974 * trace when some condition is triggered, but continue to trace.
975 *
976 * Note, make sure to allocate the snapshot with either
977 * a tracing_snapshot_alloc(), or by doing it manually
978 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
979 *
980 * If the snapshot buffer is not allocated, it will stop tracing.
981 * Basically making a permanent snapshot.
982 */
983void tracing_snapshot(void)
984{
985 struct trace_array *tr = &global_trace;
986
987 tracing_snapshot_instance(tr);
988}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500989EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500990
Tom Zanussia35873a2019-02-13 17:42:45 -0600991/**
992 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
993 * @tr: The tracing instance to snapshot
994 * @cond_data: The data to be tested conditionally, and possibly saved
995 *
996 * This is the same as tracing_snapshot() except that the snapshot is
997 * conditional - the snapshot will only happen if the
998 * cond_snapshot.update() implementation receiving the cond_data
999 * returns true, which means that the trace array's cond_snapshot
1000 * update() operation used the cond_data to determine whether the
1001 * snapshot should be taken, and if it was, presumably saved it along
1002 * with the snapshot.
1003 */
1004void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1005{
1006 tracing_snapshot_instance_cond(tr, cond_data);
1007}
1008EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1009
1010/**
1011 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1012 * @tr: The tracing instance
1013 *
1014 * When the user enables a conditional snapshot using
1015 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1016 * with the snapshot. This accessor is used to retrieve it.
1017 *
1018 * Should not be called from cond_snapshot.update(), since it takes
1019 * the tr->max_lock lock, which the code calling
1020 * cond_snapshot.update() has already done.
1021 *
1022 * Returns the cond_data associated with the trace array's snapshot.
1023 */
1024void *tracing_cond_snapshot_data(struct trace_array *tr)
1025{
1026 void *cond_data = NULL;
1027
1028 arch_spin_lock(&tr->max_lock);
1029
1030 if (tr->cond_snapshot)
1031 cond_data = tr->cond_snapshot->cond_data;
1032
1033 arch_spin_unlock(&tr->max_lock);
1034
1035 return cond_data;
1036}
1037EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1038
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001039static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1040 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001041static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1042
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04001043int tracing_alloc_snapshot_instance(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001044{
1045 int ret;
1046
1047 if (!tr->allocated_snapshot) {
1048
1049 /* allocate spare buffer */
1050 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1051 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1052 if (ret < 0)
1053 return ret;
1054
1055 tr->allocated_snapshot = true;
1056 }
1057
1058 return 0;
1059}
1060
Fabian Frederickad1438a2014-04-17 21:44:42 +02001061static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001062{
1063 /*
1064 * We don't free the ring buffer. instead, resize it because
1065 * The max_tr ring buffer has some state (e.g. ring->clock) and
1066 * we want preserve it.
1067 */
1068 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1069 set_buffer_entries(&tr->max_buffer, 1);
1070 tracing_reset_online_cpus(&tr->max_buffer);
1071 tr->allocated_snapshot = false;
1072}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001073
1074/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001075 * tracing_alloc_snapshot - allocate snapshot buffer.
1076 *
1077 * This only allocates the snapshot buffer if it isn't already
1078 * allocated - it doesn't also take a snapshot.
1079 *
1080 * This is meant to be used in cases where the snapshot buffer needs
1081 * to be set up for events that can't sleep but need to be able to
1082 * trigger a snapshot.
1083 */
1084int tracing_alloc_snapshot(void)
1085{
1086 struct trace_array *tr = &global_trace;
1087 int ret;
1088
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04001089 ret = tracing_alloc_snapshot_instance(tr);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001090 WARN_ON(ret < 0);
1091
1092 return ret;
1093}
1094EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1095
1096/**
Chunyu Hu5a93bae22017-10-19 14:32:33 +08001097 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001098 *
Chunyu Hu5a93bae22017-10-19 14:32:33 +08001099 * This is similar to tracing_snapshot(), but it will allocate the
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001100 * snapshot buffer if it isn't already allocated. Use this only
1101 * where it is safe to sleep, as the allocation may sleep.
1102 *
1103 * This causes a swap between the snapshot buffer and the current live
1104 * tracing buffer. You can use this to take snapshots of the live
1105 * trace when some condition is triggered, but continue to trace.
1106 */
1107void tracing_snapshot_alloc(void)
1108{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001109 int ret;
1110
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001111 ret = tracing_alloc_snapshot();
1112 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001113 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001114
1115 tracing_snapshot();
1116}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001117EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001118
1119/**
1120 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1121 * @tr: The tracing instance
1122 * @cond_data: User data to associate with the snapshot
1123 * @update: Implementation of the cond_snapshot update function
1124 *
1125 * Check whether the conditional snapshot for the given instance has
1126 * already been enabled, or if the current tracer is already using a
1127 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1128 * save the cond_data and update function inside.
1129 *
1130 * Returns 0 if successful, error otherwise.
1131 */
1132int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1133 cond_update_fn_t update)
1134{
1135 struct cond_snapshot *cond_snapshot;
1136 int ret = 0;
1137
1138 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1139 if (!cond_snapshot)
1140 return -ENOMEM;
1141
1142 cond_snapshot->cond_data = cond_data;
1143 cond_snapshot->update = update;
1144
1145 mutex_lock(&trace_types_lock);
1146
1147 ret = tracing_alloc_snapshot_instance(tr);
1148 if (ret)
1149 goto fail_unlock;
1150
1151 if (tr->current_trace->use_max_tr) {
1152 ret = -EBUSY;
1153 goto fail_unlock;
1154 }
1155
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001156 /*
1157 * The cond_snapshot can only change to NULL without the
1158 * trace_types_lock. We don't care if we race with it going
1159 * to NULL, but we want to make sure that it's not set to
1160 * something other than NULL when we get here, which we can
1161 * do safely with only holding the trace_types_lock and not
1162 * having to take the max_lock.
1163 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001164 if (tr->cond_snapshot) {
1165 ret = -EBUSY;
1166 goto fail_unlock;
1167 }
1168
1169 arch_spin_lock(&tr->max_lock);
1170 tr->cond_snapshot = cond_snapshot;
1171 arch_spin_unlock(&tr->max_lock);
1172
1173 mutex_unlock(&trace_types_lock);
1174
1175 return ret;
1176
1177 fail_unlock:
1178 mutex_unlock(&trace_types_lock);
1179 kfree(cond_snapshot);
1180 return ret;
1181}
1182EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1183
1184/**
1185 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1186 * @tr: The tracing instance
1187 *
1188 * Check whether the conditional snapshot for the given instance is
1189 * enabled; if so, free the cond_snapshot associated with it,
1190 * otherwise return -EINVAL.
1191 *
1192 * Returns 0 if successful, error otherwise.
1193 */
1194int tracing_snapshot_cond_disable(struct trace_array *tr)
1195{
1196 int ret = 0;
1197
1198 arch_spin_lock(&tr->max_lock);
1199
1200 if (!tr->cond_snapshot)
1201 ret = -EINVAL;
1202 else {
1203 kfree(tr->cond_snapshot);
1204 tr->cond_snapshot = NULL;
1205 }
1206
1207 arch_spin_unlock(&tr->max_lock);
1208
1209 return ret;
1210}
1211EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001212#else
1213void tracing_snapshot(void)
1214{
1215 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1216}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001217EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001218void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1219{
1220 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1221}
1222EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001223int tracing_alloc_snapshot(void)
1224{
1225 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1226 return -ENODEV;
1227}
1228EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001229void tracing_snapshot_alloc(void)
1230{
1231 /* Give warning */
1232 tracing_snapshot();
1233}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001234EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001235void *tracing_cond_snapshot_data(struct trace_array *tr)
1236{
1237 return NULL;
1238}
1239EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1240int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1241{
1242 return -ENODEV;
1243}
1244EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1245int tracing_snapshot_cond_disable(struct trace_array *tr)
1246{
1247 return false;
1248}
1249EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001250#endif /* CONFIG_TRACER_SNAPSHOT */
1251
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001252void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001253{
1254 if (tr->trace_buffer.buffer)
1255 ring_buffer_record_off(tr->trace_buffer.buffer);
1256 /*
1257 * This flag is looked at when buffers haven't been allocated
1258 * yet, or by some tracers (like irqsoff), that just want to
1259 * know if the ring buffer has been disabled, but it can handle
1260 * races of where it gets disabled but we still do a record.
1261 * As the check is in the fast path of the tracers, it is more
1262 * important to be fast than accurate.
1263 */
1264 tr->buffer_disabled = 1;
1265 /* Make the flag seen by readers */
1266 smp_wmb();
1267}
1268
Steven Rostedt499e5472012-02-22 15:50:28 -05001269/**
1270 * tracing_off - turn off tracing buffers
1271 *
1272 * This function stops the tracing buffers from recording data.
1273 * It does not disable any overhead the tracers themselves may
1274 * be causing. This function simply causes all recording to
1275 * the ring buffers to fail.
1276 */
1277void tracing_off(void)
1278{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001279 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001280}
1281EXPORT_SYMBOL_GPL(tracing_off);
1282
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001283void disable_trace_on_warning(void)
1284{
1285 if (__disable_trace_on_warning)
1286 tracing_off();
1287}
1288
Steven Rostedt499e5472012-02-22 15:50:28 -05001289/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001290 * tracer_tracing_is_on - show real state of ring buffer enabled
1291 * @tr : the trace array to know if ring buffer is enabled
1292 *
1293 * Shows real state of the ring buffer if it is enabled or not.
1294 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001295bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001296{
1297 if (tr->trace_buffer.buffer)
1298 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1299 return !tr->buffer_disabled;
1300}
1301
Steven Rostedt499e5472012-02-22 15:50:28 -05001302/**
1303 * tracing_is_on - show state of ring buffers enabled
1304 */
1305int tracing_is_on(void)
1306{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001307 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001308}
1309EXPORT_SYMBOL_GPL(tracing_is_on);
1310
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001311static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001312{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001313 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001314
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001315 if (!str)
1316 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001317 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001318 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001319 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001320 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001321 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001322 return 1;
1323}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001324__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001325
Tim Bird0e950172010-02-25 15:36:43 -08001326static int __init set_tracing_thresh(char *str)
1327{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001328 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001329 int ret;
1330
1331 if (!str)
1332 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001333 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001334 if (ret < 0)
1335 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001336 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001337 return 1;
1338}
1339__setup("tracing_thresh=", set_tracing_thresh);
1340
Steven Rostedt57f50be2008-05-12 21:20:44 +02001341unsigned long nsecs_to_usecs(unsigned long nsecs)
1342{
1343 return nsecs / 1000;
1344}
1345
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001346/*
1347 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001348 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001349 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001350 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001351 */
1352#undef C
1353#define C(a, b) b
1354
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001355/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001356static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001357 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001358 NULL
1359};
1360
Zhaolei5079f322009-08-25 16:12:56 +08001361static struct {
1362 u64 (*func)(void);
1363 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001364 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001365} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001366 { trace_clock_local, "local", 1 },
1367 { trace_clock_global, "global", 1 },
1368 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001369 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001370 { trace_clock, "perf", 1 },
1371 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001372 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001373 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001374 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001375};
1376
Tom Zanussi860f9f62018-01-15 20:51:48 -06001377bool trace_clock_in_ns(struct trace_array *tr)
1378{
1379 if (trace_clocks[tr->clock_id].in_ns)
1380 return true;
1381
1382 return false;
1383}
1384
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001385/*
1386 * trace_parser_get_init - gets the buffer for trace parser
1387 */
1388int trace_parser_get_init(struct trace_parser *parser, int size)
1389{
1390 memset(parser, 0, sizeof(*parser));
1391
1392 parser->buffer = kmalloc(size, GFP_KERNEL);
1393 if (!parser->buffer)
1394 return 1;
1395
1396 parser->size = size;
1397 return 0;
1398}
1399
1400/*
1401 * trace_parser_put - frees the buffer for trace parser
1402 */
1403void trace_parser_put(struct trace_parser *parser)
1404{
1405 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001406 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001407}
1408
1409/*
1410 * trace_get_user - reads the user input string separated by space
1411 * (matched by isspace(ch))
1412 *
1413 * For each string found the 'struct trace_parser' is updated,
1414 * and the function returns.
1415 *
1416 * Returns number of bytes read.
1417 *
1418 * See kernel/trace/trace.h for 'struct trace_parser' details.
1419 */
1420int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1421 size_t cnt, loff_t *ppos)
1422{
1423 char ch;
1424 size_t read = 0;
1425 ssize_t ret;
1426
1427 if (!*ppos)
1428 trace_parser_clear(parser);
1429
1430 ret = get_user(ch, ubuf++);
1431 if (ret)
1432 goto out;
1433
1434 read++;
1435 cnt--;
1436
1437 /*
1438 * The parser is not finished with the last write,
1439 * continue reading the user input without skipping spaces.
1440 */
1441 if (!parser->cont) {
1442 /* skip white space */
1443 while (cnt && isspace(ch)) {
1444 ret = get_user(ch, ubuf++);
1445 if (ret)
1446 goto out;
1447 read++;
1448 cnt--;
1449 }
1450
Changbin Du76638d92018-01-16 17:02:29 +08001451 parser->idx = 0;
1452
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001453 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001454 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001455 *ppos += read;
1456 ret = read;
1457 goto out;
1458 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001459 }
1460
1461 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001462 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001463 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001464 parser->buffer[parser->idx++] = ch;
1465 else {
1466 ret = -EINVAL;
1467 goto out;
1468 }
1469 ret = get_user(ch, ubuf++);
1470 if (ret)
1471 goto out;
1472 read++;
1473 cnt--;
1474 }
1475
1476 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001477 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001478 parser->buffer[parser->idx] = 0;
1479 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001480 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001481 parser->cont = true;
1482 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001483 /* Make sure the parsed string always terminates with '\0'. */
1484 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001485 } else {
1486 ret = -EINVAL;
1487 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001488 }
1489
1490 *ppos += read;
1491 ret = read;
1492
1493out:
1494 return ret;
1495}
1496
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001497/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001498static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001499{
1500 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001501
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001502 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001503 return -EBUSY;
1504
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001505 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001506 if (cnt > len)
1507 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001508 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001509
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001510 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001511 return cnt;
1512}
1513
Tim Bird0e950172010-02-25 15:36:43 -08001514unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001515static const struct file_operations tracing_max_lat_fops;
1516
1517#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1518 defined(CONFIG_FSNOTIFY)
1519
1520static struct workqueue_struct *fsnotify_wq;
1521
1522static void latency_fsnotify_workfn(struct work_struct *work)
1523{
1524 struct trace_array *tr = container_of(work, struct trace_array,
1525 fsnotify_work);
1526 fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
1527 tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
1528}
1529
1530static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1531{
1532 struct trace_array *tr = container_of(iwork, struct trace_array,
1533 fsnotify_irqwork);
1534 queue_work(fsnotify_wq, &tr->fsnotify_work);
1535}
1536
1537static void trace_create_maxlat_file(struct trace_array *tr,
1538 struct dentry *d_tracer)
1539{
1540 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1541 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1542 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1543 d_tracer, &tr->max_latency,
1544 &tracing_max_lat_fops);
1545}
1546
1547__init static int latency_fsnotify_init(void)
1548{
1549 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1550 WQ_UNBOUND | WQ_HIGHPRI, 0);
1551 if (!fsnotify_wq) {
1552 pr_err("Unable to allocate tr_max_lat_wq\n");
1553 return -ENOMEM;
1554 }
1555 return 0;
1556}
1557
1558late_initcall_sync(latency_fsnotify_init);
1559
1560void latency_fsnotify(struct trace_array *tr)
1561{
1562 if (!fsnotify_wq)
1563 return;
1564 /*
1565 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1566 * possible that we are called from __schedule() or do_idle(), which
1567 * could cause a deadlock.
1568 */
1569 irq_work_queue(&tr->fsnotify_irqwork);
1570}
1571
1572/*
1573 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1574 * defined(CONFIG_FSNOTIFY)
1575 */
1576#else
1577
1578#define trace_create_maxlat_file(tr, d_tracer) \
1579 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1580 &tr->max_latency, &tracing_max_lat_fops)
1581
1582#endif
Tim Bird0e950172010-02-25 15:36:43 -08001583
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001584#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001585/*
1586 * Copy the new maximum trace into the separate maximum-trace
1587 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae22017-10-19 14:32:33 +08001588 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001589 */
1590static void
1591__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1592{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001593 struct trace_buffer *trace_buf = &tr->trace_buffer;
1594 struct trace_buffer *max_buf = &tr->max_buffer;
1595 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1596 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001597
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001598 max_buf->cpu = cpu;
1599 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001600
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001601 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001602 max_data->critical_start = data->critical_start;
1603 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001604
Tom Zanussi85f726a2019-03-05 10:12:00 -06001605 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001606 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001607 /*
1608 * If tsk == current, then use current_uid(), as that does not use
1609 * RCU. The irq tracer can be called out of RCU scope.
1610 */
1611 if (tsk == current)
1612 max_data->uid = current_uid();
1613 else
1614 max_data->uid = task_uid(tsk);
1615
Steven Rostedt8248ac02009-09-02 12:27:41 -04001616 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1617 max_data->policy = tsk->policy;
1618 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001619
1620 /* record this tasks comm */
1621 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001622 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001623}
1624
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001625/**
1626 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1627 * @tr: tracer
1628 * @tsk: the task with the latency
1629 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001630 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001631 *
1632 * Flip the buffers between the @tr and the max_tr and record information
1633 * about which task was the cause of this latency.
1634 */
Ingo Molnare309b412008-05-12 21:20:51 +02001635void
Tom Zanussia35873a2019-02-13 17:42:45 -06001636update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1637 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001638{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001639 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001640 return;
1641
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001642 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001643
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001644 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001645 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001646 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001647 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001648 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001649
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001650 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001651
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001652 /* Inherit the recordable setting from trace_buffer */
1653 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1654 ring_buffer_record_on(tr->max_buffer.buffer);
1655 else
1656 ring_buffer_record_off(tr->max_buffer.buffer);
1657
Tom Zanussia35873a2019-02-13 17:42:45 -06001658#ifdef CONFIG_TRACER_SNAPSHOT
1659 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1660 goto out_unlock;
1661#endif
Gustavo A. R. Silva08ae88f2018-02-09 11:53:16 -06001662 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001663
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001664 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001665
1666 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001667 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001668}
1669
1670/**
1671 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001672 * @tr: tracer
1673 * @tsk: task with the latency
1674 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001675 *
1676 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001677 */
Ingo Molnare309b412008-05-12 21:20:51 +02001678void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001679update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1680{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001681 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001682
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001683 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001684 return;
1685
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001686 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001687 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001688 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001689 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001690 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001691 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001692
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001693 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001694
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001695 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001696
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001697 if (ret == -EBUSY) {
1698 /*
1699 * We failed to swap the buffer due to a commit taking
1700 * place on this CPU. We fail to record, but we reset
1701 * the max trace buffer (no one writes directly to it)
1702 * and flag that it failed.
1703 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001704 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001705 "Failed to swap buffers due to commit in progress\n");
1706 }
1707
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001708 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001709
1710 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001711 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001712}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001713#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001714
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001715static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001716{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001717 /* Iterators are static, they should be filled or empty */
1718 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001719 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001720
Rabin Vincente30f53a2014-11-10 19:46:34 +01001721 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1722 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001723}
1724
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001725#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001726static bool selftests_can_run;
1727
1728struct trace_selftests {
1729 struct list_head list;
1730 struct tracer *type;
1731};
1732
1733static LIST_HEAD(postponed_selftests);
1734
1735static int save_selftest(struct tracer *type)
1736{
1737 struct trace_selftests *selftest;
1738
1739 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1740 if (!selftest)
1741 return -ENOMEM;
1742
1743 selftest->type = type;
1744 list_add(&selftest->list, &postponed_selftests);
1745 return 0;
1746}
1747
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001748static int run_tracer_selftest(struct tracer *type)
1749{
1750 struct trace_array *tr = &global_trace;
1751 struct tracer *saved_tracer = tr->current_trace;
1752 int ret;
1753
1754 if (!type->selftest || tracing_selftest_disabled)
1755 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001756
1757 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001758 * If a tracer registers early in boot up (before scheduling is
1759 * initialized and such), then do not run its selftests yet.
1760 * Instead, run it a little later in the boot process.
1761 */
1762 if (!selftests_can_run)
1763 return save_selftest(type);
1764
1765 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001766 * Run a selftest on this tracer.
1767 * Here we reset the trace buffer, and set the current
1768 * tracer to be this tracer. The tracer can then run some
1769 * internal tracing to verify that everything is in order.
1770 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001771 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001772 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001773
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001774 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001775
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001776#ifdef CONFIG_TRACER_MAX_TRACE
1777 if (type->use_max_tr) {
1778 /* If we expanded the buffers, make sure the max is expanded too */
1779 if (ring_buffer_expanded)
1780 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1781 RING_BUFFER_ALL_CPUS);
1782 tr->allocated_snapshot = true;
1783 }
1784#endif
1785
1786 /* the test is responsible for initializing and enabling */
1787 pr_info("Testing tracer %s: ", type->name);
1788 ret = type->selftest(type, tr);
1789 /* the test is responsible for resetting too */
1790 tr->current_trace = saved_tracer;
1791 if (ret) {
1792 printk(KERN_CONT "FAILED!\n");
1793 /* Add the warning after printing 'FAILED' */
1794 WARN_ON(1);
1795 return -1;
1796 }
1797 /* Only reset on passing, to avoid touching corrupted buffers */
1798 tracing_reset_online_cpus(&tr->trace_buffer);
1799
1800#ifdef CONFIG_TRACER_MAX_TRACE
1801 if (type->use_max_tr) {
1802 tr->allocated_snapshot = false;
1803
1804 /* Shrink the max buffer again */
1805 if (ring_buffer_expanded)
1806 ring_buffer_resize(tr->max_buffer.buffer, 1,
1807 RING_BUFFER_ALL_CPUS);
1808 }
1809#endif
1810
1811 printk(KERN_CONT "PASSED\n");
1812 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001813}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001814
1815static __init int init_trace_selftests(void)
1816{
1817 struct trace_selftests *p, *n;
1818 struct tracer *t, **last;
1819 int ret;
1820
1821 selftests_can_run = true;
1822
1823 mutex_lock(&trace_types_lock);
1824
1825 if (list_empty(&postponed_selftests))
1826 goto out;
1827
1828 pr_info("Running postponed tracer tests:\n");
1829
1830 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001831 /* This loop can take minutes when sanitizers are enabled, so
1832 * lets make sure we allow RCU processing.
1833 */
1834 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001835 ret = run_tracer_selftest(p->type);
1836 /* If the test fails, then warn and remove from available_tracers */
1837 if (ret < 0) {
1838 WARN(1, "tracer: %s failed selftest, disabling\n",
1839 p->type->name);
1840 last = &trace_types;
1841 for (t = trace_types; t; t = t->next) {
1842 if (t == p->type) {
1843 *last = t->next;
1844 break;
1845 }
1846 last = &t->next;
1847 }
1848 }
1849 list_del(&p->list);
1850 kfree(p);
1851 }
1852
1853 out:
1854 mutex_unlock(&trace_types_lock);
1855
1856 return 0;
1857}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001858core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001859#else
1860static inline int run_tracer_selftest(struct tracer *type)
1861{
1862 return 0;
1863}
1864#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001865
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001866static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1867
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001868static void __init apply_trace_boot_options(void);
1869
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001870/**
1871 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001872 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001873 *
1874 * Register a new plugin tracer.
1875 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001876int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001877{
1878 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001879 int ret = 0;
1880
1881 if (!type->name) {
1882 pr_info("Tracer must have a name\n");
1883 return -1;
1884 }
1885
Dan Carpenter24a461d2010-07-10 12:06:44 +02001886 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001887 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1888 return -1;
1889 }
1890
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001891 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11001892 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001893 type->name);
1894 return -EPERM;
1895 }
1896
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001897 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001898
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001899 tracing_selftest_running = true;
1900
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001901 for (t = trace_types; t; t = t->next) {
1902 if (strcmp(type->name, t->name) == 0) {
1903 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001904 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001905 type->name);
1906 ret = -1;
1907 goto out;
1908 }
1909 }
1910
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001911 if (!type->set_flag)
1912 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001913 if (!type->flags) {
1914 /*allocate a dummy tracer_flags*/
1915 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001916 if (!type->flags) {
1917 ret = -ENOMEM;
1918 goto out;
1919 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001920 type->flags->val = 0;
1921 type->flags->opts = dummy_tracer_opt;
1922 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001923 if (!type->flags->opts)
1924 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001925
Chunyu Hud39cdd22016-03-08 21:37:01 +08001926 /* store the tracer for __set_tracer_option */
1927 type->flags->trace = type;
1928
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001929 ret = run_tracer_selftest(type);
1930 if (ret < 0)
1931 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001932
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001933 type->next = trace_types;
1934 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001935 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001936
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001937 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001938 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001939 mutex_unlock(&trace_types_lock);
1940
Steven Rostedtdac74942009-02-05 01:13:38 -05001941 if (ret || !default_bootup_tracer)
1942 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001943
Li Zefanee6c2c12009-09-18 14:06:47 +08001944 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001945 goto out_unlock;
1946
1947 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1948 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001949 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001950 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001951
1952 apply_trace_boot_options();
1953
Steven Rostedtdac74942009-02-05 01:13:38 -05001954 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001955 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001956#ifdef CONFIG_FTRACE_STARTUP_TEST
1957 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1958 type->name);
1959#endif
1960
1961 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001962 return ret;
1963}
1964
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04001965static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001966{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001967 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001968
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001969 if (!buffer)
1970 return;
1971
Steven Rostedtf6339032009-09-04 12:35:16 -04001972 ring_buffer_record_disable(buffer);
1973
1974 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001975 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001976 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001977
1978 ring_buffer_record_enable(buffer);
1979}
1980
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001981void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001982{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001983 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001984 int cpu;
1985
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001986 if (!buffer)
1987 return;
1988
Steven Rostedt621968c2009-09-04 12:02:35 -04001989 ring_buffer_record_disable(buffer);
1990
1991 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001992 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04001993
Alexander Z Lam94571582013-08-02 18:36:16 -07001994 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001995
1996 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001997 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001998
1999 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002000}
2001
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002002/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002003void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002004{
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002005 struct trace_array *tr;
2006
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002007 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002008 if (!tr->clear_trace)
2009 continue;
2010 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002011 tracing_reset_online_cpus(&tr->trace_buffer);
2012#ifdef CONFIG_TRACER_MAX_TRACE
2013 tracing_reset_online_cpus(&tr->max_buffer);
2014#endif
Steven Rostedt (Red Hat)873c642f2013-03-04 23:26:06 -05002015 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002016}
2017
Joel Fernandesd914ba32017-06-26 19:01:55 -07002018static int *tgid_map;
2019
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002020#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002021#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002022static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002023struct saved_cmdlines_buffer {
2024 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2025 unsigned *map_cmdline_to_pid;
2026 unsigned cmdline_num;
2027 int cmdline_idx;
2028 char *saved_cmdlines;
2029};
2030static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002031
Steven Rostedt25b0b442008-05-12 21:21:00 +02002032/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002033static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002034
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002035static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002036{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002037 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2038}
2039
2040static inline void set_cmdline(int idx, const char *cmdline)
2041{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002042 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002043}
2044
2045static int allocate_cmdlines_buffer(unsigned int val,
2046 struct saved_cmdlines_buffer *s)
2047{
Kees Cook6da2ec52018-06-12 13:55:00 -07002048 s->map_cmdline_to_pid = kmalloc_array(val,
2049 sizeof(*s->map_cmdline_to_pid),
2050 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002051 if (!s->map_cmdline_to_pid)
2052 return -ENOMEM;
2053
Kees Cook6da2ec52018-06-12 13:55:00 -07002054 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002055 if (!s->saved_cmdlines) {
2056 kfree(s->map_cmdline_to_pid);
2057 return -ENOMEM;
2058 }
2059
2060 s->cmdline_idx = 0;
2061 s->cmdline_num = val;
2062 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2063 sizeof(s->map_pid_to_cmdline));
2064 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2065 val * sizeof(*s->map_cmdline_to_pid));
2066
2067 return 0;
2068}
2069
2070static int trace_create_savedcmd(void)
2071{
2072 int ret;
2073
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002074 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002075 if (!savedcmd)
2076 return -ENOMEM;
2077
2078 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2079 if (ret < 0) {
2080 kfree(savedcmd);
2081 savedcmd = NULL;
2082 return -ENOMEM;
2083 }
2084
2085 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002086}
2087
Carsten Emdeb5130b12009-09-13 01:43:07 +02002088int is_tracing_stopped(void)
2089{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002090 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002091}
2092
Steven Rostedt0f048702008-11-05 16:05:44 -05002093/**
2094 * tracing_start - quick start of the tracer
2095 *
2096 * If tracing is enabled but was stopped by tracing_stop,
2097 * this will start the tracer back up.
2098 */
2099void tracing_start(void)
2100{
2101 struct ring_buffer *buffer;
2102 unsigned long flags;
2103
2104 if (tracing_disabled)
2105 return;
2106
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002107 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2108 if (--global_trace.stop_count) {
2109 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002110 /* Someone screwed up their debugging */
2111 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002112 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002113 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002114 goto out;
2115 }
2116
Steven Rostedta2f80712010-03-12 19:56:00 -05002117 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002118 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002119
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002120 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002121 if (buffer)
2122 ring_buffer_record_enable(buffer);
2123
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002124#ifdef CONFIG_TRACER_MAX_TRACE
2125 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002126 if (buffer)
2127 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002128#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002129
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002130 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002131
Steven Rostedt0f048702008-11-05 16:05:44 -05002132 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002133 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2134}
2135
2136static void tracing_start_tr(struct trace_array *tr)
2137{
2138 struct ring_buffer *buffer;
2139 unsigned long flags;
2140
2141 if (tracing_disabled)
2142 return;
2143
2144 /* If global, we need to also start the max tracer */
2145 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2146 return tracing_start();
2147
2148 raw_spin_lock_irqsave(&tr->start_lock, flags);
2149
2150 if (--tr->stop_count) {
2151 if (tr->stop_count < 0) {
2152 /* Someone screwed up their debugging */
2153 WARN_ON_ONCE(1);
2154 tr->stop_count = 0;
2155 }
2156 goto out;
2157 }
2158
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002159 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002160 if (buffer)
2161 ring_buffer_record_enable(buffer);
2162
2163 out:
2164 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002165}
2166
2167/**
2168 * tracing_stop - quick stop of the tracer
2169 *
2170 * Light weight way to stop tracing. Use in conjunction with
2171 * tracing_start.
2172 */
2173void tracing_stop(void)
2174{
2175 struct ring_buffer *buffer;
2176 unsigned long flags;
2177
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002178 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2179 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002180 goto out;
2181
Steven Rostedta2f80712010-03-12 19:56:00 -05002182 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002183 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002184
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002185 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002186 if (buffer)
2187 ring_buffer_record_disable(buffer);
2188
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002189#ifdef CONFIG_TRACER_MAX_TRACE
2190 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002191 if (buffer)
2192 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002193#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002194
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002195 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002196
Steven Rostedt0f048702008-11-05 16:05:44 -05002197 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002198 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2199}
2200
2201static void tracing_stop_tr(struct trace_array *tr)
2202{
2203 struct ring_buffer *buffer;
2204 unsigned long flags;
2205
2206 /* If global, we need to also stop the max tracer */
2207 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2208 return tracing_stop();
2209
2210 raw_spin_lock_irqsave(&tr->start_lock, flags);
2211 if (tr->stop_count++)
2212 goto out;
2213
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002214 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002215 if (buffer)
2216 ring_buffer_record_disable(buffer);
2217
2218 out:
2219 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002220}
2221
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002222static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002223{
Carsten Emdea635cf02009-03-18 09:00:41 +01002224 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002225
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002226 /* treat recording of idle task as a success */
2227 if (!tsk->pid)
2228 return 1;
2229
2230 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002231 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002232
2233 /*
2234 * It's not the end of the world if we don't get
2235 * the lock, but we also don't want to spin
2236 * nor do we want to disable interrupts,
2237 * so if we miss here, then better luck next time.
2238 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002239 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002240 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002241
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002242 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002243 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002244 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002245
Carsten Emdea635cf02009-03-18 09:00:41 +01002246 /*
2247 * Check whether the cmdline buffer at idx has a pid
2248 * mapped. We are going to overwrite that entry so we
2249 * need to clear the map_pid_to_cmdline. Otherwise we
2250 * would read the new comm for the old pid.
2251 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002252 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002253 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002254 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002256 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2257 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002258
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002259 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002260 }
2261
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002262 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002263
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002264 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002265
2266 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002267}
2268
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002269static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002270{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002271 unsigned map;
2272
Steven Rostedt4ca530852009-03-16 19:20:15 -04002273 if (!pid) {
2274 strcpy(comm, "<idle>");
2275 return;
2276 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002277
Steven Rostedt74bf4072010-01-25 15:11:53 -05002278 if (WARN_ON_ONCE(pid < 0)) {
2279 strcpy(comm, "<XXX>");
2280 return;
2281 }
2282
Steven Rostedt4ca530852009-03-16 19:20:15 -04002283 if (pid > PID_MAX_DEFAULT) {
2284 strcpy(comm, "<...>");
2285 return;
2286 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002287
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002288 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002289 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302290 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002291 else
2292 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002293}
2294
2295void trace_find_cmdline(int pid, char comm[])
2296{
2297 preempt_disable();
2298 arch_spin_lock(&trace_cmdline_lock);
2299
2300 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002301
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002302 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002303 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002304}
2305
Joel Fernandesd914ba32017-06-26 19:01:55 -07002306int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002308 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2309 return 0;
2310
2311 return tgid_map[pid];
2312}
2313
2314static int trace_save_tgid(struct task_struct *tsk)
2315{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002316 /* treat recording of idle task as a success */
2317 if (!tsk->pid)
2318 return 1;
2319
2320 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002321 return 0;
2322
2323 tgid_map[tsk->pid] = tsk->tgid;
2324 return 1;
2325}
2326
2327static bool tracing_record_taskinfo_skip(int flags)
2328{
2329 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2330 return true;
2331 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2332 return true;
2333 if (!__this_cpu_read(trace_taskinfo_save))
2334 return true;
2335 return false;
2336}
2337
2338/**
2339 * tracing_record_taskinfo - record the task info of a task
2340 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002341 * @task: task to record
2342 * @flags: TRACE_RECORD_CMDLINE for recording comm
2343 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002344 */
2345void tracing_record_taskinfo(struct task_struct *task, int flags)
2346{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002347 bool done;
2348
Joel Fernandesd914ba32017-06-26 19:01:55 -07002349 if (tracing_record_taskinfo_skip(flags))
2350 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002351
2352 /*
2353 * Record as much task information as possible. If some fail, continue
2354 * to try to record the others.
2355 */
2356 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2357 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2358
2359 /* If recording any information failed, retry again soon. */
2360 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002361 return;
2362
Joel Fernandesd914ba32017-06-26 19:01:55 -07002363 __this_cpu_write(trace_taskinfo_save, false);
2364}
2365
2366/**
2367 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2368 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002369 * @prev: previous task during sched_switch
2370 * @next: next task during sched_switch
2371 * @flags: TRACE_RECORD_CMDLINE for recording comm
2372 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002373 */
2374void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2375 struct task_struct *next, int flags)
2376{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002377 bool done;
2378
Joel Fernandesd914ba32017-06-26 19:01:55 -07002379 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002380 return;
2381
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002382 /*
2383 * Record as much task information as possible. If some fail, continue
2384 * to try to record the others.
2385 */
2386 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2387 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2388 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2389 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002390
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002391 /* If recording any information failed, retry again soon. */
2392 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002393 return;
2394
2395 __this_cpu_write(trace_taskinfo_save, false);
2396}
2397
2398/* Helpers to record a specific task information */
2399void tracing_record_cmdline(struct task_struct *task)
2400{
2401 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2402}
2403
2404void tracing_record_tgid(struct task_struct *task)
2405{
2406 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407}
2408
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002409/*
2410 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2411 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2412 * simplifies those functions and keeps them in sync.
2413 */
2414enum print_line_t trace_handle_return(struct trace_seq *s)
2415{
2416 return trace_seq_has_overflowed(s) ?
2417 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2418}
2419EXPORT_SYMBOL_GPL(trace_handle_return);
2420
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002421void
Cong Wang46710f32019-05-25 09:57:59 -07002422tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2423 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002424{
2425 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002426
Steven Rostedt777e2082008-09-29 23:02:42 -04002427 entry->preempt_count = pc & 0xff;
2428 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002429 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002430 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002431#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002432 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002433#else
2434 TRACE_FLAG_IRQS_NOSUPPORT |
2435#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002436 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002437 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302438 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002439 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2440 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002441}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002442EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443
Steven Rostedte77405a2009-09-02 14:17:06 -04002444struct ring_buffer_event *
2445trace_buffer_lock_reserve(struct ring_buffer *buffer,
2446 int type,
2447 unsigned long len,
2448 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002449{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002450 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002451}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002452
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002453DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2454DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2455static int trace_buffered_event_ref;
2456
2457/**
2458 * trace_buffered_event_enable - enable buffering events
2459 *
2460 * When events are being filtered, it is quicker to use a temporary
2461 * buffer to write the event data into if there's a likely chance
2462 * that it will not be committed. The discard of the ring buffer
2463 * is not as fast as committing, and is much slower than copying
2464 * a commit.
2465 *
2466 * When an event is to be filtered, allocate per cpu buffers to
2467 * write the event data into, and if the event is filtered and discarded
2468 * it is simply dropped, otherwise, the entire data is to be committed
2469 * in one shot.
2470 */
2471void trace_buffered_event_enable(void)
2472{
2473 struct ring_buffer_event *event;
2474 struct page *page;
2475 int cpu;
2476
2477 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2478
2479 if (trace_buffered_event_ref++)
2480 return;
2481
2482 for_each_tracing_cpu(cpu) {
2483 page = alloc_pages_node(cpu_to_node(cpu),
2484 GFP_KERNEL | __GFP_NORETRY, 0);
2485 if (!page)
2486 goto failed;
2487
2488 event = page_address(page);
2489 memset(event, 0, sizeof(*event));
2490
2491 per_cpu(trace_buffered_event, cpu) = event;
2492
2493 preempt_disable();
2494 if (cpu == smp_processor_id() &&
2495 this_cpu_read(trace_buffered_event) !=
2496 per_cpu(trace_buffered_event, cpu))
2497 WARN_ON_ONCE(1);
2498 preempt_enable();
2499 }
2500
2501 return;
2502 failed:
2503 trace_buffered_event_disable();
2504}
2505
2506static void enable_trace_buffered_event(void *data)
2507{
2508 /* Probably not needed, but do it anyway */
2509 smp_rmb();
2510 this_cpu_dec(trace_buffered_event_cnt);
2511}
2512
2513static void disable_trace_buffered_event(void *data)
2514{
2515 this_cpu_inc(trace_buffered_event_cnt);
2516}
2517
2518/**
2519 * trace_buffered_event_disable - disable buffering events
2520 *
2521 * When a filter is removed, it is faster to not use the buffered
2522 * events, and to commit directly into the ring buffer. Free up
2523 * the temp buffers when there are no more users. This requires
2524 * special synchronization with current events.
2525 */
2526void trace_buffered_event_disable(void)
2527{
2528 int cpu;
2529
2530 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2531
2532 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2533 return;
2534
2535 if (--trace_buffered_event_ref)
2536 return;
2537
2538 preempt_disable();
2539 /* For each CPU, set the buffer as used. */
2540 smp_call_function_many(tracing_buffer_mask,
2541 disable_trace_buffered_event, NULL, 1);
2542 preempt_enable();
2543
2544 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002545 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002546
2547 for_each_tracing_cpu(cpu) {
2548 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2549 per_cpu(trace_buffered_event, cpu) = NULL;
2550 }
2551 /*
2552 * Make sure trace_buffered_event is NULL before clearing
2553 * trace_buffered_event_cnt.
2554 */
2555 smp_wmb();
2556
2557 preempt_disable();
2558 /* Do the work on each cpu */
2559 smp_call_function_many(tracing_buffer_mask,
2560 enable_trace_buffered_event, NULL, 1);
2561 preempt_enable();
2562}
2563
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002564static struct ring_buffer *temp_buffer;
2565
Steven Rostedtef5580d2009-02-27 19:38:04 -05002566struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002567trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002568 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002569 int type, unsigned long len,
2570 unsigned long flags, int pc)
2571{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002572 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002573 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002574
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002575 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002576
Tom Zanussi00b41452018-01-15 20:51:39 -06002577 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002578 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2579 (entry = this_cpu_read(trace_buffered_event))) {
2580 /* Try to use the per cpu buffer first */
2581 val = this_cpu_inc_return(trace_buffered_event_cnt);
2582 if (val == 1) {
2583 trace_event_setup(entry, type, flags, pc);
2584 entry->array[0] = len;
2585 return entry;
2586 }
2587 this_cpu_dec(trace_buffered_event_cnt);
2588 }
2589
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002590 entry = __trace_buffer_lock_reserve(*current_rb,
2591 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002592 /*
2593 * If tracing is off, but we have triggers enabled
2594 * we still need to look at the event data. Use the temp_buffer
2595 * to store the trace event for the tigger to use. It's recusive
2596 * safe and will not be recorded anywhere.
2597 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002598 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002599 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002600 entry = __trace_buffer_lock_reserve(*current_rb,
2601 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002602 }
2603 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002604}
2605EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2606
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002607static DEFINE_SPINLOCK(tracepoint_iter_lock);
2608static DEFINE_MUTEX(tracepoint_printk_mutex);
2609
2610static void output_printk(struct trace_event_buffer *fbuffer)
2611{
2612 struct trace_event_call *event_call;
2613 struct trace_event *event;
2614 unsigned long flags;
2615 struct trace_iterator *iter = tracepoint_print_iter;
2616
2617 /* We should never get here if iter is NULL */
2618 if (WARN_ON_ONCE(!iter))
2619 return;
2620
2621 event_call = fbuffer->trace_file->event_call;
2622 if (!event_call || !event_call->event.funcs ||
2623 !event_call->event.funcs->trace)
2624 return;
2625
2626 event = &fbuffer->trace_file->event_call->event;
2627
2628 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2629 trace_seq_init(&iter->seq);
2630 iter->ent = fbuffer->entry;
2631 event_call->event.funcs->trace(iter, 0, event);
2632 trace_seq_putc(&iter->seq, 0);
2633 printk("%s", iter->seq.buffer);
2634
2635 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2636}
2637
2638int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2639 void __user *buffer, size_t *lenp,
2640 loff_t *ppos)
2641{
2642 int save_tracepoint_printk;
2643 int ret;
2644
2645 mutex_lock(&tracepoint_printk_mutex);
2646 save_tracepoint_printk = tracepoint_printk;
2647
2648 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2649
2650 /*
2651 * This will force exiting early, as tracepoint_printk
2652 * is always zero when tracepoint_printk_iter is not allocated
2653 */
2654 if (!tracepoint_print_iter)
2655 tracepoint_printk = 0;
2656
2657 if (save_tracepoint_printk == tracepoint_printk)
2658 goto out;
2659
2660 if (tracepoint_printk)
2661 static_key_enable(&tracepoint_printk_key.key);
2662 else
2663 static_key_disable(&tracepoint_printk_key.key);
2664
2665 out:
2666 mutex_unlock(&tracepoint_printk_mutex);
2667
2668 return ret;
2669}
2670
2671void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2672{
2673 if (static_key_false(&tracepoint_printk_key.key))
2674 output_printk(fbuffer);
2675
2676 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2677 fbuffer->event, fbuffer->entry,
2678 fbuffer->flags, fbuffer->pc);
2679}
2680EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2681
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002682/*
2683 * Skip 3:
2684 *
2685 * trace_buffer_unlock_commit_regs()
2686 * trace_event_buffer_commit()
2687 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302688 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002689# define STACK_SKIP 3
2690
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002691void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2692 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002693 struct ring_buffer_event *event,
2694 unsigned long flags, int pc,
2695 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002696{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002697 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002698
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002699 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002700 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002701 * Note, we can still get here via blktrace, wakeup tracer
2702 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002703 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002704 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002705 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002706 ftrace_trace_userstack(buffer, flags, pc);
2707}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002708
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002709/*
2710 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2711 */
2712void
2713trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2714 struct ring_buffer_event *event)
2715{
2716 __buffer_unlock_commit(buffer, event);
2717}
2718
Chunyan Zhang478409d2016-11-21 15:57:18 +08002719static void
2720trace_process_export(struct trace_export *export,
2721 struct ring_buffer_event *event)
2722{
2723 struct trace_entry *entry;
2724 unsigned int size = 0;
2725
2726 entry = ring_buffer_event_data(event);
2727 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002728 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002729}
2730
2731static DEFINE_MUTEX(ftrace_export_lock);
2732
2733static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2734
2735static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2736
2737static inline void ftrace_exports_enable(void)
2738{
2739 static_branch_enable(&ftrace_exports_enabled);
2740}
2741
2742static inline void ftrace_exports_disable(void)
2743{
2744 static_branch_disable(&ftrace_exports_enabled);
2745}
2746
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002747static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002748{
2749 struct trace_export *export;
2750
2751 preempt_disable_notrace();
2752
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002753 export = rcu_dereference_raw_check(ftrace_exports_list);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002754 while (export) {
2755 trace_process_export(export, event);
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002756 export = rcu_dereference_raw_check(export->next);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002757 }
2758
2759 preempt_enable_notrace();
2760}
2761
2762static inline void
2763add_trace_export(struct trace_export **list, struct trace_export *export)
2764{
2765 rcu_assign_pointer(export->next, *list);
2766 /*
2767 * We are entering export into the list but another
2768 * CPU might be walking that list. We need to make sure
2769 * the export->next pointer is valid before another CPU sees
2770 * the export pointer included into the list.
2771 */
2772 rcu_assign_pointer(*list, export);
2773}
2774
2775static inline int
2776rm_trace_export(struct trace_export **list, struct trace_export *export)
2777{
2778 struct trace_export **p;
2779
2780 for (p = list; *p != NULL; p = &(*p)->next)
2781 if (*p == export)
2782 break;
2783
2784 if (*p != export)
2785 return -1;
2786
2787 rcu_assign_pointer(*p, (*p)->next);
2788
2789 return 0;
2790}
2791
2792static inline void
2793add_ftrace_export(struct trace_export **list, struct trace_export *export)
2794{
2795 if (*list == NULL)
2796 ftrace_exports_enable();
2797
2798 add_trace_export(list, export);
2799}
2800
2801static inline int
2802rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2803{
2804 int ret;
2805
2806 ret = rm_trace_export(list, export);
2807 if (*list == NULL)
2808 ftrace_exports_disable();
2809
2810 return ret;
2811}
2812
2813int register_ftrace_export(struct trace_export *export)
2814{
2815 if (WARN_ON_ONCE(!export->write))
2816 return -1;
2817
2818 mutex_lock(&ftrace_export_lock);
2819
2820 add_ftrace_export(&ftrace_exports_list, export);
2821
2822 mutex_unlock(&ftrace_export_lock);
2823
2824 return 0;
2825}
2826EXPORT_SYMBOL_GPL(register_ftrace_export);
2827
2828int unregister_ftrace_export(struct trace_export *export)
2829{
2830 int ret;
2831
2832 mutex_lock(&ftrace_export_lock);
2833
2834 ret = rm_ftrace_export(&ftrace_exports_list, export);
2835
2836 mutex_unlock(&ftrace_export_lock);
2837
2838 return ret;
2839}
2840EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2841
Ingo Molnare309b412008-05-12 21:20:51 +02002842void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002843trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002844 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2845 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002846{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002847 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002848 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002849 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002850 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002851
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002852 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2853 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002854 if (!event)
2855 return;
2856 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002857 entry->ip = ip;
2858 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002859
Chunyan Zhang478409d2016-11-21 15:57:18 +08002860 if (!call_filter_check_discard(call, entry, buffer, event)) {
2861 if (static_branch_unlikely(&ftrace_exports_enabled))
2862 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002863 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002864 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002865}
2866
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002867#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002868
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002869/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2870#define FTRACE_KSTACK_NESTING 4
2871
2872#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2873
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002874struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002875 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002876};
2877
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002878
2879struct ftrace_stacks {
2880 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2881};
2882
2883static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002884static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2885
Steven Rostedte77405a2009-09-02 14:17:06 -04002886static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002887 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002888 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002889{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002890 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002891 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002892 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002893 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002894 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002895 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002896
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002897 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002898 * Add one, for this function and the call to save_stack_trace()
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002899 * If regs is set, then these functions will not be in the way.
2900 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002901#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002902 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002903 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002904#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002905
2906 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002907 * Since events can happen in NMIs there's no safe way to
2908 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2909 * or NMI comes in, it will just have to use the default
2910 * FTRACE_STACK_SIZE.
2911 */
2912 preempt_disable_notrace();
2913
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002914 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2915
2916 /* This should never happen. If it does, yell once and skip */
2917 if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
2918 goto out;
2919
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002920 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002921 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2922 * interrupt will either see the value pre increment or post
2923 * increment. If the interrupt happens pre increment it will have
2924 * restored the counter when it returns. We just need a barrier to
2925 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002926 */
2927 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002928
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002929 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002930 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002931
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002932 if (regs) {
2933 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2934 size, skip);
2935 } else {
2936 nr_entries = stack_trace_save(fstack->calls, size, skip);
2937 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002938
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002939 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002940 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2941 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002942 if (!event)
2943 goto out;
2944 entry = ring_buffer_event_data(event);
2945
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002946 memcpy(&entry->caller, fstack->calls, size);
2947 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002948
Tom Zanussif306cc82013-10-24 08:34:17 -05002949 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002950 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002951
2952 out:
2953 /* Again, don't let gcc optimize things here */
2954 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002955 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002956 preempt_enable_notrace();
2957
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002958}
2959
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002960static inline void ftrace_trace_stack(struct trace_array *tr,
2961 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002962 unsigned long flags,
2963 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002964{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002965 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002966 return;
2967
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002968 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002969}
2970
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002971void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2972 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002973{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002974 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2975
2976 if (rcu_is_watching()) {
2977 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2978 return;
2979 }
2980
2981 /*
2982 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2983 * but if the above rcu_is_watching() failed, then the NMI
2984 * triggered someplace critical, and rcu_irq_enter() should
2985 * not be called from NMI.
2986 */
2987 if (unlikely(in_nmi()))
2988 return;
2989
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002990 rcu_irq_enter_irqson();
2991 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2992 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002993}
2994
Steven Rostedt03889382009-12-11 09:48:22 -05002995/**
2996 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002997 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002998 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002999void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003000{
3001 unsigned long flags;
3002
3003 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003004 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003005
3006 local_save_flags(flags);
3007
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003008#ifndef CONFIG_UNWINDER_ORC
3009 /* Skip 1 to skip this function. */
3010 skip++;
3011#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003012 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
3013 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003014}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003015EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05003016
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003017#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003018static DEFINE_PER_CPU(int, user_stack_count);
3019
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003020static void
Steven Rostedte77405a2009-09-02 14:17:06 -04003021ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003022{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003023 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003024 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003025 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003026
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003027 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003028 return;
3029
Steven Rostedtb6345872010-03-12 20:03:30 -05003030 /*
3031 * NMIs can not handle page faults, even with fix ups.
3032 * The save user stack can (and often does) fault.
3033 */
3034 if (unlikely(in_nmi()))
3035 return;
3036
Steven Rostedt91e86e52010-11-10 12:56:12 +01003037 /*
3038 * prevent recursion, since the user stack tracing may
3039 * trigger other kernel events.
3040 */
3041 preempt_disable();
3042 if (__this_cpu_read(user_stack_count))
3043 goto out;
3044
3045 __this_cpu_inc(user_stack_count);
3046
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003047 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3048 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003049 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003050 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003051 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003052
Steven Rostedt48659d32009-09-11 11:36:23 -04003053 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003054 memset(&entry->caller, 0, sizeof(entry->caller));
3055
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003056 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003057 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003058 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003059
Li Zefan1dbd1952010-12-09 15:47:56 +08003060 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003061 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003062 out:
3063 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003064}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003065#else /* CONFIG_USER_STACKTRACE_SUPPORT */
3066static void ftrace_trace_userstack(struct ring_buffer *buffer,
3067 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003068{
Török Edwin02b67512008-11-22 13:28:47 +02003069}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003070#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003071
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003072#endif /* CONFIG_STACKTRACE */
3073
Steven Rostedt07d777f2011-09-22 14:01:55 -04003074/* created for use with alloc_percpu */
3075struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003076 int nesting;
3077 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003078};
3079
3080static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003081
3082/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003083 * Thise allows for lockless recording. If we're nested too deeply, then
3084 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003085 */
3086static char *get_trace_buf(void)
3087{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003088 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003089
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003090 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003091 return NULL;
3092
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003093 buffer->nesting++;
3094
3095 /* Interrupts must see nesting incremented before we use the buffer */
3096 barrier();
3097 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003098}
3099
3100static void put_trace_buf(void)
3101{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003102 /* Don't let the decrement of nesting leak before this */
3103 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003104 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003105}
3106
3107static int alloc_percpu_trace_buffer(void)
3108{
3109 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003110
3111 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003112 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3113 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003114
3115 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003116 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003117}
3118
Steven Rostedt81698832012-10-11 10:15:05 -04003119static int buffers_allocated;
3120
Steven Rostedt07d777f2011-09-22 14:01:55 -04003121void trace_printk_init_buffers(void)
3122{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003123 if (buffers_allocated)
3124 return;
3125
3126 if (alloc_percpu_trace_buffer())
3127 return;
3128
Steven Rostedt2184db42014-05-28 13:14:40 -04003129 /* trace_printk() is for debug use only. Don't use it in production. */
3130
Joe Perchesa395d6a2016-03-22 14:28:09 -07003131 pr_warn("\n");
3132 pr_warn("**********************************************************\n");
3133 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3134 pr_warn("** **\n");
3135 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3136 pr_warn("** **\n");
3137 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3138 pr_warn("** unsafe for production use. **\n");
3139 pr_warn("** **\n");
3140 pr_warn("** If you see this message and you are not debugging **\n");
3141 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3142 pr_warn("** **\n");
3143 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3144 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003145
Steven Rostedtb382ede62012-10-10 21:44:34 -04003146 /* Expand the buffers to set size */
3147 tracing_update_buffers();
3148
Steven Rostedt07d777f2011-09-22 14:01:55 -04003149 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003150
3151 /*
3152 * trace_printk_init_buffers() can be called by modules.
3153 * If that happens, then we need to start cmdline recording
3154 * directly here. If the global_trace.buffer is already
3155 * allocated here, then this was called by module code.
3156 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003157 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003158 tracing_start_cmdline_record();
3159}
Divya Indif45d1222019-03-20 11:28:51 -07003160EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003161
3162void trace_printk_start_comm(void)
3163{
3164 /* Start tracing comms if trace printk is set */
3165 if (!buffers_allocated)
3166 return;
3167 tracing_start_cmdline_record();
3168}
3169
3170static void trace_printk_start_stop_comm(int enabled)
3171{
3172 if (!buffers_allocated)
3173 return;
3174
3175 if (enabled)
3176 tracing_start_cmdline_record();
3177 else
3178 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003179}
3180
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003181/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003182 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003183 * @ip: The address of the caller
3184 * @fmt: The string format to write to the buffer
3185 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003186 */
Steven Rostedt40ce74f12009-03-19 14:03:53 -04003187int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003188{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003189 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003190 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04003191 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003192 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003193 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003194 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003195 char *tbuffer;
3196 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003197
3198 if (unlikely(tracing_selftest_running || tracing_disabled))
3199 return 0;
3200
3201 /* Don't pollute graph traces with trace_vprintk internals */
3202 pause_graph_tracing();
3203
3204 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003205 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003206
Steven Rostedt07d777f2011-09-22 14:01:55 -04003207 tbuffer = get_trace_buf();
3208 if (!tbuffer) {
3209 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003210 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003211 }
3212
3213 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3214
3215 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003216 goto out;
3217
Steven Rostedt07d777f2011-09-22 14:01:55 -04003218 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003219 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003220 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003221 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3222 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003223 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003224 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003225 entry = ring_buffer_event_data(event);
3226 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003227 entry->fmt = fmt;
3228
Steven Rostedt07d777f2011-09-22 14:01:55 -04003229 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003230 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003231 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003232 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003233 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003234
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003235out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003236 put_trace_buf();
3237
3238out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003239 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003240 unpause_graph_tracing();
3241
3242 return len;
3243}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003244EXPORT_SYMBOL_GPL(trace_vbprintk);
3245
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003246__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003247static int
3248__trace_array_vprintk(struct ring_buffer *buffer,
3249 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003250{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003251 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003252 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003253 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003254 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003255 unsigned long flags;
3256 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003257
3258 if (tracing_disabled || tracing_selftest_running)
3259 return 0;
3260
Steven Rostedt07d777f2011-09-22 14:01:55 -04003261 /* Don't pollute graph traces with trace_vprintk internals */
3262 pause_graph_tracing();
3263
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003264 pc = preempt_count();
3265 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003266
Steven Rostedt07d777f2011-09-22 14:01:55 -04003267
3268 tbuffer = get_trace_buf();
3269 if (!tbuffer) {
3270 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003271 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003272 }
3273
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003274 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003275
Steven Rostedt07d777f2011-09-22 14:01:55 -04003276 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003277 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003278 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3279 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003280 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003281 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003282 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003283 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003284
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003285 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003286 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003287 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003288 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003289 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003290
3291out:
3292 put_trace_buf();
3293
3294out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003295 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003296 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003297
3298 return len;
3299}
Steven Rostedt659372d2009-09-03 19:11:07 -04003300
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003301__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003302int trace_array_vprintk(struct trace_array *tr,
3303 unsigned long ip, const char *fmt, va_list args)
3304{
3305 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3306}
3307
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003308__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003309int trace_array_printk(struct trace_array *tr,
3310 unsigned long ip, const char *fmt, ...)
3311{
3312 int ret;
3313 va_list ap;
3314
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003315 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003316 return 0;
3317
Divya Indi953ae452019-08-14 10:55:25 -07003318 if (!tr)
3319 return -ENOENT;
3320
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003321 va_start(ap, fmt);
3322 ret = trace_array_vprintk(tr, ip, fmt, ap);
3323 va_end(ap);
3324 return ret;
3325}
Divya Indif45d1222019-03-20 11:28:51 -07003326EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003327
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003328__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003329int trace_array_printk_buf(struct ring_buffer *buffer,
3330 unsigned long ip, const char *fmt, ...)
3331{
3332 int ret;
3333 va_list ap;
3334
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003335 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003336 return 0;
3337
3338 va_start(ap, fmt);
3339 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3340 va_end(ap);
3341 return ret;
3342}
3343
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003344__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003345int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3346{
Steven Rostedta813a152009-10-09 01:41:35 -04003347 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003348}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003349EXPORT_SYMBOL_GPL(trace_vprintk);
3350
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003351static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003352{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003353 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3354
Steven Rostedt5a90f572008-09-03 17:42:51 -04003355 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003356 if (buf_iter)
3357 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003358}
3359
Ingo Molnare309b412008-05-12 21:20:51 +02003360static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003361peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3362 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003363{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003364 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003365 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003366
Steven Rostedtd7690412008-10-01 00:29:53 -04003367 if (buf_iter)
3368 event = ring_buffer_iter_peek(buf_iter, ts);
3369 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003370 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003371 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003372
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003373 if (event) {
3374 iter->ent_size = ring_buffer_event_length(event);
3375 return ring_buffer_event_data(event);
3376 }
3377 iter->ent_size = 0;
3378 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003379}
Steven Rostedtd7690412008-10-01 00:29:53 -04003380
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003381static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003382__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3383 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003384{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003385 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003386 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003387 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003388 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003389 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003390 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003391 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003392 int cpu;
3393
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003394 /*
3395 * If we are in a per_cpu trace file, don't bother by iterating over
3396 * all cpu and peek directly.
3397 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003398 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003399 if (ring_buffer_empty_cpu(buffer, cpu_file))
3400 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003401 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003402 if (ent_cpu)
3403 *ent_cpu = cpu_file;
3404
3405 return ent;
3406 }
3407
Steven Rostedtab464282008-05-12 21:21:00 +02003408 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003409
3410 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003411 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003412
Steven Rostedtbc21b472010-03-31 19:49:26 -04003413 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003414
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003415 /*
3416 * Pick the entry with the smallest timestamp:
3417 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003418 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003419 next = ent;
3420 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003421 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003422 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003423 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424 }
3425 }
3426
Steven Rostedt12b5da32012-03-27 10:43:28 -04003427 iter->ent_size = next_size;
3428
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003429 if (ent_cpu)
3430 *ent_cpu = next_cpu;
3431
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003432 if (ent_ts)
3433 *ent_ts = next_ts;
3434
Steven Rostedtbc21b472010-03-31 19:49:26 -04003435 if (missing_events)
3436 *missing_events = next_lost;
3437
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003438 return next;
3439}
3440
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003441/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003442struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3443 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003444{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003445 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003446}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003447
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003448/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003449void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003450{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003451 iter->ent = __find_next_entry(iter, &iter->cpu,
3452 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003453
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003454 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003455 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003456
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003457 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003458}
3459
Ingo Molnare309b412008-05-12 21:20:51 +02003460static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003461{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003462 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003463 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003464}
3465
Ingo Molnare309b412008-05-12 21:20:51 +02003466static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003467{
3468 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003469 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003470 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003471
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003472 WARN_ON_ONCE(iter->leftover);
3473
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003474 (*pos)++;
3475
3476 /* can't go backwards */
3477 if (iter->idx > i)
3478 return NULL;
3479
3480 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003481 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003482 else
3483 ent = iter;
3484
3485 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003486 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487
3488 iter->pos = *pos;
3489
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003490 return ent;
3491}
3492
Jason Wessel955b61e2010-08-05 09:22:23 -05003493void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003494{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003495 struct ring_buffer_event *event;
3496 struct ring_buffer_iter *buf_iter;
3497 unsigned long entries = 0;
3498 u64 ts;
3499
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003500 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003501
Steven Rostedt6d158a82012-06-27 20:46:14 -04003502 buf_iter = trace_buffer_iter(iter, cpu);
3503 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003504 return;
3505
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003506 ring_buffer_iter_reset(buf_iter);
3507
3508 /*
3509 * We could have the case with the max latency tracers
3510 * that a reset never took place on a cpu. This is evident
3511 * by the timestamp being before the start of the buffer.
3512 */
3513 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003514 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003515 break;
3516 entries++;
3517 ring_buffer_read(buf_iter, NULL);
3518 }
3519
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003520 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003521}
3522
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003523/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003524 * The current tracer is copied to avoid a global locking
3525 * all around.
3526 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003527static void *s_start(struct seq_file *m, loff_t *pos)
3528{
3529 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003530 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003531 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003532 void *p = NULL;
3533 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003534 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003535
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003536 /*
3537 * copy the tracer to avoid using a global lock all around.
3538 * iter->trace is a copy of current_trace, the pointer to the
3539 * name may be used instead of a strcmp(), as iter->trace->name
3540 * will point to the same string as current_trace->name.
3541 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003543 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3544 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003545 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003546
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003547#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003548 if (iter->snapshot && iter->trace->use_max_tr)
3549 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003550#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003551
3552 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003553 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003554
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003555 if (*pos != iter->pos) {
3556 iter->ent = NULL;
3557 iter->cpu = 0;
3558 iter->idx = -1;
3559
Steven Rostedtae3b5092013-01-23 15:22:59 -05003560 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003561 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003562 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003563 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003564 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003565
Lai Jiangshanac91d852010-03-02 17:54:50 +08003566 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003567 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3568 ;
3569
3570 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003571 /*
3572 * If we overflowed the seq_file before, then we want
3573 * to just reuse the trace_seq buffer again.
3574 */
3575 if (iter->leftover)
3576 p = iter;
3577 else {
3578 l = *pos - 1;
3579 p = s_next(m, p, &l);
3580 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003581 }
3582
Lai Jiangshan4f535962009-05-18 19:35:34 +08003583 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003584 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003585 return p;
3586}
3587
3588static void s_stop(struct seq_file *m, void *p)
3589{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003590 struct trace_iterator *iter = m->private;
3591
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003592#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003593 if (iter->snapshot && iter->trace->use_max_tr)
3594 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003595#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003596
3597 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003598 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003599
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003600 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003601 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003602}
3603
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003604static void
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003605get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3606 unsigned long *entries, int cpu)
3607{
3608 unsigned long count;
3609
3610 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3611 /*
3612 * If this buffer has skipped entries, then we hold all
3613 * entries for the trace and we need to ignore the
3614 * ones before the time stamp.
3615 */
3616 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3617 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3618 /* total is the same as the entries */
3619 *total = count;
3620 } else
3621 *total = count +
3622 ring_buffer_overrun_cpu(buf->buffer, cpu);
3623 *entries = count;
3624}
3625
3626static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003627get_total_entries(struct trace_buffer *buf,
3628 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003629{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003630 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003631 int cpu;
3632
3633 *total = 0;
3634 *entries = 0;
3635
3636 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003637 get_total_entries_cpu(buf, &t, &e, cpu);
3638 *total += t;
3639 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003640 }
3641}
3642
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003643unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3644{
3645 unsigned long total, entries;
3646
3647 if (!tr)
3648 tr = &global_trace;
3649
3650 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3651
3652 return entries;
3653}
3654
3655unsigned long trace_total_entries(struct trace_array *tr)
3656{
3657 unsigned long total, entries;
3658
3659 if (!tr)
3660 tr = &global_trace;
3661
3662 get_total_entries(&tr->trace_buffer, &total, &entries);
3663
3664 return entries;
3665}
3666
Ingo Molnare309b412008-05-12 21:20:51 +02003667static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003668{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003669 seq_puts(m, "# _------=> CPU# \n"
3670 "# / _-----=> irqs-off \n"
3671 "# | / _----=> need-resched \n"
3672 "# || / _---=> hardirq/softirq \n"
3673 "# ||| / _--=> preempt-depth \n"
3674 "# |||| / delay \n"
3675 "# cmd pid ||||| time | caller \n"
3676 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003677}
3678
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003679static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003680{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003681 unsigned long total;
3682 unsigned long entries;
3683
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003684 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003685 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3686 entries, total, num_online_cpus());
3687 seq_puts(m, "#\n");
3688}
3689
Joel Fernandes441dae82017-06-25 22:38:43 -07003690static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3691 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003692{
Joel Fernandes441dae82017-06-25 22:38:43 -07003693 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3694
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003695 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003696
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003697 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3698 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003699}
3700
Joel Fernandes441dae82017-06-25 22:38:43 -07003701static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3702 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003703{
Joel Fernandes441dae82017-06-25 22:38:43 -07003704 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003705 const char *space = " ";
3706 int prec = tgid ? 10 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003707
Quentin Perret9e738212019-02-14 15:29:50 +00003708 print_event_info(buf, m);
3709
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003710 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3711 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3712 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3713 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3714 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3715 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3716 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003717}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003718
Jiri Olsa62b915f2010-04-02 19:01:22 +02003719void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003720print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3721{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003722 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003723 struct trace_buffer *buf = iter->trace_buffer;
3724 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003725 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003726 unsigned long entries;
3727 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003728 const char *name = "preemption";
3729
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003730 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003731
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003732 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003733
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003734 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003735 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003736 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003737 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003738 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003739 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003740 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003741 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003742 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003743 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003744#if defined(CONFIG_PREEMPT_NONE)
3745 "server",
3746#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3747 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003748#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003749 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02003750#elif defined(CONFIG_PREEMPT_RT)
3751 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003752#else
3753 "unknown",
3754#endif
3755 /* These are reserved for later use */
3756 0, 0, 0, 0);
3757#ifdef CONFIG_SMP
3758 seq_printf(m, " #P:%d)\n", num_online_cpus());
3759#else
3760 seq_puts(m, ")\n");
3761#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003762 seq_puts(m, "# -----------------\n");
3763 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003764 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003765 data->comm, data->pid,
3766 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003768 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003769
3770 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003771 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003772 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3773 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003774 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003775 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3776 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003777 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003778 }
3779
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003780 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003781}
3782
Steven Rostedta3097202008-11-07 22:36:02 -05003783static void test_cpu_buff_start(struct trace_iterator *iter)
3784{
3785 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003786 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003787
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003788 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003789 return;
3790
3791 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3792 return;
3793
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003794 if (cpumask_available(iter->started) &&
3795 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003796 return;
3797
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003798 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003799 return;
3800
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003801 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003802 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003803
3804 /* Don't print started cpu buffer for the first entry of the trace */
3805 if (iter->idx > 1)
3806 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3807 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003808}
3809
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003810static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003811{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003812 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003813 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003814 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003815 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003816 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003817
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003818 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003819
Steven Rostedta3097202008-11-07 22:36:02 -05003820 test_cpu_buff_start(iter);
3821
Steven Rostedtf633cef2008-12-23 23:24:13 -05003822 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003823
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003824 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003825 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3826 trace_print_lat_context(iter);
3827 else
3828 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003829 }
3830
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003831 if (trace_seq_has_overflowed(s))
3832 return TRACE_TYPE_PARTIAL_LINE;
3833
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003834 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003835 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003836
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003837 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003838
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003839 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003840}
3841
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003842static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003843{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003844 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003845 struct trace_seq *s = &iter->seq;
3846 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003847 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003848
3849 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003850
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003851 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003852 trace_seq_printf(s, "%d %d %llu ",
3853 entry->pid, iter->cpu, iter->ts);
3854
3855 if (trace_seq_has_overflowed(s))
3856 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003857
Steven Rostedtf633cef2008-12-23 23:24:13 -05003858 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003859 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003860 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003861
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003862 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003863
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003864 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003865}
3866
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003867static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003868{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003869 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003870 struct trace_seq *s = &iter->seq;
3871 unsigned char newline = '\n';
3872 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003873 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003874
3875 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003876
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003877 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003878 SEQ_PUT_HEX_FIELD(s, entry->pid);
3879 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3880 SEQ_PUT_HEX_FIELD(s, iter->ts);
3881 if (trace_seq_has_overflowed(s))
3882 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003883 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003884
Steven Rostedtf633cef2008-12-23 23:24:13 -05003885 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003886 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003887 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003888 if (ret != TRACE_TYPE_HANDLED)
3889 return ret;
3890 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003891
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003892 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003893
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003894 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003895}
3896
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003897static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003898{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003899 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003900 struct trace_seq *s = &iter->seq;
3901 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003902 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003903
3904 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003905
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003906 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003907 SEQ_PUT_FIELD(s, entry->pid);
3908 SEQ_PUT_FIELD(s, iter->cpu);
3909 SEQ_PUT_FIELD(s, iter->ts);
3910 if (trace_seq_has_overflowed(s))
3911 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003912 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003913
Steven Rostedtf633cef2008-12-23 23:24:13 -05003914 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003915 return event ? event->funcs->binary(iter, 0, event) :
3916 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003917}
3918
Jiri Olsa62b915f2010-04-02 19:01:22 +02003919int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003920{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003921 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003922 int cpu;
3923
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003924 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003925 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003926 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003927 buf_iter = trace_buffer_iter(iter, cpu);
3928 if (buf_iter) {
3929 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003930 return 0;
3931 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003932 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003933 return 0;
3934 }
3935 return 1;
3936 }
3937
Steven Rostedtab464282008-05-12 21:21:00 +02003938 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003939 buf_iter = trace_buffer_iter(iter, cpu);
3940 if (buf_iter) {
3941 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003942 return 0;
3943 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003944 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003945 return 0;
3946 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003947 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003948
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003949 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003950}
3951
Lai Jiangshan4f535962009-05-18 19:35:34 +08003952/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003953enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003954{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003955 struct trace_array *tr = iter->tr;
3956 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003957 enum print_line_t ret;
3958
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003959 if (iter->lost_events) {
3960 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3961 iter->cpu, iter->lost_events);
3962 if (trace_seq_has_overflowed(&iter->seq))
3963 return TRACE_TYPE_PARTIAL_LINE;
3964 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003965
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003966 if (iter->trace && iter->trace->print_line) {
3967 ret = iter->trace->print_line(iter);
3968 if (ret != TRACE_TYPE_UNHANDLED)
3969 return ret;
3970 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003971
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003972 if (iter->ent->type == TRACE_BPUTS &&
3973 trace_flags & TRACE_ITER_PRINTK &&
3974 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3975 return trace_print_bputs_msg_only(iter);
3976
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003977 if (iter->ent->type == TRACE_BPRINT &&
3978 trace_flags & TRACE_ITER_PRINTK &&
3979 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003980 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003981
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003982 if (iter->ent->type == TRACE_PRINT &&
3983 trace_flags & TRACE_ITER_PRINTK &&
3984 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003985 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003986
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003987 if (trace_flags & TRACE_ITER_BIN)
3988 return print_bin_fmt(iter);
3989
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003990 if (trace_flags & TRACE_ITER_HEX)
3991 return print_hex_fmt(iter);
3992
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003993 if (trace_flags & TRACE_ITER_RAW)
3994 return print_raw_fmt(iter);
3995
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003996 return print_trace_fmt(iter);
3997}
3998
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003999void trace_latency_header(struct seq_file *m)
4000{
4001 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004002 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004003
4004 /* print nothing if the buffers are empty */
4005 if (trace_empty(iter))
4006 return;
4007
4008 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4009 print_trace_header(m, iter);
4010
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004011 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004012 print_lat_help_header(m);
4013}
4014
Jiri Olsa62b915f2010-04-02 19:01:22 +02004015void trace_default_header(struct seq_file *m)
4016{
4017 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004018 struct trace_array *tr = iter->tr;
4019 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004020
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004021 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4022 return;
4023
Jiri Olsa62b915f2010-04-02 19:01:22 +02004024 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4025 /* print nothing if the buffers are empty */
4026 if (trace_empty(iter))
4027 return;
4028 print_trace_header(m, iter);
4029 if (!(trace_flags & TRACE_ITER_VERBOSE))
4030 print_lat_help_header(m);
4031 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004032 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4033 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07004034 print_func_help_header_irq(iter->trace_buffer,
4035 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004036 else
Joel Fernandes441dae82017-06-25 22:38:43 -07004037 print_func_help_header(iter->trace_buffer, m,
4038 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004039 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004040 }
4041}
4042
Steven Rostedte0a413f2011-09-29 21:26:16 -04004043static void test_ftrace_alive(struct seq_file *m)
4044{
4045 if (!ftrace_is_dead())
4046 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004047 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4048 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004049}
4050
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004051#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004052static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004053{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004054 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4055 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4056 "# Takes a snapshot of the main buffer.\n"
4057 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4058 "# (Doesn't have to be '2' works with any number that\n"
4059 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004060}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004061
4062static void show_snapshot_percpu_help(struct seq_file *m)
4063{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004064 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004065#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004066 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4067 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004068#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004069 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4070 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004071#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004072 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4073 "# (Doesn't have to be '2' works with any number that\n"
4074 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004075}
4076
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004077static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4078{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004079 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004080 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004081 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004082 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004083
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004084 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004085 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4086 show_snapshot_main_help(m);
4087 else
4088 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004089}
4090#else
4091/* Should never be called */
4092static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4093#endif
4094
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004095static int s_show(struct seq_file *m, void *v)
4096{
4097 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004098 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099
4100 if (iter->ent == NULL) {
4101 if (iter->tr) {
4102 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4103 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004104 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004105 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004106 if (iter->snapshot && trace_empty(iter))
4107 print_snapshot_help(m, iter);
4108 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004109 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004110 else
4111 trace_default_header(m);
4112
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004113 } else if (iter->leftover) {
4114 /*
4115 * If we filled the seq_file buffer earlier, we
4116 * want to just show it now.
4117 */
4118 ret = trace_print_seq(m, &iter->seq);
4119
4120 /* ret should this time be zero, but you never know */
4121 iter->leftover = ret;
4122
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004123 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004124 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004125 ret = trace_print_seq(m, &iter->seq);
4126 /*
4127 * If we overflow the seq_file buffer, then it will
4128 * ask us for this data again at start up.
4129 * Use that instead.
4130 * ret is 0 if seq_file write succeeded.
4131 * -1 otherwise.
4132 */
4133 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004134 }
4135
4136 return 0;
4137}
4138
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004139/*
4140 * Should be used after trace_array_get(), trace_types_lock
4141 * ensures that i_cdev was already initialized.
4142 */
4143static inline int tracing_get_cpu(struct inode *inode)
4144{
4145 if (inode->i_cdev) /* See trace_create_cpu_file() */
4146 return (long)inode->i_cdev - 1;
4147 return RING_BUFFER_ALL_CPUS;
4148}
4149
James Morris88e9d342009-09-22 16:43:43 -07004150static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004151 .start = s_start,
4152 .next = s_next,
4153 .stop = s_stop,
4154 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004155};
4156
Ingo Molnare309b412008-05-12 21:20:51 +02004157static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004158__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004159{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004160 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004161 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004162 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004163
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004164 if (tracing_disabled)
4165 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004166
Jiri Olsa50e18b92012-04-25 10:23:39 +02004167 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004168 if (!iter)
4169 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004170
Gil Fruchter72917232015-06-09 10:32:35 +03004171 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004172 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004173 if (!iter->buffer_iter)
4174 goto release;
4175
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004176 /*
4177 * We make a copy of the current tracer to avoid concurrent
4178 * changes on it while we are reading.
4179 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004180 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004181 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004182 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004183 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004184
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004185 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004186
Li Zefan79f55992009-06-15 14:58:26 +08004187 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004188 goto fail;
4189
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004190 iter->tr = tr;
4191
4192#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004193 /* Currently only the top directory has a snapshot */
4194 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004195 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004196 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004197#endif
4198 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004199 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004200 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004201 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004202 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004203
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004204 /* Notify the tracer early; before we stop tracing. */
4205 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004206 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004207
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004208 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004209 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004210 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4211
David Sharp8be07092012-11-13 12:18:22 -08004212 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004213 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004214 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4215
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004216 /* stop the trace while dumping if we are not opening "snapshot" */
4217 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004218 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004219
Steven Rostedtae3b5092013-01-23 15:22:59 -05004220 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004221 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004222 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004223 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4224 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004225 }
4226 ring_buffer_read_prepare_sync();
4227 for_each_tracing_cpu(cpu) {
4228 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004229 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004230 }
4231 } else {
4232 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004233 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004234 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4235 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004236 ring_buffer_read_prepare_sync();
4237 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004238 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004239 }
4240
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004241 mutex_unlock(&trace_types_lock);
4242
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004243 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004244
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004245 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004246 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004247 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004248 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004249release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004250 seq_release_private(inode, file);
4251 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004252}
4253
4254int tracing_open_generic(struct inode *inode, struct file *filp)
4255{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004256 int ret;
4257
4258 ret = tracing_check_open_get_tr(NULL);
4259 if (ret)
4260 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004261
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262 filp->private_data = inode->i_private;
4263 return 0;
4264}
4265
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004266bool tracing_is_disabled(void)
4267{
4268 return (tracing_disabled) ? true: false;
4269}
4270
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004271/*
4272 * Open and update trace_array ref count.
4273 * Must have the current trace_array passed to it.
4274 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004275int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004276{
4277 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004278 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004279
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004280 ret = tracing_check_open_get_tr(tr);
4281 if (ret)
4282 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004283
4284 filp->private_data = inode->i_private;
4285
4286 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004287}
4288
Hannes Eder4fd27352009-02-10 19:44:12 +01004289static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004290{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004291 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004292 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004293 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004294 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004295
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004296 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004297 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004298 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004299 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004300
Oleg Nesterov6484c712013-07-23 17:26:10 +02004301 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004302 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004303 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004304
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004305 for_each_tracing_cpu(cpu) {
4306 if (iter->buffer_iter[cpu])
4307 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4308 }
4309
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004310 if (iter->trace && iter->trace->close)
4311 iter->trace->close(iter);
4312
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004313 if (!iter->snapshot)
4314 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004315 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004316
4317 __trace_array_put(tr);
4318
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004319 mutex_unlock(&trace_types_lock);
4320
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004321 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004322 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004323 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004324 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004325 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004326
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004327 return 0;
4328}
4329
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004330static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4331{
4332 struct trace_array *tr = inode->i_private;
4333
4334 trace_array_put(tr);
4335 return 0;
4336}
4337
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004338static int tracing_single_release_tr(struct inode *inode, struct file *file)
4339{
4340 struct trace_array *tr = inode->i_private;
4341
4342 trace_array_put(tr);
4343
4344 return single_release(inode, file);
4345}
4346
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004347static int tracing_open(struct inode *inode, struct file *file)
4348{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004349 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004350 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004351 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004352
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004353 ret = tracing_check_open_get_tr(tr);
4354 if (ret)
4355 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004356
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004357 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004358 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4359 int cpu = tracing_get_cpu(inode);
Bo Yan8dd33bc2017-09-18 10:03:35 -07004360 struct trace_buffer *trace_buf = &tr->trace_buffer;
4361
4362#ifdef CONFIG_TRACER_MAX_TRACE
4363 if (tr->current_trace->print_max)
4364 trace_buf = &tr->max_buffer;
4365#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004366
4367 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004368 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004369 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004370 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004371 }
4372
4373 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004374 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004375 if (IS_ERR(iter))
4376 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004377 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004378 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4379 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004380
4381 if (ret < 0)
4382 trace_array_put(tr);
4383
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004384 return ret;
4385}
4386
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004387/*
4388 * Some tracers are not suitable for instance buffers.
4389 * A tracer is always available for the global array (toplevel)
4390 * or if it explicitly states that it is.
4391 */
4392static bool
4393trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4394{
4395 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4396}
4397
4398/* Find the next tracer that this trace array may use */
4399static struct tracer *
4400get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4401{
4402 while (t && !trace_ok_for_array(t, tr))
4403 t = t->next;
4404
4405 return t;
4406}
4407
Ingo Molnare309b412008-05-12 21:20:51 +02004408static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004409t_next(struct seq_file *m, void *v, loff_t *pos)
4410{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004411 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004412 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004413
4414 (*pos)++;
4415
4416 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004417 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004418
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004419 return t;
4420}
4421
4422static void *t_start(struct seq_file *m, loff_t *pos)
4423{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004424 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004425 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004426 loff_t l = 0;
4427
4428 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004429
4430 t = get_tracer_for_array(tr, trace_types);
4431 for (; t && l < *pos; t = t_next(m, t, &l))
4432 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004433
4434 return t;
4435}
4436
4437static void t_stop(struct seq_file *m, void *p)
4438{
4439 mutex_unlock(&trace_types_lock);
4440}
4441
4442static int t_show(struct seq_file *m, void *v)
4443{
4444 struct tracer *t = v;
4445
4446 if (!t)
4447 return 0;
4448
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004449 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004450 if (t->next)
4451 seq_putc(m, ' ');
4452 else
4453 seq_putc(m, '\n');
4454
4455 return 0;
4456}
4457
James Morris88e9d342009-09-22 16:43:43 -07004458static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004459 .start = t_start,
4460 .next = t_next,
4461 .stop = t_stop,
4462 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004463};
4464
4465static int show_traces_open(struct inode *inode, struct file *file)
4466{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004467 struct trace_array *tr = inode->i_private;
4468 struct seq_file *m;
4469 int ret;
4470
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004471 ret = tracing_check_open_get_tr(tr);
4472 if (ret)
4473 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004474
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004475 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004476 if (ret) {
4477 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004478 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004479 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004480
4481 m = file->private_data;
4482 m->private = tr;
4483
4484 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004485}
4486
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004487static int show_traces_release(struct inode *inode, struct file *file)
4488{
4489 struct trace_array *tr = inode->i_private;
4490
4491 trace_array_put(tr);
4492 return seq_release(inode, file);
4493}
4494
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004495static ssize_t
4496tracing_write_stub(struct file *filp, const char __user *ubuf,
4497 size_t count, loff_t *ppos)
4498{
4499 return count;
4500}
4501
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05004502loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004503{
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05004504 int ret;
4505
Slava Pestov364829b2010-11-24 15:13:16 -08004506 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05004507 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004508 else
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05004509 file->f_pos = ret = 0;
4510
4511 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004512}
4513
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004514static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004515 .open = tracing_open,
4516 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004517 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05004518 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004519 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004520};
4521
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004522static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004523 .open = show_traces_open,
4524 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004525 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004526 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004527};
4528
4529static ssize_t
4530tracing_cpumask_read(struct file *filp, char __user *ubuf,
4531 size_t count, loff_t *ppos)
4532{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004533 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004534 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004535 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004536
Changbin Du90e406f2017-11-30 11:39:43 +08004537 len = snprintf(NULL, 0, "%*pb\n",
4538 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4539 mask_str = kmalloc(len, GFP_KERNEL);
4540 if (!mask_str)
4541 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004542
Changbin Du90e406f2017-11-30 11:39:43 +08004543 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004544 cpumask_pr_args(tr->tracing_cpumask));
4545 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004546 count = -EINVAL;
4547 goto out_err;
4548 }
Changbin Du90e406f2017-11-30 11:39:43 +08004549 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004550
4551out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004552 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004553
4554 return count;
4555}
4556
4557static ssize_t
4558tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4559 size_t count, loff_t *ppos)
4560{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004561 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304562 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004563 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304564
4565 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4566 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004567
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304568 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004569 if (err)
4570 goto err_unlock;
4571
Steven Rostedta5e25882008-12-02 15:34:05 -05004572 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004573 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004574 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004575 /*
4576 * Increase/decrease the disabled counter if we are
4577 * about to flip a bit in the cpumask:
4578 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004579 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304580 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004581 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4582 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004583 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004584 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304585 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004586 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4587 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004588 }
4589 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004590 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004591 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004592
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004593 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304594 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004595
Ingo Molnarc7078de2008-05-12 21:20:52 +02004596 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004597
4598err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004599 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004600
4601 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004602}
4603
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004604static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004605 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004606 .read = tracing_cpumask_read,
4607 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004608 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004609 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004610};
4611
Li Zefanfdb372e2009-12-08 11:15:59 +08004612static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004613{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004614 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004615 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004616 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004617 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004618
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004619 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004620 tracer_flags = tr->current_trace->flags->val;
4621 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004622
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004623 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004624 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004625 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004626 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004627 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004628 }
4629
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004630 for (i = 0; trace_opts[i].name; i++) {
4631 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004632 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004633 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004634 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004635 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004636 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004637
Li Zefanfdb372e2009-12-08 11:15:59 +08004638 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004639}
4640
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004641static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004642 struct tracer_flags *tracer_flags,
4643 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004644{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004645 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004646 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004647
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004648 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004649 if (ret)
4650 return ret;
4651
4652 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004653 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004654 else
Zhaolei77708412009-08-07 18:53:21 +08004655 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004656 return 0;
4657}
4658
Li Zefan8d18eaa2009-12-08 11:17:06 +08004659/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004660static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004661{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004662 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004663 struct tracer_flags *tracer_flags = trace->flags;
4664 struct tracer_opt *opts = NULL;
4665 int i;
4666
4667 for (i = 0; tracer_flags->opts[i].name; i++) {
4668 opts = &tracer_flags->opts[i];
4669
4670 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004671 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004672 }
4673
4674 return -EINVAL;
4675}
4676
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004677/* Some tracers require overwrite to stay enabled */
4678int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4679{
4680 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4681 return -1;
4682
4683 return 0;
4684}
4685
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004686int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004687{
4688 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004689 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004690 return 0;
4691
4692 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004693 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004694 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004695 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004696
4697 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004698 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004699 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004700 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004701
4702 if (mask == TRACE_ITER_RECORD_CMD)
4703 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004704
Joel Fernandesd914ba32017-06-26 19:01:55 -07004705 if (mask == TRACE_ITER_RECORD_TGID) {
4706 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004707 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004708 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004709 GFP_KERNEL);
4710 if (!tgid_map) {
4711 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4712 return -ENOMEM;
4713 }
4714
4715 trace_event_enable_tgid_record(enabled);
4716 }
4717
Steven Rostedtc37775d2016-04-13 16:59:18 -04004718 if (mask == TRACE_ITER_EVENT_FORK)
4719 trace_event_follow_fork(tr, enabled);
4720
Namhyung Kim1e104862017-04-17 11:44:28 +09004721 if (mask == TRACE_ITER_FUNC_FORK)
4722 ftrace_pid_follow_fork(tr, enabled);
4723
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004724 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004725 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004726#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004727 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004728#endif
4729 }
Steven Rostedt81698832012-10-11 10:15:05 -04004730
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004731 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004732 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004733 trace_printk_control(enabled);
4734 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004735
4736 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004737}
4738
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004739static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004740{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004741 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004742 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004743 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004744 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004745 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004746
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004747 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004748
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004749 len = str_has_prefix(cmp, "no");
4750 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004751 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004752
4753 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004754
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004755 mutex_lock(&trace_types_lock);
4756
Yisheng Xie591a0332018-05-17 16:36:03 +08004757 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004758 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004759 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004760 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004761 else
4762 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004763
4764 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004765
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004766 /*
4767 * If the first trailing whitespace is replaced with '\0' by strstrip,
4768 * turn it back into a space.
4769 */
4770 if (orig_len > strlen(option))
4771 option[strlen(option)] = ' ';
4772
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004773 return ret;
4774}
4775
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004776static void __init apply_trace_boot_options(void)
4777{
4778 char *buf = trace_boot_options_buf;
4779 char *option;
4780
4781 while (true) {
4782 option = strsep(&buf, ",");
4783
4784 if (!option)
4785 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004786
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004787 if (*option)
4788 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004789
4790 /* Put back the comma to allow this to be called again */
4791 if (buf)
4792 *(buf - 1) = ',';
4793 }
4794}
4795
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004796static ssize_t
4797tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4798 size_t cnt, loff_t *ppos)
4799{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004800 struct seq_file *m = filp->private_data;
4801 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004802 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004803 int ret;
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004804
4805 if (cnt >= sizeof(buf))
4806 return -EINVAL;
4807
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004808 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004809 return -EFAULT;
4810
Steven Rostedta8dd2172013-01-09 20:54:17 -05004811 buf[cnt] = 0;
4812
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004813 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004814 if (ret < 0)
4815 return ret;
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04004816
Jiri Olsacf8517c2009-10-23 19:36:16 -04004817 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004818
4819 return cnt;
4820}
4821
Li Zefanfdb372e2009-12-08 11:15:59 +08004822static int tracing_trace_options_open(struct inode *inode, struct file *file)
4823{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004824 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004825 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004826
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004827 ret = tracing_check_open_get_tr(tr);
4828 if (ret)
4829 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004830
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004831 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4832 if (ret < 0)
4833 trace_array_put(tr);
4834
4835 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004836}
4837
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004838static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004839 .open = tracing_trace_options_open,
4840 .read = seq_read,
4841 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004842 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004843 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004844};
4845
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004846static const char readme_msg[] =
4847 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004848 "# echo 0 > tracing_on : quick way to disable tracing\n"
4849 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4850 " Important files:\n"
4851 " trace\t\t\t- The static contents of the buffer\n"
4852 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4853 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4854 " current_tracer\t- function and latency tracers\n"
4855 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05004856 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004857 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4858 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4859 " trace_clock\t\t-change the clock used to order events\n"
4860 " local: Per cpu clock but may not be synced across CPUs\n"
4861 " global: Synced across CPUs but slows tracing down.\n"
4862 " counter: Not a clock, but just an increment\n"
4863 " uptime: Jiffy counter from time of boot\n"
4864 " perf: Same clock that perf events use\n"
4865#ifdef CONFIG_X86_64
4866 " x86-tsc: TSC cycle counter\n"
4867#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004868 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4869 " delta: Delta difference against a buffer-wide timestamp\n"
4870 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004871 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004872 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004873 " tracing_cpumask\t- Limit which CPUs to trace\n"
4874 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4875 "\t\t\t Remove sub-buffer with rmdir\n"
4876 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08004877 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004878 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004879 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004880#ifdef CONFIG_DYNAMIC_FTRACE
4881 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004882 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4883 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004884 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004885 "\t modules: Can select a group via module\n"
4886 "\t Format: :mod:<module-name>\n"
4887 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4888 "\t triggers: a command to perform when function is hit\n"
4889 "\t Format: <function>:<trigger>[:count]\n"
4890 "\t trigger: traceon, traceoff\n"
4891 "\t\t enable_event:<system>:<event>\n"
4892 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004893#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004894 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004895#endif
4896#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004897 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004898#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004899 "\t\t dump\n"
4900 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004901 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4902 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4903 "\t The first one will disable tracing every time do_fault is hit\n"
4904 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4905 "\t The first time do trap is hit and it disables tracing, the\n"
4906 "\t counter will decrement to 2. If tracing is already disabled,\n"
4907 "\t the counter will not decrement. It only decrements when the\n"
4908 "\t trigger did work\n"
4909 "\t To remove trigger without count:\n"
4910 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4911 "\t To remove trigger with a count:\n"
4912 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004913 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004914 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4915 "\t modules: Can select a group via module command :mod:\n"
4916 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004917#endif /* CONFIG_DYNAMIC_FTRACE */
4918#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004919 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4920 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004921#endif
4922#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4923 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004924 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004925 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4926#endif
4927#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004928 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4929 "\t\t\t snapshot buffer. Read the contents for more\n"
4930 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004931#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004932#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004933 " stack_trace\t\t- Shows the max stack trace when active\n"
4934 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004935 "\t\t\t Write into this file to reset the max size (trigger a\n"
4936 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004937#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004938 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4939 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004940#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004941#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004942#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004943 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004944 "\t\t\t Write into this file to define/undefine new trace events.\n"
4945#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004946#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004947 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004948 "\t\t\t Write into this file to define/undefine new trace events.\n"
4949#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004950#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09004951 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004952 "\t\t\t Write into this file to define/undefine new trace events.\n"
4953#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004954#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004955 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09004956 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4957 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004958#ifdef CONFIG_HIST_TRIGGERS
4959 "\t s:[synthetic/]<event> <field> [<field>]\n"
4960#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004961 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004962#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004963 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304964 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004965#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004966#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05304967 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004968#endif
4969 "\t args: <name>=fetcharg[:type]\n"
4970 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004971#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09004972 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004973#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09004974 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004975#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09004976 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09004977 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09004978 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09004979 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004980#ifdef CONFIG_HIST_TRIGGERS
4981 "\t field: <stype> <name>;\n"
4982 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4983 "\t [unsigned] char/int/long\n"
4984#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004985#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004986 " events/\t\t- Directory containing all trace event subsystems:\n"
4987 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4988 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004989 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4990 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004991 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004992 " events/<system>/<event>/\t- Directory containing control files for\n"
4993 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004994 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4995 " filter\t\t- If set, only events passing filter are traced\n"
4996 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004997 "\t Format: <trigger>[:count][if <filter>]\n"
4998 "\t trigger: traceon, traceoff\n"
4999 "\t enable_event:<system>:<event>\n"
5000 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005001#ifdef CONFIG_HIST_TRIGGERS
5002 "\t enable_hist:<system>:<event>\n"
5003 "\t disable_hist:<system>:<event>\n"
5004#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005005#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005006 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005007#endif
5008#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005009 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005010#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005011#ifdef CONFIG_HIST_TRIGGERS
5012 "\t\t hist (see below)\n"
5013#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005014 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5015 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5016 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5017 "\t events/block/block_unplug/trigger\n"
5018 "\t The first disables tracing every time block_unplug is hit.\n"
5019 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5020 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5021 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5022 "\t Like function triggers, the counter is only decremented if it\n"
5023 "\t enabled or disabled tracing.\n"
5024 "\t To remove a trigger without a count:\n"
5025 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5026 "\t To remove a trigger with a count:\n"
5027 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5028 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005029#ifdef CONFIG_HIST_TRIGGERS
5030 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005031 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005032 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005033 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005034 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005035 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005036 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005037 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005038 "\t [if <filter>]\n\n"
5039 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005040 "\t table using the key(s) and value(s) named, and the value of a\n"
5041 "\t sum called 'hitcount' is incremented. Keys and values\n"
5042 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005043 "\t can be any field, or the special string 'stacktrace'.\n"
5044 "\t Compound keys consisting of up to two fields can be specified\n"
5045 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5046 "\t fields. Sort keys consisting of up to two fields can be\n"
5047 "\t specified using the 'sort' keyword. The sort direction can\n"
5048 "\t be modified by appending '.descending' or '.ascending' to a\n"
5049 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005050 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5051 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5052 "\t its histogram data will be shared with other triggers of the\n"
5053 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005054 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005055 "\t table in its entirety to stdout. If there are multiple hist\n"
5056 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005057 "\t trigger in the output. The table displayed for a named\n"
5058 "\t trigger will be the same as any other instance having the\n"
5059 "\t same name. The default format used to display a given field\n"
5060 "\t can be modified by appending any of the following modifiers\n"
5061 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005062 "\t .hex display a number as a hex value\n"
5063 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005064 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005065 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005066 "\t .syscall display a syscall id as a syscall name\n"
5067 "\t .log2 display log2 value rather than raw number\n"
5068 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005069 "\t The 'pause' parameter can be used to pause an existing hist\n"
5070 "\t trigger or to start a hist trigger but not log any events\n"
5071 "\t until told to do so. 'continue' can be used to start or\n"
5072 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005073 "\t The 'clear' parameter will clear the contents of a running\n"
5074 "\t hist trigger and leave its current paused/active state\n"
5075 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005076 "\t The enable_hist and disable_hist triggers can be used to\n"
5077 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005078 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005079 "\t the enable_event and disable_event triggers.\n\n"
5080 "\t Hist trigger handlers and actions are executed whenever a\n"
5081 "\t a histogram entry is added or updated. They take the form:\n\n"
5082 "\t <handler>.<action>\n\n"
5083 "\t The available handlers are:\n\n"
5084 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005085 "\t onmax(var) - invoke if var exceeds current max\n"
5086 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005087 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005088 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005089 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005090#ifdef CONFIG_TRACER_SNAPSHOT
5091 "\t snapshot() - snapshot the trace buffer\n"
5092#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005093#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005094;
5095
5096static ssize_t
5097tracing_readme_read(struct file *filp, char __user *ubuf,
5098 size_t cnt, loff_t *ppos)
5099{
5100 return simple_read_from_buffer(ubuf, cnt, ppos,
5101 readme_msg, strlen(readme_msg));
5102}
5103
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005104static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005105 .open = tracing_open_generic,
5106 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005107 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005108};
5109
Michael Sartain99c621d2017-07-05 22:07:15 -06005110static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5111{
5112 int *ptr = v;
5113
5114 if (*pos || m->count)
5115 ptr++;
5116
5117 (*pos)++;
5118
5119 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5120 if (trace_find_tgid(*ptr))
5121 return ptr;
5122 }
5123
5124 return NULL;
5125}
5126
5127static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5128{
5129 void *v;
5130 loff_t l = 0;
5131
5132 if (!tgid_map)
5133 return NULL;
5134
5135 v = &tgid_map[0];
5136 while (l <= *pos) {
5137 v = saved_tgids_next(m, v, &l);
5138 if (!v)
5139 return NULL;
5140 }
5141
5142 return v;
5143}
5144
5145static void saved_tgids_stop(struct seq_file *m, void *v)
5146{
5147}
5148
5149static int saved_tgids_show(struct seq_file *m, void *v)
5150{
5151 int pid = (int *)v - tgid_map;
5152
5153 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5154 return 0;
5155}
5156
5157static const struct seq_operations tracing_saved_tgids_seq_ops = {
5158 .start = saved_tgids_start,
5159 .stop = saved_tgids_stop,
5160 .next = saved_tgids_next,
5161 .show = saved_tgids_show,
5162};
5163
5164static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5165{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005166 int ret;
5167
5168 ret = tracing_check_open_get_tr(NULL);
5169 if (ret)
5170 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005171
5172 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5173}
5174
5175
5176static const struct file_operations tracing_saved_tgids_fops = {
5177 .open = tracing_saved_tgids_open,
5178 .read = seq_read,
5179 .llseek = seq_lseek,
5180 .release = seq_release,
5181};
5182
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005183static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005184{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005185 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005186
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005187 if (*pos || m->count)
5188 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005189
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005190 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005191
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005192 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5193 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005194 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005195 continue;
5196
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005197 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005198 }
5199
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005200 return NULL;
5201}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005202
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005203static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5204{
5205 void *v;
5206 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005207
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005208 preempt_disable();
5209 arch_spin_lock(&trace_cmdline_lock);
5210
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005211 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005212 while (l <= *pos) {
5213 v = saved_cmdlines_next(m, v, &l);
5214 if (!v)
5215 return NULL;
5216 }
5217
5218 return v;
5219}
5220
5221static void saved_cmdlines_stop(struct seq_file *m, void *v)
5222{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005223 arch_spin_unlock(&trace_cmdline_lock);
5224 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005225}
5226
5227static int saved_cmdlines_show(struct seq_file *m, void *v)
5228{
5229 char buf[TASK_COMM_LEN];
5230 unsigned int *pid = v;
5231
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005232 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005233 seq_printf(m, "%d %s\n", *pid, buf);
5234 return 0;
5235}
5236
5237static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5238 .start = saved_cmdlines_start,
5239 .next = saved_cmdlines_next,
5240 .stop = saved_cmdlines_stop,
5241 .show = saved_cmdlines_show,
5242};
5243
5244static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5245{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005246 int ret;
5247
5248 ret = tracing_check_open_get_tr(NULL);
5249 if (ret)
5250 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005251
5252 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005253}
5254
5255static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005256 .open = tracing_saved_cmdlines_open,
5257 .read = seq_read,
5258 .llseek = seq_lseek,
5259 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005260};
5261
5262static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005263tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5264 size_t cnt, loff_t *ppos)
5265{
5266 char buf[64];
5267 int r;
5268
5269 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005270 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005271 arch_spin_unlock(&trace_cmdline_lock);
5272
5273 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5274}
5275
5276static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5277{
5278 kfree(s->saved_cmdlines);
5279 kfree(s->map_cmdline_to_pid);
5280 kfree(s);
5281}
5282
5283static int tracing_resize_saved_cmdlines(unsigned int val)
5284{
5285 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5286
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005287 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005288 if (!s)
5289 return -ENOMEM;
5290
5291 if (allocate_cmdlines_buffer(val, s) < 0) {
5292 kfree(s);
5293 return -ENOMEM;
5294 }
5295
5296 arch_spin_lock(&trace_cmdline_lock);
5297 savedcmd_temp = savedcmd;
5298 savedcmd = s;
5299 arch_spin_unlock(&trace_cmdline_lock);
5300 free_saved_cmdlines_buffer(savedcmd_temp);
5301
5302 return 0;
5303}
5304
5305static ssize_t
5306tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5307 size_t cnt, loff_t *ppos)
5308{
5309 unsigned long val;
5310 int ret;
5311
5312 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5313 if (ret)
5314 return ret;
5315
5316 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5317 if (!val || val > PID_MAX_DEFAULT)
5318 return -EINVAL;
5319
5320 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5321 if (ret < 0)
5322 return ret;
5323
5324 *ppos += cnt;
5325
5326 return cnt;
5327}
5328
5329static const struct file_operations tracing_saved_cmdlines_size_fops = {
5330 .open = tracing_open_generic,
5331 .read = tracing_saved_cmdlines_size_read,
5332 .write = tracing_saved_cmdlines_size_write,
5333};
5334
Jeremy Linton681bec02017-05-31 16:56:53 -05005335#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005336static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005337update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005338{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005339 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005340 if (ptr->tail.next) {
5341 ptr = ptr->tail.next;
5342 /* Set ptr to the next real item (skip head) */
5343 ptr++;
5344 } else
5345 return NULL;
5346 }
5347 return ptr;
5348}
5349
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005350static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005351{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005352 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005353
5354 /*
5355 * Paranoid! If ptr points to end, we don't want to increment past it.
5356 * This really should never happen.
5357 */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005358 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005359 if (WARN_ON_ONCE(!ptr))
5360 return NULL;
5361
5362 ptr++;
5363
5364 (*pos)++;
5365
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005366 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005367
5368 return ptr;
5369}
5370
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005371static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005372{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005373 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005374 loff_t l = 0;
5375
Jeremy Linton1793ed92017-05-31 16:56:46 -05005376 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005377
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005378 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005379 if (v)
5380 v++;
5381
5382 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005383 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005384 }
5385
5386 return v;
5387}
5388
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005389static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005390{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005391 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005392}
5393
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005394static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005395{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005396 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005397
5398 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005399 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005400 ptr->map.system);
5401
5402 return 0;
5403}
5404
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005405static const struct seq_operations tracing_eval_map_seq_ops = {
5406 .start = eval_map_start,
5407 .next = eval_map_next,
5408 .stop = eval_map_stop,
5409 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005410};
5411
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005412static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005413{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005414 int ret;
5415
5416 ret = tracing_check_open_get_tr(NULL);
5417 if (ret)
5418 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005419
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005420 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005421}
5422
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005423static const struct file_operations tracing_eval_map_fops = {
5424 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005425 .read = seq_read,
5426 .llseek = seq_lseek,
5427 .release = seq_release,
5428};
5429
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005430static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005431trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005432{
5433 /* Return tail of array given the head */
5434 return ptr + ptr->head.length + 1;
5435}
5436
5437static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005438trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005439 int len)
5440{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005441 struct trace_eval_map **stop;
5442 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005443 union trace_eval_map_item *map_array;
5444 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005445
5446 stop = start + len;
5447
5448 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005449 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005450 * where the head holds the module and length of array, and the
5451 * tail holds a pointer to the next list.
5452 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005453 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005454 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005455 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005456 return;
5457 }
5458
Jeremy Linton1793ed92017-05-31 16:56:46 -05005459 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005460
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005461 if (!trace_eval_maps)
5462 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005463 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005464 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005465 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005466 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005467 if (!ptr->tail.next)
5468 break;
5469 ptr = ptr->tail.next;
5470
5471 }
5472 ptr->tail.next = map_array;
5473 }
5474 map_array->head.mod = mod;
5475 map_array->head.length = len;
5476 map_array++;
5477
5478 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5479 map_array->map = **map;
5480 map_array++;
5481 }
5482 memset(map_array, 0, sizeof(*map_array));
5483
Jeremy Linton1793ed92017-05-31 16:56:46 -05005484 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005485}
5486
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005487static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005488{
Jeremy Linton681bec02017-05-31 16:56:53 -05005489 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005490 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005491}
5492
Jeremy Linton681bec02017-05-31 16:56:53 -05005493#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005494static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5495static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005496 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005497#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005498
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005499static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005500 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005501{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005502 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005503
5504 if (len <= 0)
5505 return;
5506
5507 map = start;
5508
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005509 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005510
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005511 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005512}
5513
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005514static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005515tracing_set_trace_read(struct file *filp, char __user *ubuf,
5516 size_t cnt, loff_t *ppos)
5517{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005518 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005519 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005520 int r;
5521
5522 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005523 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005524 mutex_unlock(&trace_types_lock);
5525
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005526 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005527}
5528
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005529int tracer_init(struct tracer *t, struct trace_array *tr)
5530{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005531 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005532 return t->init(tr);
5533}
5534
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005535static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005536{
5537 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005538
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005539 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005540 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005541}
5542
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005543#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005544/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005545static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5546 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005547{
5548 int cpu, ret = 0;
5549
5550 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5551 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005552 ret = ring_buffer_resize(trace_buf->buffer,
5553 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005554 if (ret < 0)
5555 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005556 per_cpu_ptr(trace_buf->data, cpu)->entries =
5557 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005558 }
5559 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005560 ret = ring_buffer_resize(trace_buf->buffer,
5561 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005562 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005563 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5564 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005565 }
5566
5567 return ret;
5568}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005569#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005570
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005571static int __tracing_resize_ring_buffer(struct trace_array *tr,
5572 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005573{
5574 int ret;
5575
5576 /*
5577 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005578 * we use the size that was given, and we can forget about
5579 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005580 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005581 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005582
Steven Rostedtb382ede62012-10-10 21:44:34 -04005583 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005584 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005585 return 0;
5586
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005587 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005588 if (ret < 0)
5589 return ret;
5590
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005591#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005592 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5593 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005594 goto out;
5595
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005596 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005597 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005598 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5599 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005600 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005601 /*
5602 * AARGH! We are left with different
5603 * size max buffer!!!!
5604 * The max buffer is our "snapshot" buffer.
5605 * When a tracer needs a snapshot (one of the
5606 * latency tracers), it swaps the max buffer
5607 * with the saved snap shot. We succeeded to
5608 * update the size of the main buffer, but failed to
5609 * update the size of the max buffer. But when we tried
5610 * to reset the main buffer to the original size, we
5611 * failed there too. This is very unlikely to
5612 * happen, but if it does, warn and kill all
5613 * tracing.
5614 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005615 WARN_ON(1);
5616 tracing_disabled = 1;
5617 }
5618 return ret;
5619 }
5620
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005621 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005622 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005623 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005624 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005625
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005626 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005627#endif /* CONFIG_TRACER_MAX_TRACE */
5628
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005629 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005630 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005631 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005632 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005633
5634 return ret;
5635}
5636
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005637static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5638 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005639{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005640 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005641
5642 mutex_lock(&trace_types_lock);
5643
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005644 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5645 /* make sure, this cpu is enabled in the mask */
5646 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5647 ret = -EINVAL;
5648 goto out;
5649 }
5650 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005651
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005652 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005653 if (ret < 0)
5654 ret = -ENOMEM;
5655
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005656out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005657 mutex_unlock(&trace_types_lock);
5658
5659 return ret;
5660}
5661
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005662
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005663/**
5664 * tracing_update_buffers - used by tracing facility to expand ring buffers
5665 *
5666 * To save on memory when the tracing is never used on a system with it
5667 * configured in. The ring buffers are set to a minimum size. But once
5668 * a user starts to use the tracing facility, then they need to grow
5669 * to their default size.
5670 *
5671 * This function is to be called when a tracer is about to be used.
5672 */
5673int tracing_update_buffers(void)
5674{
5675 int ret = 0;
5676
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005677 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005678 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005679 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005680 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005681 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005682
5683 return ret;
5684}
5685
Steven Rostedt577b7852009-02-26 23:43:05 -05005686struct trace_option_dentry;
5687
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005688static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005689create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005690
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005691/*
5692 * Used to clear out the tracer before deletion of an instance.
5693 * Must have trace_types_lock held.
5694 */
5695static void tracing_set_nop(struct trace_array *tr)
5696{
5697 if (tr->current_trace == &nop_trace)
5698 return;
5699
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005700 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005701
5702 if (tr->current_trace->reset)
5703 tr->current_trace->reset(tr);
5704
5705 tr->current_trace = &nop_trace;
5706}
5707
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005708static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005709{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005710 /* Only enable if the directory has been created already. */
5711 if (!tr->dir)
5712 return;
5713
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005714 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005715}
5716
5717static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5718{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005719 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005720#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005721 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005722#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005723 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005724
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005725 mutex_lock(&trace_types_lock);
5726
Steven Rostedt73c51622009-03-11 13:42:01 -04005727 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005728 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005729 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005730 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005731 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005732 ret = 0;
5733 }
5734
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005735 for (t = trace_types; t; t = t->next) {
5736 if (strcmp(t->name, buf) == 0)
5737 break;
5738 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005739 if (!t) {
5740 ret = -EINVAL;
5741 goto out;
5742 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005743 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005744 goto out;
5745
Tom Zanussia35873a2019-02-13 17:42:45 -06005746#ifdef CONFIG_TRACER_SNAPSHOT
5747 if (t->use_max_tr) {
5748 arch_spin_lock(&tr->max_lock);
5749 if (tr->cond_snapshot)
5750 ret = -EBUSY;
5751 arch_spin_unlock(&tr->max_lock);
5752 if (ret)
5753 goto out;
5754 }
5755#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005756 /* Some tracers won't work on kernel command line */
5757 if (system_state < SYSTEM_RUNNING && t->noboot) {
5758 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5759 t->name);
5760 goto out;
5761 }
5762
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005763 /* Some tracers are only allowed for the top level buffer */
5764 if (!trace_ok_for_array(t, tr)) {
5765 ret = -EINVAL;
5766 goto out;
5767 }
5768
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005769 /* If trace pipe files are being read, we can't change the tracer */
5770 if (tr->current_trace->ref) {
5771 ret = -EBUSY;
5772 goto out;
5773 }
5774
Steven Rostedt9f029e82008-11-12 15:24:24 -05005775 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005776
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005777 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005778
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005779 if (tr->current_trace->reset)
5780 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005781
Paul E. McKenney74401722018-11-06 18:44:52 -08005782 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005783 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005784
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005785#ifdef CONFIG_TRACER_MAX_TRACE
5786 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005787
5788 if (had_max_tr && !t->use_max_tr) {
5789 /*
5790 * We need to make sure that the update_max_tr sees that
5791 * current_trace changed to nop_trace to keep it from
5792 * swapping the buffers after we resize it.
5793 * The update_max_tr is called from interrupts disabled
5794 * so a synchronized_sched() is sufficient.
5795 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005796 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005797 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005798 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005799#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005800
5801#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005802 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005803 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005804 if (ret < 0)
5805 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005806 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005807#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005808
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005809 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005810 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005811 if (ret)
5812 goto out;
5813 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005814
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005815 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005816 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005817 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005818 out:
5819 mutex_unlock(&trace_types_lock);
5820
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005821 return ret;
5822}
5823
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005824static ssize_t
5825tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5826 size_t cnt, loff_t *ppos)
5827{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005828 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005829 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005830 int i;
5831 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005832 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005833
Steven Rostedt60063a62008-10-28 10:44:24 -04005834 ret = cnt;
5835
Li Zefanee6c2c12009-09-18 14:06:47 +08005836 if (cnt > MAX_TRACER_SIZE)
5837 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005838
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005839 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005840 return -EFAULT;
5841
5842 buf[cnt] = 0;
5843
5844 /* strip ending whitespace. */
5845 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5846 buf[i] = 0;
5847
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005848 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005849 if (err)
5850 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005851
Jiri Olsacf8517c2009-10-23 19:36:16 -04005852 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005853
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005854 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005855}
5856
5857static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005858tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5859 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005860{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005861 char buf[64];
5862 int r;
5863
Steven Rostedtcffae432008-05-12 21:21:00 +02005864 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005865 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005866 if (r > sizeof(buf))
5867 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005868 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005869}
5870
5871static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005872tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5873 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005874{
Hannes Eder5e398412009-02-10 19:44:34 +01005875 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005876 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005877
Peter Huewe22fe9b52011-06-07 21:58:27 +02005878 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5879 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005880 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005881
5882 *ptr = val * 1000;
5883
5884 return cnt;
5885}
5886
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005887static ssize_t
5888tracing_thresh_read(struct file *filp, char __user *ubuf,
5889 size_t cnt, loff_t *ppos)
5890{
5891 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5892}
5893
5894static ssize_t
5895tracing_thresh_write(struct file *filp, const char __user *ubuf,
5896 size_t cnt, loff_t *ppos)
5897{
5898 struct trace_array *tr = filp->private_data;
5899 int ret;
5900
5901 mutex_lock(&trace_types_lock);
5902 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5903 if (ret < 0)
5904 goto out;
5905
5906 if (tr->current_trace->update_thresh) {
5907 ret = tr->current_trace->update_thresh(tr);
5908 if (ret < 0)
5909 goto out;
5910 }
5911
5912 ret = cnt;
5913out:
5914 mutex_unlock(&trace_types_lock);
5915
5916 return ret;
5917}
5918
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005919#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005920
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005921static ssize_t
5922tracing_max_lat_read(struct file *filp, char __user *ubuf,
5923 size_t cnt, loff_t *ppos)
5924{
5925 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5926}
5927
5928static ssize_t
5929tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5930 size_t cnt, loff_t *ppos)
5931{
5932 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5933}
5934
Chen Gange428abb2015-11-10 05:15:15 +08005935#endif
5936
Steven Rostedtb3806b42008-05-12 21:20:46 +02005937static int tracing_open_pipe(struct inode *inode, struct file *filp)
5938{
Oleg Nesterov15544202013-07-23 17:25:57 +02005939 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005940 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005941 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005942
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005943 ret = tracing_check_open_get_tr(tr);
5944 if (ret)
5945 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005946
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005947 mutex_lock(&trace_types_lock);
5948
Steven Rostedtb3806b42008-05-12 21:20:46 +02005949 /* create a buffer to store the information to pass to userspace */
5950 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005951 if (!iter) {
5952 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005953 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005954 goto out;
5955 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005956
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005957 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005958 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005959
5960 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5961 ret = -ENOMEM;
5962 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305963 }
5964
Steven Rostedta3097202008-11-07 22:36:02 -05005965 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305966 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005967
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005968 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005969 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5970
David Sharp8be07092012-11-13 12:18:22 -08005971 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005972 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005973 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5974
Oleg Nesterov15544202013-07-23 17:25:57 +02005975 iter->tr = tr;
5976 iter->trace_buffer = &tr->trace_buffer;
5977 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005978 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005979 filp->private_data = iter;
5980
Steven Rostedt107bad82008-05-12 21:21:01 +02005981 if (iter->trace->pipe_open)
5982 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005983
Arnd Bergmannb4447862010-07-07 23:40:11 +02005984 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005985
5986 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005987out:
5988 mutex_unlock(&trace_types_lock);
5989 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005990
5991fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005992 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005993 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005994 mutex_unlock(&trace_types_lock);
5995 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005996}
5997
5998static int tracing_release_pipe(struct inode *inode, struct file *file)
5999{
6000 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006001 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006002
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006003 mutex_lock(&trace_types_lock);
6004
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006005 tr->current_trace->ref--;
6006
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006007 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006008 iter->trace->pipe_close(iter);
6009
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006010 mutex_unlock(&trace_types_lock);
6011
Rusty Russell44623442009-01-01 10:12:23 +10306012 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006013 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006014 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006015
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006016 trace_array_put(tr);
6017
Steven Rostedtb3806b42008-05-12 21:20:46 +02006018 return 0;
6019}
6020
Al Viro9dd95742017-07-03 00:42:43 -04006021static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006022trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006023{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006024 struct trace_array *tr = iter->tr;
6025
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006026 /* Iterators are static, they should be filled or empty */
6027 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006028 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006029
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006030 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006031 /*
6032 * Always select as readable when in blocking mode
6033 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006034 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006035 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006036 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006037 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006038}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006039
Al Viro9dd95742017-07-03 00:42:43 -04006040static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006041tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6042{
6043 struct trace_iterator *iter = filp->private_data;
6044
6045 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006046}
6047
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006048/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006049static int tracing_wait_pipe(struct file *filp)
6050{
6051 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006052 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006053
6054 while (trace_empty(iter)) {
6055
6056 if ((filp->f_flags & O_NONBLOCK)) {
6057 return -EAGAIN;
6058 }
6059
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006060 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006061 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006062 * We still block if tracing is disabled, but we have never
6063 * read anything. This allows a user to cat this file, and
6064 * then enable tracing. But after we have read something,
6065 * we give an EOF when tracing is again disabled.
6066 *
6067 * iter->pos will be 0 if we haven't read anything.
6068 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006069 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006070 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006071
6072 mutex_unlock(&iter->mutex);
6073
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006074 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006075
6076 mutex_lock(&iter->mutex);
6077
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006078 if (ret)
6079 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006080 }
6081
6082 return 1;
6083}
6084
Steven Rostedtb3806b42008-05-12 21:20:46 +02006085/*
6086 * Consumer reader.
6087 */
6088static ssize_t
6089tracing_read_pipe(struct file *filp, char __user *ubuf,
6090 size_t cnt, loff_t *ppos)
6091{
6092 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006093 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006094
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006095 /*
6096 * Avoid more than one consumer on a single file descriptor
6097 * This is just a matter of traces coherency, the ring buffer itself
6098 * is protected.
6099 */
6100 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006101
6102 /* return any leftover data */
6103 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6104 if (sret != -EBUSY)
6105 goto out;
6106
6107 trace_seq_init(&iter->seq);
6108
Steven Rostedt107bad82008-05-12 21:21:01 +02006109 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006110 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6111 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006112 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006113 }
6114
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006115waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006116 sret = tracing_wait_pipe(filp);
6117 if (sret <= 0)
6118 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006119
6120 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006121 if (trace_empty(iter)) {
6122 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006123 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006124 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006125
6126 if (cnt >= PAGE_SIZE)
6127 cnt = PAGE_SIZE - 1;
6128
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006129 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006130 memset(&iter->seq, 0,
6131 sizeof(struct trace_iterator) -
6132 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006133 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006134 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006135 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006136
Lai Jiangshan4f535962009-05-18 19:35:34 +08006137 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006138 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006139 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006140 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006141 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006142
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006143 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006144 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006145 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006146 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006147 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006148 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006149 if (ret != TRACE_TYPE_NO_CONSUME)
6150 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006151
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006152 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006153 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006154
6155 /*
6156 * Setting the full flag means we reached the trace_seq buffer
6157 * size and we should leave by partial output condition above.
6158 * One of the trace_seq_* functions is not used properly.
6159 */
6160 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6161 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006162 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006163 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006164 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006165
Steven Rostedtb3806b42008-05-12 21:20:46 +02006166 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006167 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006168 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006169 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006170
6171 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006172 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006173 * entries, go back to wait for more entries.
6174 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006175 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006176 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006177
Steven Rostedt107bad82008-05-12 21:21:01 +02006178out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006179 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006180
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006181 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006182}
6183
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006184static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6185 unsigned int idx)
6186{
6187 __free_page(spd->pages[idx]);
6188}
6189
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006190static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006191 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006192 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006193 .steal = generic_pipe_buf_steal,
6194 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006195};
6196
Steven Rostedt34cd4992009-02-09 12:06:29 -05006197static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006198tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006199{
6200 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006201 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006202 int ret;
6203
6204 /* Seq buffer is page-sized, exactly what we need. */
6205 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006206 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006207 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006208
6209 if (trace_seq_has_overflowed(&iter->seq)) {
6210 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006211 break;
6212 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006213
6214 /*
6215 * This should not be hit, because it should only
6216 * be set if the iter->seq overflowed. But check it
6217 * anyway to be safe.
6218 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006219 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006220 iter->seq.seq.len = save_len;
6221 break;
6222 }
6223
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006224 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006225 if (rem < count) {
6226 rem = 0;
6227 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006228 break;
6229 }
6230
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006231 if (ret != TRACE_TYPE_NO_CONSUME)
6232 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006233 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006234 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006235 rem = 0;
6236 iter->ent = NULL;
6237 break;
6238 }
6239 }
6240
6241 return rem;
6242}
6243
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006244static ssize_t tracing_splice_read_pipe(struct file *filp,
6245 loff_t *ppos,
6246 struct pipe_inode_info *pipe,
6247 size_t len,
6248 unsigned int flags)
6249{
Jens Axboe35f3d142010-05-20 10:43:18 +02006250 struct page *pages_def[PIPE_DEF_BUFFERS];
6251 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006252 struct trace_iterator *iter = filp->private_data;
6253 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006254 .pages = pages_def,
6255 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006256 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006257 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006258 .ops = &tracing_pipe_buf_ops,
6259 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006260 };
6261 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006262 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006263 unsigned int i;
6264
Jens Axboe35f3d142010-05-20 10:43:18 +02006265 if (splice_grow_spd(pipe, &spd))
6266 return -ENOMEM;
6267
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006268 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006269
6270 if (iter->trace->splice_read) {
6271 ret = iter->trace->splice_read(iter, filp,
6272 ppos, pipe, len, flags);
6273 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006274 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006275 }
6276
6277 ret = tracing_wait_pipe(filp);
6278 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006279 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006280
Jason Wessel955b61e2010-08-05 09:22:23 -05006281 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006282 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006283 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006284 }
6285
Lai Jiangshan4f535962009-05-18 19:35:34 +08006286 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006287 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006288
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006289 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006290 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006291 spd.pages[i] = alloc_page(GFP_KERNEL);
6292 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006293 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006294
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006295 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006296
6297 /* Copy the data into the page, so we can start over. */
6298 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006299 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006300 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006301 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006302 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006303 break;
6304 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006305 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006306 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006307
Steven Rostedtf9520752009-03-02 14:04:40 -05006308 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006309 }
6310
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006311 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006312 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006313 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006314
6315 spd.nr_pages = i;
6316
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006317 if (i)
6318 ret = splice_to_pipe(pipe, &spd);
6319 else
6320 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006321out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006322 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006323 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006324
Steven Rostedt34cd4992009-02-09 12:06:29 -05006325out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006326 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006327 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006328}
6329
Steven Rostedta98a3c32008-05-12 21:20:59 +02006330static ssize_t
6331tracing_entries_read(struct file *filp, char __user *ubuf,
6332 size_t cnt, loff_t *ppos)
6333{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006334 struct inode *inode = file_inode(filp);
6335 struct trace_array *tr = inode->i_private;
6336 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006337 char buf[64];
6338 int r = 0;
6339 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006340
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006341 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006342
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006343 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006344 int cpu, buf_size_same;
6345 unsigned long size;
6346
6347 size = 0;
6348 buf_size_same = 1;
6349 /* check if all cpu sizes are same */
6350 for_each_tracing_cpu(cpu) {
6351 /* fill in the size from first enabled cpu */
6352 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006353 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6354 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006355 buf_size_same = 0;
6356 break;
6357 }
6358 }
6359
6360 if (buf_size_same) {
6361 if (!ring_buffer_expanded)
6362 r = sprintf(buf, "%lu (expanded: %lu)\n",
6363 size >> 10,
6364 trace_buf_size >> 10);
6365 else
6366 r = sprintf(buf, "%lu\n", size >> 10);
6367 } else
6368 r = sprintf(buf, "X\n");
6369 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006370 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006371
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006372 mutex_unlock(&trace_types_lock);
6373
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006374 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6375 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006376}
6377
6378static ssize_t
6379tracing_entries_write(struct file *filp, const char __user *ubuf,
6380 size_t cnt, loff_t *ppos)
6381{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006382 struct inode *inode = file_inode(filp);
6383 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006384 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006385 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006386
Peter Huewe22fe9b52011-06-07 21:58:27 +02006387 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6388 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006389 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006390
6391 /* must have at least 1 entry */
6392 if (!val)
6393 return -EINVAL;
6394
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006395 /* value is in KB */
6396 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006397 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006398 if (ret < 0)
6399 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006400
Jiri Olsacf8517c2009-10-23 19:36:16 -04006401 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006402
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006403 return cnt;
6404}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006405
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006406static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006407tracing_total_entries_read(struct file *filp, char __user *ubuf,
6408 size_t cnt, loff_t *ppos)
6409{
6410 struct trace_array *tr = filp->private_data;
6411 char buf[64];
6412 int r, cpu;
6413 unsigned long size = 0, expanded_size = 0;
6414
6415 mutex_lock(&trace_types_lock);
6416 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006417 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006418 if (!ring_buffer_expanded)
6419 expanded_size += trace_buf_size >> 10;
6420 }
6421 if (ring_buffer_expanded)
6422 r = sprintf(buf, "%lu\n", size);
6423 else
6424 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6425 mutex_unlock(&trace_types_lock);
6426
6427 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6428}
6429
6430static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006431tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6432 size_t cnt, loff_t *ppos)
6433{
6434 /*
6435 * There is no need to read what the user has written, this function
6436 * is just to make sure that there is no error when "echo" is used
6437 */
6438
6439 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006440
6441 return cnt;
6442}
6443
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006444static int
6445tracing_free_buffer_release(struct inode *inode, struct file *filp)
6446{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006447 struct trace_array *tr = inode->i_private;
6448
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006449 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006450 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006451 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006452 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006453 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006454
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006455 trace_array_put(tr);
6456
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006457 return 0;
6458}
6459
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006460static ssize_t
6461tracing_mark_write(struct file *filp, const char __user *ubuf,
6462 size_t cnt, loff_t *fpos)
6463{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006464 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006465 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006466 enum event_trigger_type tt = ETT_NONE;
Steven Rostedtd696b582011-09-22 11:50:27 -04006467 struct ring_buffer *buffer;
6468 struct print_entry *entry;
6469 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006470 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006471 int size;
6472 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006473
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006474/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006475#define FAULTED_STR "<faulted>"
6476#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006477
Steven Rostedtc76f0692008-11-07 22:36:02 -05006478 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006479 return -EINVAL;
6480
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006481 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006482 return -EINVAL;
6483
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006484 if (cnt > TRACE_BUF_SIZE)
6485 cnt = TRACE_BUF_SIZE;
6486
Steven Rostedtd696b582011-09-22 11:50:27 -04006487 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006488
Steven Rostedtd696b582011-09-22 11:50:27 -04006489 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006490 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6491
6492 /* If less than "<faulted>", then make sure we can still add that */
6493 if (cnt < FAULTED_SIZE)
6494 size += FAULTED_SIZE - cnt;
6495
Alexander Z Lam2d716192013-07-01 15:31:24 -07006496 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006497 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6498 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006499 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006500 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006501 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006502
6503 entry = ring_buffer_event_data(event);
6504 entry->ip = _THIS_IP_;
6505
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006506 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6507 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006508 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006509 cnt = FAULTED_SIZE;
6510 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006511 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006512 written = cnt;
6513 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006514
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006515 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6516 /* do not add \n before testing triggers, but add \0 */
6517 entry->buf[cnt] = '\0';
6518 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6519 }
6520
Steven Rostedtd696b582011-09-22 11:50:27 -04006521 if (entry->buf[cnt - 1] != '\n') {
6522 entry->buf[cnt] = '\n';
6523 entry->buf[cnt + 1] = '\0';
6524 } else
6525 entry->buf[cnt] = '\0';
6526
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006527 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006528
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006529 if (tt)
6530 event_triggers_post_call(tr->trace_marker_file, tt);
6531
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006532 if (written > 0)
6533 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006534
Steven Rostedtfa32e852016-07-06 15:25:08 -04006535 return written;
6536}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006537
Steven Rostedtfa32e852016-07-06 15:25:08 -04006538/* Limit it for now to 3K (including tag) */
6539#define RAW_DATA_MAX_SIZE (1024*3)
6540
6541static ssize_t
6542tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6543 size_t cnt, loff_t *fpos)
6544{
6545 struct trace_array *tr = filp->private_data;
6546 struct ring_buffer_event *event;
6547 struct ring_buffer *buffer;
6548 struct raw_data_entry *entry;
6549 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006550 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006551 int size;
6552 int len;
6553
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006554#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6555
Steven Rostedtfa32e852016-07-06 15:25:08 -04006556 if (tracing_disabled)
6557 return -EINVAL;
6558
6559 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6560 return -EINVAL;
6561
6562 /* The marker must at least have a tag id */
6563 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6564 return -EINVAL;
6565
6566 if (cnt > TRACE_BUF_SIZE)
6567 cnt = TRACE_BUF_SIZE;
6568
6569 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6570
Steven Rostedtfa32e852016-07-06 15:25:08 -04006571 local_save_flags(irq_flags);
6572 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006573 if (cnt < FAULT_SIZE_ID)
6574 size += FAULT_SIZE_ID - cnt;
6575
Steven Rostedtfa32e852016-07-06 15:25:08 -04006576 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006577 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6578 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006579 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006580 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006581 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006582
6583 entry = ring_buffer_event_data(event);
6584
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006585 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6586 if (len) {
6587 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006588 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006589 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006590 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006591 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006592
6593 __buffer_unlock_commit(buffer, event);
6594
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006595 if (written > 0)
6596 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006597
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006598 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006599}
6600
Li Zefan13f16d22009-12-08 11:16:11 +08006601static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006602{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006603 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006604 int i;
6605
6606 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006607 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006608 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006609 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6610 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006611 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006612
Li Zefan13f16d22009-12-08 11:16:11 +08006613 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006614}
6615
Tom Zanussid71bd342018-01-15 20:52:07 -06006616int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006617{
Zhaolei5079f322009-08-25 16:12:56 +08006618 int i;
6619
Zhaolei5079f322009-08-25 16:12:56 +08006620 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6621 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6622 break;
6623 }
6624 if (i == ARRAY_SIZE(trace_clocks))
6625 return -EINVAL;
6626
Zhaolei5079f322009-08-25 16:12:56 +08006627 mutex_lock(&trace_types_lock);
6628
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006629 tr->clock_id = i;
6630
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006631 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006632
David Sharp60303ed2012-10-11 16:27:52 -07006633 /*
6634 * New clock may not be consistent with the previous clock.
6635 * Reset the buffer so that it doesn't have incomparable timestamps.
6636 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006637 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006638
6639#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006640 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006641 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006642 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006643#endif
David Sharp60303ed2012-10-11 16:27:52 -07006644
Zhaolei5079f322009-08-25 16:12:56 +08006645 mutex_unlock(&trace_types_lock);
6646
Steven Rostedte1e232c2014-02-10 23:38:46 -05006647 return 0;
6648}
6649
6650static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6651 size_t cnt, loff_t *fpos)
6652{
6653 struct seq_file *m = filp->private_data;
6654 struct trace_array *tr = m->private;
6655 char buf[64];
6656 const char *clockstr;
6657 int ret;
6658
6659 if (cnt >= sizeof(buf))
6660 return -EINVAL;
6661
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006662 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006663 return -EFAULT;
6664
6665 buf[cnt] = 0;
6666
6667 clockstr = strstrip(buf);
6668
6669 ret = tracing_set_clock(tr, clockstr);
6670 if (ret)
6671 return ret;
6672
Zhaolei5079f322009-08-25 16:12:56 +08006673 *fpos += cnt;
6674
6675 return cnt;
6676}
6677
Li Zefan13f16d22009-12-08 11:16:11 +08006678static int tracing_clock_open(struct inode *inode, struct file *file)
6679{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006680 struct trace_array *tr = inode->i_private;
6681 int ret;
6682
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006683 ret = tracing_check_open_get_tr(tr);
6684 if (ret)
6685 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006686
6687 ret = single_open(file, tracing_clock_show, inode->i_private);
6688 if (ret < 0)
6689 trace_array_put(tr);
6690
6691 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006692}
6693
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006694static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6695{
6696 struct trace_array *tr = m->private;
6697
6698 mutex_lock(&trace_types_lock);
6699
6700 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6701 seq_puts(m, "delta [absolute]\n");
6702 else
6703 seq_puts(m, "[delta] absolute\n");
6704
6705 mutex_unlock(&trace_types_lock);
6706
6707 return 0;
6708}
6709
6710static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6711{
6712 struct trace_array *tr = inode->i_private;
6713 int ret;
6714
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006715 ret = tracing_check_open_get_tr(tr);
6716 if (ret)
6717 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006718
6719 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6720 if (ret < 0)
6721 trace_array_put(tr);
6722
6723 return ret;
6724}
6725
Tom Zanussi00b41452018-01-15 20:51:39 -06006726int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6727{
6728 int ret = 0;
6729
6730 mutex_lock(&trace_types_lock);
6731
6732 if (abs && tr->time_stamp_abs_ref++)
6733 goto out;
6734
6735 if (!abs) {
6736 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6737 ret = -EINVAL;
6738 goto out;
6739 }
6740
6741 if (--tr->time_stamp_abs_ref)
6742 goto out;
6743 }
6744
6745 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6746
6747#ifdef CONFIG_TRACER_MAX_TRACE
6748 if (tr->max_buffer.buffer)
6749 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6750#endif
6751 out:
6752 mutex_unlock(&trace_types_lock);
6753
6754 return ret;
6755}
6756
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006757struct ftrace_buffer_info {
6758 struct trace_iterator iter;
6759 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006760 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006761 unsigned int read;
6762};
6763
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006764#ifdef CONFIG_TRACER_SNAPSHOT
6765static int tracing_snapshot_open(struct inode *inode, struct file *file)
6766{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006767 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006768 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006769 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006770 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006771
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006772 ret = tracing_check_open_get_tr(tr);
6773 if (ret)
6774 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006775
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006776 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006777 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006778 if (IS_ERR(iter))
6779 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006780 } else {
6781 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006782 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006783 m = kzalloc(sizeof(*m), GFP_KERNEL);
6784 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006785 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006786 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6787 if (!iter) {
6788 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006789 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006790 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006791 ret = 0;
6792
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006793 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006794 iter->trace_buffer = &tr->max_buffer;
6795 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006796 m->private = iter;
6797 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006798 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006799out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006800 if (ret < 0)
6801 trace_array_put(tr);
6802
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006803 return ret;
6804}
6805
6806static ssize_t
6807tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6808 loff_t *ppos)
6809{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006810 struct seq_file *m = filp->private_data;
6811 struct trace_iterator *iter = m->private;
6812 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006813 unsigned long val;
6814 int ret;
6815
6816 ret = tracing_update_buffers();
6817 if (ret < 0)
6818 return ret;
6819
6820 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6821 if (ret)
6822 return ret;
6823
6824 mutex_lock(&trace_types_lock);
6825
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006826 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006827 ret = -EBUSY;
6828 goto out;
6829 }
6830
Tom Zanussia35873a2019-02-13 17:42:45 -06006831 arch_spin_lock(&tr->max_lock);
6832 if (tr->cond_snapshot)
6833 ret = -EBUSY;
6834 arch_spin_unlock(&tr->max_lock);
6835 if (ret)
6836 goto out;
6837
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006838 switch (val) {
6839 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006840 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6841 ret = -EINVAL;
6842 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006843 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006844 if (tr->allocated_snapshot)
6845 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006846 break;
6847 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006848/* Only allow per-cpu swap if the ring buffer supports it */
6849#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6850 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6851 ret = -EINVAL;
6852 break;
6853 }
6854#endif
Eiichi Tsukata46cc0b442019-06-25 10:29:10 +09006855 if (tr->allocated_snapshot)
6856 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6857 &tr->trace_buffer, iter->cpu_file);
6858 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006859 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b442019-06-25 10:29:10 +09006860 if (ret < 0)
6861 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006862 local_irq_disable();
6863 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006864 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06006865 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006866 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006867 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006868 local_irq_enable();
6869 break;
6870 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006871 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006872 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6873 tracing_reset_online_cpus(&tr->max_buffer);
6874 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04006875 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006876 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006877 break;
6878 }
6879
6880 if (ret >= 0) {
6881 *ppos += cnt;
6882 ret = cnt;
6883 }
6884out:
6885 mutex_unlock(&trace_types_lock);
6886 return ret;
6887}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006888
6889static int tracing_snapshot_release(struct inode *inode, struct file *file)
6890{
6891 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006892 int ret;
6893
6894 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006895
6896 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006897 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006898
6899 /* If write only, the seq_file is just a stub */
6900 if (m)
6901 kfree(m->private);
6902 kfree(m);
6903
6904 return 0;
6905}
6906
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006907static int tracing_buffers_open(struct inode *inode, struct file *filp);
6908static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6909 size_t count, loff_t *ppos);
6910static int tracing_buffers_release(struct inode *inode, struct file *file);
6911static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6912 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6913
6914static int snapshot_raw_open(struct inode *inode, struct file *filp)
6915{
6916 struct ftrace_buffer_info *info;
6917 int ret;
6918
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04006919 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006920 ret = tracing_buffers_open(inode, filp);
6921 if (ret < 0)
6922 return ret;
6923
6924 info = filp->private_data;
6925
6926 if (info->iter.trace->use_max_tr) {
6927 tracing_buffers_release(inode, filp);
6928 return -EBUSY;
6929 }
6930
6931 info->iter.snapshot = true;
6932 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6933
6934 return ret;
6935}
6936
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006937#endif /* CONFIG_TRACER_SNAPSHOT */
6938
6939
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006940static const struct file_operations tracing_thresh_fops = {
6941 .open = tracing_open_generic,
6942 .read = tracing_thresh_read,
6943 .write = tracing_thresh_write,
6944 .llseek = generic_file_llseek,
6945};
6946
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006947#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006948static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006949 .open = tracing_open_generic,
6950 .read = tracing_max_lat_read,
6951 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006952 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006953};
Chen Gange428abb2015-11-10 05:15:15 +08006954#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006955
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006956static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006957 .open = tracing_open_generic,
6958 .read = tracing_set_trace_read,
6959 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006960 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006961};
6962
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006963static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006964 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006965 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006966 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006967 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006968 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006969 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006970};
6971
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006972static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006973 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006974 .read = tracing_entries_read,
6975 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006976 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006977 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006978};
6979
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006980static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006981 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006982 .read = tracing_total_entries_read,
6983 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006984 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006985};
6986
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006987static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006988 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006989 .write = tracing_free_buffer_write,
6990 .release = tracing_free_buffer_release,
6991};
6992
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006993static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006994 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006995 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006996 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006997 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006998};
6999
Steven Rostedtfa32e852016-07-06 15:25:08 -04007000static const struct file_operations tracing_mark_raw_fops = {
7001 .open = tracing_open_generic_tr,
7002 .write = tracing_mark_raw_write,
7003 .llseek = generic_file_llseek,
7004 .release = tracing_release_generic_tr,
7005};
7006
Zhaolei5079f322009-08-25 16:12:56 +08007007static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007008 .open = tracing_clock_open,
7009 .read = seq_read,
7010 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007011 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007012 .write = tracing_clock_write,
7013};
7014
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007015static const struct file_operations trace_time_stamp_mode_fops = {
7016 .open = tracing_time_stamp_mode_open,
7017 .read = seq_read,
7018 .llseek = seq_lseek,
7019 .release = tracing_single_release_tr,
7020};
7021
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007022#ifdef CONFIG_TRACER_SNAPSHOT
7023static const struct file_operations snapshot_fops = {
7024 .open = tracing_snapshot_open,
7025 .read = seq_read,
7026 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c879e2013-12-21 17:39:40 -05007027 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007028 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007029};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007030
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007031static const struct file_operations snapshot_raw_fops = {
7032 .open = snapshot_raw_open,
7033 .read = tracing_buffers_read,
7034 .release = tracing_buffers_release,
7035 .splice_read = tracing_buffers_splice_read,
7036 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007037};
7038
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007039#endif /* CONFIG_TRACER_SNAPSHOT */
7040
Tom Zanussi8a062902019-03-31 18:48:15 -05007041#define TRACING_LOG_ERRS_MAX 8
7042#define TRACING_LOG_LOC_MAX 128
7043
7044#define CMD_PREFIX " Command: "
7045
7046struct err_info {
7047 const char **errs; /* ptr to loc-specific array of err strings */
7048 u8 type; /* index into errs -> specific err string */
7049 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7050 u64 ts;
7051};
7052
7053struct tracing_log_err {
7054 struct list_head list;
7055 struct err_info info;
7056 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7057 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7058};
7059
Tom Zanussi8a062902019-03-31 18:48:15 -05007060static DEFINE_MUTEX(tracing_err_log_lock);
7061
YueHaibingff585c52019-06-14 23:32:10 +08007062static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007063{
7064 struct tracing_log_err *err;
7065
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007066 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007067 err = kzalloc(sizeof(*err), GFP_KERNEL);
7068 if (!err)
7069 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007070 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007071
7072 return err;
7073 }
7074
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007075 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007076 list_del(&err->list);
7077
7078 return err;
7079}
7080
7081/**
7082 * err_pos - find the position of a string within a command for error careting
7083 * @cmd: The tracing command that caused the error
7084 * @str: The string to position the caret at within @cmd
7085 *
7086 * Finds the position of the first occurence of @str within @cmd. The
7087 * return value can be passed to tracing_log_err() for caret placement
7088 * within @cmd.
7089 *
7090 * Returns the index within @cmd of the first occurence of @str or 0
7091 * if @str was not found.
7092 */
7093unsigned int err_pos(char *cmd, const char *str)
7094{
7095 char *found;
7096
7097 if (WARN_ON(!strlen(cmd)))
7098 return 0;
7099
7100 found = strstr(cmd, str);
7101 if (found)
7102 return found - cmd;
7103
7104 return 0;
7105}
7106
7107/**
7108 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007109 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007110 * @loc: A string describing where the error occurred
7111 * @cmd: The tracing command that caused the error
7112 * @errs: The array of loc-specific static error strings
7113 * @type: The index into errs[], which produces the specific static err string
7114 * @pos: The position the caret should be placed in the cmd
7115 *
7116 * Writes an error into tracing/error_log of the form:
7117 *
7118 * <loc>: error: <text>
7119 * Command: <cmd>
7120 * ^
7121 *
7122 * tracing/error_log is a small log file containing the last
7123 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7124 * unless there has been a tracing error, and the error log can be
7125 * cleared and have its memory freed by writing the empty string in
7126 * truncation mode to it i.e. echo > tracing/error_log.
7127 *
7128 * NOTE: the @errs array along with the @type param are used to
7129 * produce a static error string - this string is not copied and saved
7130 * when the error is logged - only a pointer to it is saved. See
7131 * existing callers for examples of how static strings are typically
7132 * defined for use with tracing_log_err().
7133 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007134void tracing_log_err(struct trace_array *tr,
7135 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007136 const char **errs, u8 type, u8 pos)
7137{
7138 struct tracing_log_err *err;
7139
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007140 if (!tr)
7141 tr = &global_trace;
7142
Tom Zanussi8a062902019-03-31 18:48:15 -05007143 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007144 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007145 if (PTR_ERR(err) == -ENOMEM) {
7146 mutex_unlock(&tracing_err_log_lock);
7147 return;
7148 }
7149
7150 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7151 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7152
7153 err->info.errs = errs;
7154 err->info.type = type;
7155 err->info.pos = pos;
7156 err->info.ts = local_clock();
7157
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007158 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007159 mutex_unlock(&tracing_err_log_lock);
7160}
7161
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007162static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007163{
7164 struct tracing_log_err *err, *next;
7165
7166 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007167 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007168 list_del(&err->list);
7169 kfree(err);
7170 }
7171
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007172 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007173 mutex_unlock(&tracing_err_log_lock);
7174}
7175
7176static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7177{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007178 struct trace_array *tr = m->private;
7179
Tom Zanussi8a062902019-03-31 18:48:15 -05007180 mutex_lock(&tracing_err_log_lock);
7181
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007182 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007183}
7184
7185static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7186{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007187 struct trace_array *tr = m->private;
7188
7189 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007190}
7191
7192static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7193{
7194 mutex_unlock(&tracing_err_log_lock);
7195}
7196
7197static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7198{
7199 u8 i;
7200
7201 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7202 seq_putc(m, ' ');
7203 for (i = 0; i < pos; i++)
7204 seq_putc(m, ' ');
7205 seq_puts(m, "^\n");
7206}
7207
7208static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7209{
7210 struct tracing_log_err *err = v;
7211
7212 if (err) {
7213 const char *err_text = err->info.errs[err->info.type];
7214 u64 sec = err->info.ts;
7215 u32 nsec;
7216
7217 nsec = do_div(sec, NSEC_PER_SEC);
7218 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7219 err->loc, err_text);
7220 seq_printf(m, "%s", err->cmd);
7221 tracing_err_log_show_pos(m, err->info.pos);
7222 }
7223
7224 return 0;
7225}
7226
7227static const struct seq_operations tracing_err_log_seq_ops = {
7228 .start = tracing_err_log_seq_start,
7229 .next = tracing_err_log_seq_next,
7230 .stop = tracing_err_log_seq_stop,
7231 .show = tracing_err_log_seq_show
7232};
7233
7234static int tracing_err_log_open(struct inode *inode, struct file *file)
7235{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007236 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007237 int ret = 0;
7238
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007239 ret = tracing_check_open_get_tr(tr);
7240 if (ret)
7241 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007242
Tom Zanussi8a062902019-03-31 18:48:15 -05007243 /* If this file was opened for write, then erase contents */
7244 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007245 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007246
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007247 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007248 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007249 if (!ret) {
7250 struct seq_file *m = file->private_data;
7251 m->private = tr;
7252 } else {
7253 trace_array_put(tr);
7254 }
7255 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007256 return ret;
7257}
7258
7259static ssize_t tracing_err_log_write(struct file *file,
7260 const char __user *buffer,
7261 size_t count, loff_t *ppos)
7262{
7263 return count;
7264}
7265
Takeshi Misawad122ed62019-06-28 19:56:40 +09007266static int tracing_err_log_release(struct inode *inode, struct file *file)
7267{
7268 struct trace_array *tr = inode->i_private;
7269
7270 trace_array_put(tr);
7271
7272 if (file->f_mode & FMODE_READ)
7273 seq_release(inode, file);
7274
7275 return 0;
7276}
7277
Tom Zanussi8a062902019-03-31 18:48:15 -05007278static const struct file_operations tracing_err_log_fops = {
7279 .open = tracing_err_log_open,
7280 .write = tracing_err_log_write,
7281 .read = seq_read,
7282 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007283 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007284};
7285
Steven Rostedt2cadf912008-12-01 22:20:19 -05007286static int tracing_buffers_open(struct inode *inode, struct file *filp)
7287{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007288 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007289 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007290 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007291
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007292 ret = tracing_check_open_get_tr(tr);
7293 if (ret)
7294 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007295
Steven Rostedt2cadf912008-12-01 22:20:19 -05007296 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007297 if (!info) {
7298 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007299 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007300 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007301
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007302 mutex_lock(&trace_types_lock);
7303
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007304 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007305 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007306 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007307 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007308 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007309 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007310 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007311
7312 filp->private_data = info;
7313
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007314 tr->current_trace->ref++;
7315
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007316 mutex_unlock(&trace_types_lock);
7317
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007318 ret = nonseekable_open(inode, filp);
7319 if (ret < 0)
7320 trace_array_put(tr);
7321
7322 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007323}
7324
Al Viro9dd95742017-07-03 00:42:43 -04007325static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007326tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7327{
7328 struct ftrace_buffer_info *info = filp->private_data;
7329 struct trace_iterator *iter = &info->iter;
7330
7331 return trace_poll(iter, filp, poll_table);
7332}
7333
Steven Rostedt2cadf912008-12-01 22:20:19 -05007334static ssize_t
7335tracing_buffers_read(struct file *filp, char __user *ubuf,
7336 size_t count, loff_t *ppos)
7337{
7338 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007339 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007340 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007341 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007342
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007343 if (!count)
7344 return 0;
7345
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007346#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007347 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7348 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007349#endif
7350
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007351 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007352 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7353 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007354 if (IS_ERR(info->spare)) {
7355 ret = PTR_ERR(info->spare);
7356 info->spare = NULL;
7357 } else {
7358 info->spare_cpu = iter->cpu_file;
7359 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007360 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007361 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007362 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007363
Steven Rostedt2cadf912008-12-01 22:20:19 -05007364 /* Do we have previous read data to read? */
7365 if (info->read < PAGE_SIZE)
7366 goto read;
7367
Steven Rostedtb6273442013-02-28 13:44:11 -05007368 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007369 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007370 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007371 &info->spare,
7372 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007373 iter->cpu_file, 0);
7374 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007375
7376 if (ret < 0) {
7377 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007378 if ((filp->f_flags & O_NONBLOCK))
7379 return -EAGAIN;
7380
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007381 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007382 if (ret)
7383 return ret;
7384
Steven Rostedtb6273442013-02-28 13:44:11 -05007385 goto again;
7386 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007387 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007388 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007389
Steven Rostedt436fc282011-10-14 10:44:25 -04007390 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007391 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007392 size = PAGE_SIZE - info->read;
7393 if (size > count)
7394 size = count;
7395
7396 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007397 if (ret == size)
7398 return -EFAULT;
7399
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007400 size -= ret;
7401
Steven Rostedt2cadf912008-12-01 22:20:19 -05007402 *ppos += size;
7403 info->read += size;
7404
7405 return size;
7406}
7407
7408static int tracing_buffers_release(struct inode *inode, struct file *file)
7409{
7410 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007411 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007412
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007413 mutex_lock(&trace_types_lock);
7414
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007415 iter->tr->current_trace->ref--;
7416
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007417 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007418
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007419 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007420 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7421 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007422 kfree(info);
7423
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007424 mutex_unlock(&trace_types_lock);
7425
Steven Rostedt2cadf912008-12-01 22:20:19 -05007426 return 0;
7427}
7428
7429struct buffer_ref {
7430 struct ring_buffer *buffer;
7431 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007432 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007433 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007434};
7435
Jann Hornb9872222019-04-04 23:59:25 +02007436static void buffer_ref_release(struct buffer_ref *ref)
7437{
7438 if (!refcount_dec_and_test(&ref->refcount))
7439 return;
7440 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7441 kfree(ref);
7442}
7443
Steven Rostedt2cadf912008-12-01 22:20:19 -05007444static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7445 struct pipe_buffer *buf)
7446{
7447 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7448
Jann Hornb9872222019-04-04 23:59:25 +02007449 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007450 buf->private = 0;
7451}
7452
Matthew Wilcox15fab632019-04-05 14:02:10 -07007453static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007454 struct pipe_buffer *buf)
7455{
7456 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7457
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007458 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007459 return false;
7460
Jann Hornb9872222019-04-04 23:59:25 +02007461 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007462 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007463}
7464
7465/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007466static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007467 .confirm = generic_pipe_buf_confirm,
7468 .release = buffer_pipe_buf_release,
Jann Hornb9872222019-04-04 23:59:25 +02007469 .steal = generic_pipe_buf_nosteal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007470 .get = buffer_pipe_buf_get,
7471};
7472
7473/*
7474 * Callback from splice_to_pipe(), if we need to release some pages
7475 * at the end of the spd in case we error'ed out in filling the pipe.
7476 */
7477static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7478{
7479 struct buffer_ref *ref =
7480 (struct buffer_ref *)spd->partial[i].private;
7481
Jann Hornb9872222019-04-04 23:59:25 +02007482 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007483 spd->partial[i].private = 0;
7484}
7485
7486static ssize_t
7487tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7488 struct pipe_inode_info *pipe, size_t len,
7489 unsigned int flags)
7490{
7491 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007492 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007493 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7494 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007495 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007496 .pages = pages_def,
7497 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007498 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007499 .ops = &buffer_pipe_buf_ops,
7500 .spd_release = buffer_spd_release,
7501 };
7502 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007503 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007504 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007505
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007506#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007507 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7508 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007509#endif
7510
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007511 if (*ppos & (PAGE_SIZE - 1))
7512 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007513
7514 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007515 if (len < PAGE_SIZE)
7516 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007517 len &= PAGE_MASK;
7518 }
7519
Al Viro1ae22932016-09-17 18:31:46 -04007520 if (splice_grow_spd(pipe, &spd))
7521 return -ENOMEM;
7522
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007523 again:
7524 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007525 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007526
Al Viroa786c062014-04-11 12:01:03 -04007527 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007528 struct page *page;
7529 int r;
7530
7531 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007532 if (!ref) {
7533 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007534 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007535 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007536
Jann Hornb9872222019-04-04 23:59:25 +02007537 refcount_set(&ref->refcount, 1);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007538 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007539 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007540 if (IS_ERR(ref->page)) {
7541 ret = PTR_ERR(ref->page);
7542 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007543 kfree(ref);
7544 break;
7545 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007546 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007547
7548 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007549 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007550 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007551 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7552 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007553 kfree(ref);
7554 break;
7555 }
7556
Steven Rostedt2cadf912008-12-01 22:20:19 -05007557 page = virt_to_page(ref->page);
7558
7559 spd.pages[i] = page;
7560 spd.partial[i].len = PAGE_SIZE;
7561 spd.partial[i].offset = 0;
7562 spd.partial[i].private = (unsigned long)ref;
7563 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007564 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007565
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007566 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007567 }
7568
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007569 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007570 spd.nr_pages = i;
7571
7572 /* did we read anything? */
7573 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007574 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007575 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007576
Al Viro1ae22932016-09-17 18:31:46 -04007577 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007578 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007579 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007580
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007581 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007582 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007583 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007584
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007585 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007586 }
7587
7588 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007589out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007590 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007591
Steven Rostedt2cadf912008-12-01 22:20:19 -05007592 return ret;
7593}
7594
7595static const struct file_operations tracing_buffers_fops = {
7596 .open = tracing_buffers_open,
7597 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007598 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007599 .release = tracing_buffers_release,
7600 .splice_read = tracing_buffers_splice_read,
7601 .llseek = no_llseek,
7602};
7603
Steven Rostedtc8d77182009-04-29 18:03:45 -04007604static ssize_t
7605tracing_stats_read(struct file *filp, char __user *ubuf,
7606 size_t count, loff_t *ppos)
7607{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007608 struct inode *inode = file_inode(filp);
7609 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007610 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007611 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007612 struct trace_seq *s;
7613 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007614 unsigned long long t;
7615 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007616
Li Zefane4f2d102009-06-15 10:57:28 +08007617 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007618 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007619 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007620
7621 trace_seq_init(s);
7622
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007623 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007624 trace_seq_printf(s, "entries: %ld\n", cnt);
7625
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007626 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007627 trace_seq_printf(s, "overrun: %ld\n", cnt);
7628
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007629 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007630 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7631
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007632 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007633 trace_seq_printf(s, "bytes: %ld\n", cnt);
7634
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007635 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007636 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007637 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007638 usec_rem = do_div(t, USEC_PER_SEC);
7639 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7640 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007641
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007642 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007643 usec_rem = do_div(t, USEC_PER_SEC);
7644 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7645 } else {
7646 /* counter or tsc mode for trace_clock */
7647 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007648 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007649
7650 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007651 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007652 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007653
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007654 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007655 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7656
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007657 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007658 trace_seq_printf(s, "read events: %ld\n", cnt);
7659
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007660 count = simple_read_from_buffer(ubuf, count, ppos,
7661 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007662
7663 kfree(s);
7664
7665 return count;
7666}
7667
7668static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007669 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007670 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007671 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007672 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007673};
7674
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007675#ifdef CONFIG_DYNAMIC_FTRACE
7676
7677static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007678tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007679 size_t cnt, loff_t *ppos)
7680{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007681 ssize_t ret;
7682 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007683 int r;
7684
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007685 /* 256 should be plenty to hold the amount needed */
7686 buf = kmalloc(256, GFP_KERNEL);
7687 if (!buf)
7688 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007689
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007690 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7691 ftrace_update_tot_cnt,
7692 ftrace_number_of_pages,
7693 ftrace_number_of_groups);
7694
7695 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7696 kfree(buf);
7697 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007698}
7699
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007700static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007701 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007702 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007703 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007704};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007705#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007706
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007707#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7708static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007709ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007710 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007711 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007712{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007713 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007714}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007715
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007716static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007717ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007718 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007719 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007720{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007721 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007722 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007723
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007724 if (mapper)
7725 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007726
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007727 if (count) {
7728
7729 if (*count <= 0)
7730 return;
7731
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007732 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007733 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007734
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007735 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007736}
7737
7738static int
7739ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7740 struct ftrace_probe_ops *ops, void *data)
7741{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007742 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007743 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007744
7745 seq_printf(m, "%ps:", (void *)ip);
7746
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007747 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007748
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007749 if (mapper)
7750 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7751
7752 if (count)
7753 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007754 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007755 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007756
7757 return 0;
7758}
7759
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007760static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007761ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007762 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007763{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007764 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007765
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007766 if (!mapper) {
7767 mapper = allocate_ftrace_func_mapper();
7768 if (!mapper)
7769 return -ENOMEM;
7770 *data = mapper;
7771 }
7772
7773 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007774}
7775
7776static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007777ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007778 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007779{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007780 struct ftrace_func_mapper *mapper = data;
7781
7782 if (!ip) {
7783 if (!mapper)
7784 return;
7785 free_ftrace_func_mapper(mapper, NULL);
7786 return;
7787 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007788
7789 ftrace_func_mapper_remove_ip(mapper, ip);
7790}
7791
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007792static struct ftrace_probe_ops snapshot_probe_ops = {
7793 .func = ftrace_snapshot,
7794 .print = ftrace_snapshot_print,
7795};
7796
7797static struct ftrace_probe_ops snapshot_count_probe_ops = {
7798 .func = ftrace_count_snapshot,
7799 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007800 .init = ftrace_snapshot_init,
7801 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007802};
7803
7804static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007805ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007806 char *glob, char *cmd, char *param, int enable)
7807{
7808 struct ftrace_probe_ops *ops;
7809 void *count = (void *)-1;
7810 char *number;
7811 int ret;
7812
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007813 if (!tr)
7814 return -ENODEV;
7815
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007816 /* hash funcs only work with set_ftrace_filter */
7817 if (!enable)
7818 return -EINVAL;
7819
7820 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7821
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007822 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007823 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007824
7825 if (!param)
7826 goto out_reg;
7827
7828 number = strsep(&param, ":");
7829
7830 if (!strlen(number))
7831 goto out_reg;
7832
7833 /*
7834 * We use the callback data field (which is a pointer)
7835 * as our counter.
7836 */
7837 ret = kstrtoul(number, 0, (unsigned long *)&count);
7838 if (ret)
7839 return ret;
7840
7841 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007842 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007843 if (ret < 0)
7844 goto out;
7845
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007846 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007847
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007848 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007849 return ret < 0 ? ret : 0;
7850}
7851
7852static struct ftrace_func_command ftrace_snapshot_cmd = {
7853 .name = "snapshot",
7854 .func = ftrace_trace_snapshot_callback,
7855};
7856
Tom Zanussi38de93a2013-10-24 08:34:18 -05007857static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007858{
7859 return register_ftrace_command(&ftrace_snapshot_cmd);
7860}
7861#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007862static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007863#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007864
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007865static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007866{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007867 if (WARN_ON(!tr->dir))
7868 return ERR_PTR(-ENODEV);
7869
7870 /* Top directory uses NULL as the parent */
7871 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7872 return NULL;
7873
7874 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007875 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007876}
7877
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007878static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7879{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007880 struct dentry *d_tracer;
7881
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007882 if (tr->percpu_dir)
7883 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007884
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007885 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007886 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007887 return NULL;
7888
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007889 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007890
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007891 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007892 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007893
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007894 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007895}
7896
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007897static struct dentry *
7898trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7899 void *data, long cpu, const struct file_operations *fops)
7900{
7901 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7902
7903 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007904 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007905 return ret;
7906}
7907
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007908static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007909tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007910{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007911 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007912 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007913 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007914
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007915 if (!d_percpu)
7916 return;
7917
Steven Rostedtdd49a382010-10-20 21:51:26 -04007918 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007919 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007920 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007921 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007922 return;
7923 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007924
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007925 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007926 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02007927 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007928
7929 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007930 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007931 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04007932
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007933 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007934 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007935
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007936 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007937 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08007938
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007939 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007940 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007941
7942#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007943 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007944 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007945
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007946 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007947 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007948#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007949}
7950
Steven Rostedt60a11772008-05-12 21:20:44 +02007951#ifdef CONFIG_FTRACE_SELFTEST
7952/* Let selftest have access to static functions in this file */
7953#include "trace_selftest.c"
7954#endif
7955
Steven Rostedt577b7852009-02-26 23:43:05 -05007956static ssize_t
7957trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7958 loff_t *ppos)
7959{
7960 struct trace_option_dentry *topt = filp->private_data;
7961 char *buf;
7962
7963 if (topt->flags->val & topt->opt->bit)
7964 buf = "1\n";
7965 else
7966 buf = "0\n";
7967
7968 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7969}
7970
7971static ssize_t
7972trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7973 loff_t *ppos)
7974{
7975 struct trace_option_dentry *topt = filp->private_data;
7976 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05007977 int ret;
7978
Peter Huewe22fe9b52011-06-07 21:58:27 +02007979 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7980 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05007981 return ret;
7982
Li Zefan8d18eaa2009-12-08 11:17:06 +08007983 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05007984 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08007985
7986 if (!!(topt->flags->val & topt->opt->bit) != val) {
7987 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05007988 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05007989 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08007990 mutex_unlock(&trace_types_lock);
7991 if (ret)
7992 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05007993 }
7994
7995 *ppos += cnt;
7996
7997 return cnt;
7998}
7999
8000
8001static const struct file_operations trace_options_fops = {
8002 .open = tracing_open_generic,
8003 .read = trace_options_read,
8004 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008005 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008006};
8007
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008008/*
8009 * In order to pass in both the trace_array descriptor as well as the index
8010 * to the flag that the trace option file represents, the trace_array
8011 * has a character array of trace_flags_index[], which holds the index
8012 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8013 * The address of this character array is passed to the flag option file
8014 * read/write callbacks.
8015 *
8016 * In order to extract both the index and the trace_array descriptor,
8017 * get_tr_index() uses the following algorithm.
8018 *
8019 * idx = *ptr;
8020 *
8021 * As the pointer itself contains the address of the index (remember
8022 * index[1] == 1).
8023 *
8024 * Then to get the trace_array descriptor, by subtracting that index
8025 * from the ptr, we get to the start of the index itself.
8026 *
8027 * ptr - idx == &index[0]
8028 *
8029 * Then a simple container_of() from that pointer gets us to the
8030 * trace_array descriptor.
8031 */
8032static void get_tr_index(void *data, struct trace_array **ptr,
8033 unsigned int *pindex)
8034{
8035 *pindex = *(unsigned char *)data;
8036
8037 *ptr = container_of(data - *pindex, struct trace_array,
8038 trace_flags_index);
8039}
8040
Steven Rostedta8259072009-02-26 22:19:12 -05008041static ssize_t
8042trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8043 loff_t *ppos)
8044{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008045 void *tr_index = filp->private_data;
8046 struct trace_array *tr;
8047 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008048 char *buf;
8049
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008050 get_tr_index(tr_index, &tr, &index);
8051
8052 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008053 buf = "1\n";
8054 else
8055 buf = "0\n";
8056
8057 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8058}
8059
8060static ssize_t
8061trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8062 loff_t *ppos)
8063{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008064 void *tr_index = filp->private_data;
8065 struct trace_array *tr;
8066 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008067 unsigned long val;
8068 int ret;
8069
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008070 get_tr_index(tr_index, &tr, &index);
8071
Peter Huewe22fe9b52011-06-07 21:58:27 +02008072 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8073 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008074 return ret;
8075
Zhaoleif2d84b62009-08-07 18:55:48 +08008076 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008077 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008078
8079 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008080 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008081 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05008082
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008083 if (ret < 0)
8084 return ret;
8085
Steven Rostedta8259072009-02-26 22:19:12 -05008086 *ppos += cnt;
8087
8088 return cnt;
8089}
8090
Steven Rostedta8259072009-02-26 22:19:12 -05008091static const struct file_operations trace_options_core_fops = {
8092 .open = tracing_open_generic,
8093 .read = trace_options_core_read,
8094 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008095 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008096};
8097
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008098struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008099 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008100 struct dentry *parent,
8101 void *data,
8102 const struct file_operations *fops)
8103{
8104 struct dentry *ret;
8105
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008106 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008107 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008108 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008109
8110 return ret;
8111}
8112
8113
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008114static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008115{
8116 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008117
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008118 if (tr->options)
8119 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008120
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008121 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008122 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008123 return NULL;
8124
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008125 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008126 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008127 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008128 return NULL;
8129 }
8130
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008131 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008132}
8133
Steven Rostedt577b7852009-02-26 23:43:05 -05008134static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008135create_trace_option_file(struct trace_array *tr,
8136 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008137 struct tracer_flags *flags,
8138 struct tracer_opt *opt)
8139{
8140 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008141
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008142 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008143 if (!t_options)
8144 return;
8145
8146 topt->flags = flags;
8147 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008148 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008149
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008150 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008151 &trace_options_fops);
8152
Steven Rostedt577b7852009-02-26 23:43:05 -05008153}
8154
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008155static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008156create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008157{
8158 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008159 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008160 struct tracer_flags *flags;
8161 struct tracer_opt *opts;
8162 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008163 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008164
8165 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008166 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008167
8168 flags = tracer->flags;
8169
8170 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008171 return;
8172
8173 /*
8174 * If this is an instance, only create flags for tracers
8175 * the instance may have.
8176 */
8177 if (!trace_ok_for_array(tracer, tr))
8178 return;
8179
8180 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008181 /* Make sure there's no duplicate flags. */
8182 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008183 return;
8184 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008185
8186 opts = flags->opts;
8187
8188 for (cnt = 0; opts[cnt].name; cnt++)
8189 ;
8190
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008191 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008192 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008193 return;
8194
8195 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8196 GFP_KERNEL);
8197 if (!tr_topts) {
8198 kfree(topts);
8199 return;
8200 }
8201
8202 tr->topts = tr_topts;
8203 tr->topts[tr->nr_topts].tracer = tracer;
8204 tr->topts[tr->nr_topts].topts = topts;
8205 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008206
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008207 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008208 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008209 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008210 WARN_ONCE(topts[cnt].entry == NULL,
8211 "Failed to create trace option: %s",
8212 opts[cnt].name);
8213 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008214}
8215
Steven Rostedta8259072009-02-26 22:19:12 -05008216static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008217create_trace_option_core_file(struct trace_array *tr,
8218 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008219{
8220 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008221
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008222 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008223 if (!t_options)
8224 return NULL;
8225
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008226 return trace_create_file(option, 0644, t_options,
8227 (void *)&tr->trace_flags_index[index],
8228 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008229}
8230
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008231static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008232{
8233 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008234 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008235 int i;
8236
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008237 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008238 if (!t_options)
8239 return;
8240
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008241 for (i = 0; trace_options[i]; i++) {
8242 if (top_level ||
8243 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8244 create_trace_option_core_file(tr, trace_options[i], i);
8245 }
Steven Rostedta8259072009-02-26 22:19:12 -05008246}
8247
Steven Rostedt499e5472012-02-22 15:50:28 -05008248static ssize_t
8249rb_simple_read(struct file *filp, char __user *ubuf,
8250 size_t cnt, loff_t *ppos)
8251{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008252 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008253 char buf[64];
8254 int r;
8255
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008256 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008257 r = sprintf(buf, "%d\n", r);
8258
8259 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8260}
8261
8262static ssize_t
8263rb_simple_write(struct file *filp, const char __user *ubuf,
8264 size_t cnt, loff_t *ppos)
8265{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008266 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008267 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008268 unsigned long val;
8269 int ret;
8270
8271 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8272 if (ret)
8273 return ret;
8274
8275 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008276 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008277 if (!!val == tracer_tracing_is_on(tr)) {
8278 val = 0; /* do nothing */
8279 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008280 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008281 if (tr->current_trace->start)
8282 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008283 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008284 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008285 if (tr->current_trace->stop)
8286 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008287 }
8288 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008289 }
8290
8291 (*ppos)++;
8292
8293 return cnt;
8294}
8295
8296static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008297 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008298 .read = rb_simple_read,
8299 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008300 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008301 .llseek = default_llseek,
8302};
8303
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008304static ssize_t
8305buffer_percent_read(struct file *filp, char __user *ubuf,
8306 size_t cnt, loff_t *ppos)
8307{
8308 struct trace_array *tr = filp->private_data;
8309 char buf[64];
8310 int r;
8311
8312 r = tr->buffer_percent;
8313 r = sprintf(buf, "%d\n", r);
8314
8315 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8316}
8317
8318static ssize_t
8319buffer_percent_write(struct file *filp, const char __user *ubuf,
8320 size_t cnt, loff_t *ppos)
8321{
8322 struct trace_array *tr = filp->private_data;
8323 unsigned long val;
8324 int ret;
8325
8326 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8327 if (ret)
8328 return ret;
8329
8330 if (val > 100)
8331 return -EINVAL;
8332
8333 if (!val)
8334 val = 1;
8335
8336 tr->buffer_percent = val;
8337
8338 (*ppos)++;
8339
8340 return cnt;
8341}
8342
8343static const struct file_operations buffer_percent_fops = {
8344 .open = tracing_open_generic_tr,
8345 .read = buffer_percent_read,
8346 .write = buffer_percent_write,
8347 .release = tracing_release_generic_tr,
8348 .llseek = default_llseek,
8349};
8350
YueHaibingff585c52019-06-14 23:32:10 +08008351static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008352
8353static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008354init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008355
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008356static int
8357allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008358{
8359 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008360
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008361 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008362
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008363 buf->tr = tr;
8364
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008365 buf->buffer = ring_buffer_alloc(size, rb_flags);
8366 if (!buf->buffer)
8367 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008368
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008369 buf->data = alloc_percpu(struct trace_array_cpu);
8370 if (!buf->data) {
8371 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008372 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008373 return -ENOMEM;
8374 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008375
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008376 /* Allocate the first page for all buffers */
8377 set_buffer_entries(&tr->trace_buffer,
8378 ring_buffer_size(tr->trace_buffer.buffer, 0));
8379
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008380 return 0;
8381}
8382
8383static int allocate_trace_buffers(struct trace_array *tr, int size)
8384{
8385 int ret;
8386
8387 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8388 if (ret)
8389 return ret;
8390
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008391#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008392 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8393 allocate_snapshot ? size : 1);
8394 if (WARN_ON(ret)) {
8395 ring_buffer_free(tr->trace_buffer.buffer);
Jing Xia24f2aaf2017-12-26 15:12:53 +08008396 tr->trace_buffer.buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008397 free_percpu(tr->trace_buffer.data);
Jing Xia24f2aaf2017-12-26 15:12:53 +08008398 tr->trace_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008399 return -ENOMEM;
8400 }
8401 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008402
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008403 /*
8404 * Only the top level trace array gets its snapshot allocated
8405 * from the kernel command line.
8406 */
8407 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008408#endif
8409 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008410}
8411
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008412static void free_trace_buffer(struct trace_buffer *buf)
8413{
8414 if (buf->buffer) {
8415 ring_buffer_free(buf->buffer);
8416 buf->buffer = NULL;
8417 free_percpu(buf->data);
8418 buf->data = NULL;
8419 }
8420}
8421
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008422static void free_trace_buffers(struct trace_array *tr)
8423{
8424 if (!tr)
8425 return;
8426
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008427 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008428
8429#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008430 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008431#endif
8432}
8433
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008434static void init_trace_flags_index(struct trace_array *tr)
8435{
8436 int i;
8437
8438 /* Used by the trace options files */
8439 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8440 tr->trace_flags_index[i] = i;
8441}
8442
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008443static void __update_tracer_options(struct trace_array *tr)
8444{
8445 struct tracer *t;
8446
8447 for (t = trace_types; t; t = t->next)
8448 add_tracer_options(tr, t);
8449}
8450
8451static void update_tracer_options(struct trace_array *tr)
8452{
8453 mutex_lock(&trace_types_lock);
8454 __update_tracer_options(tr);
8455 mutex_unlock(&trace_types_lock);
8456}
8457
Divya Indi28879782019-11-20 11:08:38 -08008458static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008459{
Steven Rostedt277ba042012-08-03 16:10:49 -04008460 struct trace_array *tr;
8461 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008462
Steven Rostedt277ba042012-08-03 16:10:49 -04008463 ret = -ENOMEM;
8464 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8465 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008466 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008467
8468 tr->name = kstrdup(name, GFP_KERNEL);
8469 if (!tr->name)
8470 goto out_free_tr;
8471
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008472 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8473 goto out_free_tr;
8474
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008475 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008476
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008477 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8478
Steven Rostedt277ba042012-08-03 16:10:49 -04008479 raw_spin_lock_init(&tr->start_lock);
8480
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008481 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8482
Steven Rostedt277ba042012-08-03 16:10:49 -04008483 tr->current_trace = &nop_trace;
8484
8485 INIT_LIST_HEAD(&tr->systems);
8486 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008487 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008488 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008489
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008490 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008491 goto out_free_tr;
8492
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008493 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008494 if (!tr->dir)
8495 goto out_free_tr;
8496
8497 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008498 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008499 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008500 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008501 }
Steven Rostedt277ba042012-08-03 16:10:49 -04008502
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008503 ftrace_init_trace_array(tr);
8504
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008505 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008506 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008507 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008508
8509 list_add(&tr->list, &ftrace_trace_arrays);
8510
Divya Indi28879782019-11-20 11:08:38 -08008511 tr->ref++;
8512
Steven Rostedt277ba042012-08-03 16:10:49 -04008513
Divya Indif45d1222019-03-20 11:28:51 -07008514 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008515
8516 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008517 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008518 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008519 kfree(tr->name);
8520 kfree(tr);
8521
Divya Indif45d1222019-03-20 11:28:51 -07008522 return ERR_PTR(ret);
8523}
Steven Rostedt277ba042012-08-03 16:10:49 -04008524
Divya Indif45d1222019-03-20 11:28:51 -07008525static int instance_mkdir(const char *name)
8526{
Divya Indi28879782019-11-20 11:08:38 -08008527 struct trace_array *tr;
8528 int ret;
8529
8530 mutex_lock(&event_mutex);
8531 mutex_lock(&trace_types_lock);
8532
8533 ret = -EEXIST;
8534 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8535 if (tr->name && strcmp(tr->name, name) == 0)
8536 goto out_unlock;
8537 }
8538
8539 tr = trace_array_create(name);
8540
8541 ret = PTR_ERR_OR_ZERO(tr);
8542
8543out_unlock:
8544 mutex_unlock(&trace_types_lock);
8545 mutex_unlock(&event_mutex);
8546 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008547}
8548
Divya Indi28879782019-11-20 11:08:38 -08008549/**
8550 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8551 * @name: The name of the trace array to be looked up/created.
8552 *
8553 * Returns pointer to trace array with given name.
8554 * NULL, if it cannot be created.
8555 *
8556 * NOTE: This function increments the reference counter associated with the
8557 * trace array returned. This makes sure it cannot be freed while in use.
8558 * Use trace_array_put() once the trace array is no longer needed.
8559 *
8560 */
8561struct trace_array *trace_array_get_by_name(const char *name)
8562{
8563 struct trace_array *tr;
8564
8565 mutex_lock(&event_mutex);
8566 mutex_lock(&trace_types_lock);
8567
8568 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8569 if (tr->name && strcmp(tr->name, name) == 0)
8570 goto out_unlock;
8571 }
8572
8573 tr = trace_array_create(name);
8574
8575 if (IS_ERR(tr))
8576 tr = NULL;
8577out_unlock:
8578 if (tr)
8579 tr->ref++;
8580
8581 mutex_unlock(&trace_types_lock);
8582 mutex_unlock(&event_mutex);
8583 return tr;
8584}
8585EXPORT_SYMBOL_GPL(trace_array_get_by_name);
8586
Divya Indif45d1222019-03-20 11:28:51 -07008587static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008588{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008589 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008590
Divya Indi28879782019-11-20 11:08:38 -08008591 /* Reference counter for a newly created trace array = 1. */
8592 if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
Divya Indif45d1222019-03-20 11:28:51 -07008593 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008594
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008595 list_del(&tr->list);
8596
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008597 /* Disable all the flags that were enabled coming in */
8598 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8599 if ((1 << i) & ZEROED_TRACE_FLAGS)
8600 set_tracer_flag(tr, 1 << i, 0);
8601 }
8602
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008603 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308604 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008605 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008606 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008607 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08008608 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008609 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008610
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008611 for (i = 0; i < tr->nr_topts; i++) {
8612 kfree(tr->topts[i].topts);
8613 }
8614 kfree(tr->topts);
8615
Chunyu Hudb9108e02017-07-20 18:36:09 +08008616 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008617 kfree(tr->name);
8618 kfree(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008619 tr = NULL;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008620
Divya Indif45d1222019-03-20 11:28:51 -07008621 return 0;
8622}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008623
Divya Indie585e642019-08-14 10:55:24 -07008624int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008625{
Divya Indie585e642019-08-14 10:55:24 -07008626 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008627 int ret;
8628
Divya Indie585e642019-08-14 10:55:24 -07008629 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008630 return -EINVAL;
8631
8632 mutex_lock(&event_mutex);
8633 mutex_lock(&trace_types_lock);
8634
Divya Indie585e642019-08-14 10:55:24 -07008635 ret = -ENODEV;
8636
8637 /* Making sure trace array exists before destroying it. */
8638 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8639 if (tr == this_tr) {
8640 ret = __remove_instance(tr);
8641 break;
8642 }
8643 }
Divya Indif45d1222019-03-20 11:28:51 -07008644
8645 mutex_unlock(&trace_types_lock);
8646 mutex_unlock(&event_mutex);
8647
8648 return ret;
8649}
8650EXPORT_SYMBOL_GPL(trace_array_destroy);
8651
8652static int instance_rmdir(const char *name)
8653{
8654 struct trace_array *tr;
8655 int ret;
8656
8657 mutex_lock(&event_mutex);
8658 mutex_lock(&trace_types_lock);
8659
8660 ret = -ENODEV;
8661 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8662 if (tr->name && strcmp(tr->name, name) == 0) {
8663 ret = __remove_instance(tr);
8664 break;
8665 }
8666 }
8667
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008668 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008669 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008670
8671 return ret;
8672}
8673
Steven Rostedt277ba042012-08-03 16:10:49 -04008674static __init void create_trace_instances(struct dentry *d_tracer)
8675{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008676 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8677 instance_mkdir,
8678 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008679 if (WARN_ON(!trace_instance_dir))
8680 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04008681}
8682
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008683static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008684init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008685{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008686 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008687 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008688
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008689 trace_create_file("available_tracers", 0444, d_tracer,
8690 tr, &show_traces_fops);
8691
8692 trace_create_file("current_tracer", 0644, d_tracer,
8693 tr, &set_tracer_fops);
8694
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008695 trace_create_file("tracing_cpumask", 0644, d_tracer,
8696 tr, &tracing_cpumask_fops);
8697
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008698 trace_create_file("trace_options", 0644, d_tracer,
8699 tr, &tracing_iter_fops);
8700
8701 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008702 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008703
8704 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008705 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008706
8707 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008708 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008709
8710 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8711 tr, &tracing_total_entries_fops);
8712
Wang YanQing238ae932013-05-26 16:52:01 +08008713 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008714 tr, &tracing_free_buffer_fops);
8715
8716 trace_create_file("trace_marker", 0220, d_tracer,
8717 tr, &tracing_mark_fops);
8718
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008719 file = __find_event_file(tr, "ftrace", "print");
8720 if (file && file->dir)
8721 trace_create_file("trigger", 0644, file->dir, file,
8722 &event_trigger_fops);
8723 tr->trace_marker_file = file;
8724
Steven Rostedtfa32e852016-07-06 15:25:08 -04008725 trace_create_file("trace_marker_raw", 0220, d_tracer,
8726 tr, &tracing_mark_raw_fops);
8727
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008728 trace_create_file("trace_clock", 0644, d_tracer, tr,
8729 &trace_clock_fops);
8730
8731 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008732 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008733
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008734 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8735 &trace_time_stamp_mode_fops);
8736
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008737 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008738
8739 trace_create_file("buffer_percent", 0444, d_tracer,
8740 tr, &buffer_percent_fops);
8741
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008742 create_trace_options_dir(tr);
8743
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008744#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008745 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008746#endif
8747
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008748 if (ftrace_create_function_files(tr, d_tracer))
8749 WARN(1, "Could not allocate function filter files");
8750
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008751#ifdef CONFIG_TRACER_SNAPSHOT
8752 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008753 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008754#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008755
Tom Zanussi8a062902019-03-31 18:48:15 -05008756 trace_create_file("error_log", 0644, d_tracer,
8757 tr, &tracing_err_log_fops);
8758
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008759 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008760 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008761
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04008762 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008763}
8764
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008765static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008766{
8767 struct vfsmount *mnt;
8768 struct file_system_type *type;
8769
8770 /*
8771 * To maintain backward compatibility for tools that mount
8772 * debugfs to get to the tracing facility, tracefs is automatically
8773 * mounted to the debugfs/tracing directory.
8774 */
8775 type = get_fs_type("tracefs");
8776 if (!type)
8777 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008778 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008779 put_filesystem(type);
8780 if (IS_ERR(mnt))
8781 return NULL;
8782 mntget(mnt);
8783
8784 return mnt;
8785}
8786
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008787/**
8788 * tracing_init_dentry - initialize top level trace array
8789 *
8790 * This is called when creating files or directories in the tracing
8791 * directory. It is called via fs_initcall() by any of the boot up code
8792 * and expects to return the dentry of the top level tracing directory.
8793 */
8794struct dentry *tracing_init_dentry(void)
8795{
8796 struct trace_array *tr = &global_trace;
8797
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05008798 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11008799 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05008800 return ERR_PTR(-EPERM);
8801 }
8802
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008803 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008804 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008805 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008806
Jiaxing Wang8b129192015-11-06 16:04:16 +08008807 if (WARN_ON(!tracefs_initialized()) ||
8808 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8809 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008810 return ERR_PTR(-ENODEV);
8811
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008812 /*
8813 * As there may still be users that expect the tracing
8814 * files to exist in debugfs/tracing, we must automount
8815 * the tracefs file system there, so older tools still
8816 * work with the newer kerenl.
8817 */
8818 tr->dir = debugfs_create_automount("tracing", NULL,
8819 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008820
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008821 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008822}
8823
Jeremy Linton00f4b652017-05-31 16:56:43 -05008824extern struct trace_eval_map *__start_ftrace_eval_maps[];
8825extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008826
Jeremy Linton5f60b352017-05-31 16:56:47 -05008827static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008828{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008829 int len;
8830
Jeremy Linton02fd7f62017-05-31 16:56:42 -05008831 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008832 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008833}
8834
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008835#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008836static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008837{
Jeremy Linton99be6472017-05-31 16:56:44 -05008838 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008839 return;
8840
8841 /*
8842 * Modules with bad taint do not have events created, do
8843 * not bother with enums either.
8844 */
8845 if (trace_module_has_bad_taint(mod))
8846 return;
8847
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008848 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008849}
8850
Jeremy Linton681bec02017-05-31 16:56:53 -05008851#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008852static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008853{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05008854 union trace_eval_map_item *map;
8855 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008856
Jeremy Linton99be6472017-05-31 16:56:44 -05008857 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008858 return;
8859
Jeremy Linton1793ed92017-05-31 16:56:46 -05008860 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008861
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05008862 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008863
8864 while (map) {
8865 if (map->head.mod == mod)
8866 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05008867 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008868 last = &map->tail.next;
8869 map = map->tail.next;
8870 }
8871 if (!map)
8872 goto out;
8873
Jeremy Linton5f60b352017-05-31 16:56:47 -05008874 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008875 kfree(map);
8876 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05008877 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008878}
8879#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008880static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05008881#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008882
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008883static int trace_module_notify(struct notifier_block *self,
8884 unsigned long val, void *data)
8885{
8886 struct module *mod = data;
8887
8888 switch (val) {
8889 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008890 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008891 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008892 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008893 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008894 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008895 }
8896
8897 return 0;
8898}
8899
8900static struct notifier_block trace_module_nb = {
8901 .notifier_call = trace_module_notify,
8902 .priority = 0,
8903};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008904#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008905
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008906static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008907{
8908 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008909
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08008910 trace_access_lock_init();
8911
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008912 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008913 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09008914 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008915
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04008916 event_trace_init();
8917
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008918 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04008919 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008920
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008921 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04008922 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008923
Li Zefan339ae5d2009-04-17 10:34:30 +08008924 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008925 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02008926
Avadh Patel69abe6a2009-04-10 16:04:48 -04008927 trace_create_file("saved_cmdlines", 0444, d_tracer,
8928 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03008929
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008930 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8931 NULL, &tracing_saved_cmdlines_size_fops);
8932
Michael Sartain99c621d2017-07-05 22:07:15 -06008933 trace_create_file("saved_tgids", 0444, d_tracer,
8934 NULL, &tracing_saved_tgids_fops);
8935
Jeremy Linton5f60b352017-05-31 16:56:47 -05008936 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008937
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008938 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008939
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008940#ifdef CONFIG_MODULES
8941 register_module_notifier(&trace_module_nb);
8942#endif
8943
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008944#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008945 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008946 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008947#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008948
Steven Rostedt277ba042012-08-03 16:10:49 -04008949 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09008950
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008951 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05008952
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01008953 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008954}
8955
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008956static int trace_panic_handler(struct notifier_block *this,
8957 unsigned long event, void *unused)
8958{
Steven Rostedt944ac422008-10-23 19:26:08 -04008959 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008960 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008961 return NOTIFY_OK;
8962}
8963
8964static struct notifier_block trace_panic_notifier = {
8965 .notifier_call = trace_panic_handler,
8966 .next = NULL,
8967 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8968};
8969
8970static int trace_die_handler(struct notifier_block *self,
8971 unsigned long val,
8972 void *data)
8973{
8974 switch (val) {
8975 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04008976 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008977 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008978 break;
8979 default:
8980 break;
8981 }
8982 return NOTIFY_OK;
8983}
8984
8985static struct notifier_block trace_die_notifier = {
8986 .notifier_call = trace_die_handler,
8987 .priority = 200
8988};
8989
8990/*
8991 * printk is set to max of 1024, we really don't need it that big.
8992 * Nothing should be printing 1000 characters anyway.
8993 */
8994#define TRACE_MAX_PRINT 1000
8995
8996/*
8997 * Define here KERN_TRACE so that we have one place to modify
8998 * it if we decide to change what log level the ftrace dump
8999 * should be at.
9000 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009001#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009002
Jason Wessel955b61e2010-08-05 09:22:23 -05009003void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009004trace_printk_seq(struct trace_seq *s)
9005{
9006 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009007 if (s->seq.len >= TRACE_MAX_PRINT)
9008 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009009
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009010 /*
9011 * More paranoid code. Although the buffer size is set to
9012 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9013 * an extra layer of protection.
9014 */
9015 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9016 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009017
9018 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009019 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009020
9021 printk(KERN_TRACE "%s", s->buffer);
9022
Steven Rostedtf9520752009-03-02 14:04:40 -05009023 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009024}
9025
Jason Wessel955b61e2010-08-05 09:22:23 -05009026void trace_init_global_iter(struct trace_iterator *iter)
9027{
9028 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009029 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009030 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05009031 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009032
9033 if (iter->trace && iter->trace->open)
9034 iter->trace->open(iter);
9035
9036 /* Annotate start of buffers if we had overruns */
9037 if (ring_buffer_overruns(iter->trace_buffer->buffer))
9038 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9039
9040 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9041 if (trace_clocks[iter->tr->clock_id].in_ns)
9042 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009043}
9044
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009045void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009046{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009047 /* use static because iter can be a bit big for the stack */
9048 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009049 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009050 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009051 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009052 unsigned long flags;
9053 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009054
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009055 /* Only allow one dump user at a time. */
9056 if (atomic_inc_return(&dump_running) != 1) {
9057 atomic_dec(&dump_running);
9058 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009059 }
9060
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009061 /*
9062 * Always turn off tracing when we dump.
9063 * We don't need to show trace output of what happens
9064 * between multiple crashes.
9065 *
9066 * If the user does a sysrq-z, then they can re-enable
9067 * tracing with echo 1 > tracing_on.
9068 */
9069 tracing_off();
9070
9071 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009072 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009073
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009074 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009075 trace_init_global_iter(&iter);
9076
Steven Rostedtd7690412008-10-01 00:29:53 -04009077 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05309078 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009079 }
9080
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009081 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009082
Török Edwinb54d3de2008-11-22 13:28:48 +02009083 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009084 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009085
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009086 switch (oops_dump_mode) {
9087 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009088 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009089 break;
9090 case DUMP_ORIG:
9091 iter.cpu_file = raw_smp_processor_id();
9092 break;
9093 case DUMP_NONE:
9094 goto out_enable;
9095 default:
9096 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009097 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009098 }
9099
9100 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009101
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009102 /* Did function tracer already get disabled? */
9103 if (ftrace_is_dead()) {
9104 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9105 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9106 }
9107
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009108 /*
9109 * We need to stop all tracing on all CPUS to read the
9110 * the next buffer. This is a bit expensive, but is
9111 * not done often. We fill all what we can read,
9112 * and then release the locks again.
9113 */
9114
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009115 while (!trace_empty(&iter)) {
9116
9117 if (!cnt)
9118 printk(KERN_TRACE "---------------------------------\n");
9119
9120 cnt++;
9121
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009122 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009123 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009124
Jason Wessel955b61e2010-08-05 09:22:23 -05009125 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009126 int ret;
9127
9128 ret = print_trace_line(&iter);
9129 if (ret != TRACE_TYPE_NO_CONSUME)
9130 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009131 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009132 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009133
9134 trace_printk_seq(&iter.seq);
9135 }
9136
9137 if (!cnt)
9138 printk(KERN_TRACE " (ftrace buffer empty)\n");
9139 else
9140 printk(KERN_TRACE "---------------------------------\n");
9141
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009142 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009143 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009144
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009145 for_each_tracing_cpu(cpu) {
9146 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009147 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009148 atomic_dec(&dump_running);
9149 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009150 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009151}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009152EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009153
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009154int trace_run_command(const char *buf, int (*createfn)(int, char **))
9155{
9156 char **argv;
9157 int argc, ret;
9158
9159 argc = 0;
9160 ret = 0;
9161 argv = argv_split(GFP_KERNEL, buf, &argc);
9162 if (!argv)
9163 return -ENOMEM;
9164
9165 if (argc)
9166 ret = createfn(argc, argv);
9167
9168 argv_free(argv);
9169
9170 return ret;
9171}
9172
9173#define WRITE_BUFSIZE 4096
9174
9175ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9176 size_t count, loff_t *ppos,
9177 int (*createfn)(int, char **))
9178{
9179 char *kbuf, *buf, *tmp;
9180 int ret = 0;
9181 size_t done = 0;
9182 size_t size;
9183
9184 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9185 if (!kbuf)
9186 return -ENOMEM;
9187
9188 while (done < count) {
9189 size = count - done;
9190
9191 if (size >= WRITE_BUFSIZE)
9192 size = WRITE_BUFSIZE - 1;
9193
9194 if (copy_from_user(kbuf, buffer + done, size)) {
9195 ret = -EFAULT;
9196 goto out;
9197 }
9198 kbuf[size] = '\0';
9199 buf = kbuf;
9200 do {
9201 tmp = strchr(buf, '\n');
9202 if (tmp) {
9203 *tmp = '\0';
9204 size = tmp - buf + 1;
9205 } else {
9206 size = strlen(buf);
9207 if (done + size < count) {
9208 if (buf != kbuf)
9209 break;
9210 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9211 pr_warn("Line length is too long: Should be less than %d\n",
9212 WRITE_BUFSIZE - 2);
9213 ret = -EINVAL;
9214 goto out;
9215 }
9216 }
9217 done += size;
9218
9219 /* Remove comments */
9220 tmp = strchr(buf, '#');
9221
9222 if (tmp)
9223 *tmp = '\0';
9224
9225 ret = trace_run_command(buf, createfn);
9226 if (ret)
9227 goto out;
9228 buf += size;
9229
9230 } while (done < count);
9231 }
9232 ret = done;
9233
9234out:
9235 kfree(kbuf);
9236
9237 return ret;
9238}
9239
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009240__init static int tracer_alloc_buffers(void)
9241{
Steven Rostedt73c51622009-03-11 13:42:01 -04009242 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309243 int ret = -ENOMEM;
9244
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009245
9246 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009247 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009248 return -EPERM;
9249 }
9250
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009251 /*
9252 * Make sure we don't accidently add more trace options
9253 * than we have bits for.
9254 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009255 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009256
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309257 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9258 goto out;
9259
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009260 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309261 goto out_free_buffer_mask;
9262
Steven Rostedt07d777f2011-09-22 14:01:55 -04009263 /* Only allocate trace_printk buffers if a trace_printk exists */
9264 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009265 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009266 trace_printk_init_buffers();
9267
Steven Rostedt73c51622009-03-11 13:42:01 -04009268 /* To save memory, keep the ring buffer size to its minimum */
9269 if (ring_buffer_expanded)
9270 ring_buf_size = trace_buf_size;
9271 else
9272 ring_buf_size = 1;
9273
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309274 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009275 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009276
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009277 raw_spin_lock_init(&global_trace.start_lock);
9278
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009279 /*
9280 * The prepare callbacks allocates some memory for the ring buffer. We
9281 * don't free the buffer if the if the CPU goes down. If we were to free
9282 * the buffer, then the user would lose any trace that was in the
9283 * buffer. The memory will be removed once the "instance" is removed.
9284 */
9285 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9286 "trace/RB:preapre", trace_rb_cpu_prepare,
9287 NULL);
9288 if (ret < 0)
9289 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009290 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009291 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009292 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9293 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009294 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009295
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009296 if (trace_create_savedcmd() < 0)
9297 goto out_free_temp_buffer;
9298
Steven Rostedtab464282008-05-12 21:21:00 +02009299 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009300 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009301 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9302 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009303 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009304 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009305
Steven Rostedt499e5472012-02-22 15:50:28 -05009306 if (global_trace.buffer_disabled)
9307 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009308
Steven Rostedte1e232c2014-02-10 23:38:46 -05009309 if (trace_boot_clock) {
9310 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9311 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009312 pr_warn("Trace clock %s not defined, going back to default\n",
9313 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009314 }
9315
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009316 /*
9317 * register_tracer() might reference current_trace, so it
9318 * needs to be set before we register anything. This is
9319 * just a bootstrap of current_trace anyway.
9320 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009321 global_trace.current_trace = &nop_trace;
9322
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009323 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9324
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009325 ftrace_init_global_array_ops(&global_trace);
9326
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009327 init_trace_flags_index(&global_trace);
9328
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009329 register_tracer(&nop_trace);
9330
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009331 /* Function tracing may start here (via kernel command line) */
9332 init_function_trace();
9333
Steven Rostedt60a11772008-05-12 21:20:44 +02009334 /* All seems OK, enable tracing */
9335 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009336
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009337 atomic_notifier_chain_register(&panic_notifier_list,
9338 &trace_panic_notifier);
9339
9340 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009341
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009342 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9343
9344 INIT_LIST_HEAD(&global_trace.systems);
9345 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009346 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009347 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009348 list_add(&global_trace.list, &ftrace_trace_arrays);
9349
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08009350 apply_trace_boot_options();
Steven Rostedt7bcfaf54f52012-11-01 22:56:07 -04009351
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04009352 register_snapshot_cmd();
9353
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009354 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009355
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009356out_free_savedcmd:
9357 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009358out_free_temp_buffer:
9359 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009360out_rm_hp_state:
9361 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309362out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009363 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309364out_free_buffer_mask:
9365 free_cpumask_var(tracing_buffer_mask);
9366out:
9367 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009368}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009369
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009370void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009371{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009372 if (tracepoint_printk) {
9373 tracepoint_print_iter =
9374 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9375 if (WARN_ON(!tracepoint_print_iter))
9376 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05009377 else
9378 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009379 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009380 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009381}
9382
9383void __init trace_init(void)
9384{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009385 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009386}
9387
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009388__init static int clear_boot_tracer(void)
9389{
9390 /*
9391 * The default tracer at boot buffer is an init section.
9392 * This function is called in lateinit. If we did not
9393 * find the boot tracer, then clear it out, to prevent
9394 * later registration from accessing the buffer that is
9395 * about to be freed.
9396 */
9397 if (!default_bootup_tracer)
9398 return 0;
9399
9400 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9401 default_bootup_tracer);
9402 default_bootup_tracer = NULL;
9403
9404 return 0;
9405}
9406
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009407fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04009408late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01009409
9410#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9411__init static int tracing_set_default_clock(void)
9412{
9413 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01009414 if (!trace_boot_clock && !sched_clock_stable()) {
Chris Wilson3fd49c92018-03-30 16:01:31 +01009415 printk(KERN_WARNING
9416 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9417 "If you want to keep using the local clock, then add:\n"
9418 " \"trace_clock=local\"\n"
9419 "on the kernel command line\n");
9420 tracing_set_clock(&global_trace, "global");
9421 }
9422
9423 return 0;
9424}
9425late_initcall_sync(tracing_set_default_clock);
9426#endif