// SPDX-License-Identifier: GPL-2.0
/*
* ring buffer based function tracer
*
* Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*
* Originally taken from the RT patch by:
* Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Based on code from the latency_tracer, that is:
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include "trace.h"
#include "trace_output.h"
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
* We need to change this state when a selftest is running.
* A selftest will look into the ring buffer to count the
* entries inserted during the selftest, although concurrent
* insertions into the ring buffer, such as trace_printk(), could occur
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
/*
* If boot-time tracing including tracers/events via kernel cmdline
* is running, we do not want to run SELFTEST.
*/
bool __read_mostly tracing_selftest_disabled;
void __init disable_tracing_selftest(const char *reason)
{
if (!tracing_selftest_disabled) {
tracing_selftest_disabled = true;
pr_info("Ftrace startup test is disabled due to %s\n", reason);
}
}
#else
#define tracing_selftest_running 0
#define tracing_selftest_disabled 0
#endif
/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
};
static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
return 0;
}
/*
* To prevent the comm cache from being overwritten when no
* tracing is active, only save the comm when a trace event
* occurred.
*/
DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
* Kill all tracing for good (never come back).
* It is initialized to 1, and is set to zero only when the initialization
* of the tracer is successful. That is the only place that clears it.
*/
static int tracing_disabled = 1;
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
*
* If there is an oops (or kernel panic) and the ftrace_dump_on_oops
* is set, then ftrace_dump is called. This will output the contents
* of the ftrace buffers to the console. This is very useful for
* capturing traces that lead to crashes and outputting them to a
* serial console.
*
* It is off by default, but you can enable it either by specifying
* "ftrace_dump_on_oops" on the kernel command line, or by setting
* /proc/sys/kernel/ftrace_dump_on_oops.
* Set 1 if you want to dump the buffers of all CPUs
* Set 2 if you want to dump the buffer of the CPU that triggered the oops
* Set an instance name if you want to dump a specific trace instance
* Dumping multiple instances is also supported; instance names are separated
* by commas.
*/
/* Set to the string "0" to disable dumping by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
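/*
* Example settings (a sketch based on the description above; the instance
* names are purely illustrative):
*
*   ftrace_dump_on_oops=1              dump the buffers of all CPUs
*   ftrace_dump_on_oops=2              dump only the buffer of the CPU that
*                                      triggered the oops
*   ftrace_dump_on_oops=inst_a,inst_b  dump the named trace instances
*
* The same values may also be written to /proc/sys/kernel/ftrace_dump_on_oops
* at run time.
*/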
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
struct module *mod;
unsigned long length;
};
union trace_eval_map_item;
struct trace_eval_map_tail {
/*
* "end" is first and points to NULL as it must be different
* than "mod" or "eval_string"
*/
union trace_eval_map_item *next;
const char *end; /* points to NULL */
};
static DEFINE_MUTEX(trace_eval_mutex);
/*
* The trace_eval_maps are saved in an array with two extra elements,
* one at the beginning, and one at the end. The beginning item contains
* the count of the saved maps (head.length), and the module they
* belong to if not built in (head.mod). The ending item contains a
* pointer to the next array of saved eval_map items.
*/
union trace_eval_map_item {
struct trace_eval_map map;
struct trace_eval_map_head head;
struct trace_eval_map_tail tail;
};
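/*
* As a rough illustration of the layout described above, an array that
* saves three eval maps for a module looks like:
*
*   item[0].head - .length = 3, .mod = owning module (if not built in)
*   item[1].map  - first saved trace_eval_map
*   item[2].map  - second saved trace_eval_map
*   item[3].map  - third saved trace_eval_map
*   item[4].tail - .next = next saved array (if any), .end = NULL
*/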
static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
unsigned int trace_ctx);
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static bool allocate_snapshot;
static bool snapshot_at_boot;
static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;
static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;
static int __init set_cmdline_ftrace(char *str)
{
strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
trace_set_ring_buffer_expanded(NULL);
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
int ftrace_dump_on_oops_enabled(void)
{
if (!strcmp("0", ftrace_dump_on_oops))
return 0;
else
return 1;
}
static int __init set_ftrace_dump_on_oops(char *str)
{
if (!*str) {
strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
return 1;
}
if (*str == ',') {
strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
return 1;
}
if (*str++ == '=') {
strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
return 1;
}
return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
__disable_trace_on_warning = 1;
return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
char *slot = boot_snapshot_info + boot_snapshot_index;
int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
int ret;
if (str[0] == '=') {
str++;
if (strlen(str) >= left)
return -1;
ret = snprintf(slot, left, "%s\t", str);
boot_snapshot_index += ret;
} else {
allocate_snapshot = true;
/* We also need the main ring buffer expanded */
trace_set_ring_buffer_expanded(NULL);
}
return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static int __init boot_snapshot(char *str)
{
snapshot_at_boot = true;
boot_alloc_snapshot(str);
return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);
static int __init boot_instance(char *str)
{
char *slot = boot_instance_info + boot_instance_index;
int left = sizeof(boot_instance_info) - boot_instance_index;
int ret;
if (strlen(str) >= left)
return -1;
ret = snprintf(slot, left, "%s\t", str);
boot_instance_index += ret;
return 1;
}
__setup("trace_instance=", boot_instance);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static int __init set_trace_boot_options(char *str)
{
strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
return 1;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;
static int __init set_trace_boot_clock(char *str)
{
strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
trace_boot_clock = trace_boot_clock_buf;
return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
/* Ignore the "tp_printk_stop_on_boot" param */
if (*str == '_')
return 0;
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
tracepoint_printk = 1;
return 1;
}
__setup("tp_printk", set_tracepoint_printk);
static int __init set_tracepoint_printk_stop(char *str)
{
tracepoint_printk_stop_on_boot = true;
return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
unsigned long long ns2usecs(u64 nsec)
{
nsec += 500;
do_div(nsec, 1000);
return nsec;
}
static void
trace_process_export(struct trace_export *export,
struct ring_buffer_event *event, int flag)
{
struct trace_entry *entry;
unsigned int size = 0;
if (export->flags & flag) {
entry = ring_buffer_event_data(event);
size = ring_buffer_event_length(event);
export->write(export, entry, size);
}
}
static DEFINE_MUTEX(ftrace_export_lock);
static struct trace_export __rcu *ftrace_exports_list __read_mostly;
static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
static inline void ftrace_exports_enable(struct trace_export *export)
{
if (export->flags & TRACE_EXPORT_FUNCTION)
static_branch_inc(&trace_function_exports_enabled);
if (export->flags & TRACE_EXPORT_EVENT)
static_branch_inc(&trace_event_exports_enabled);
if (export->flags & TRACE_EXPORT_MARKER)
static_branch_inc(&trace_marker_exports_enabled);
}
static inline void ftrace_exports_disable(struct trace_export *export)
{
if (export->flags & TRACE_EXPORT_FUNCTION)
static_branch_dec(&trace_function_exports_enabled);
if (export->flags & TRACE_EXPORT_EVENT)
static_branch_dec(&trace_event_exports_enabled);
if (export->flags & TRACE_EXPORT_MARKER)
static_branch_dec(&trace_marker_exports_enabled);
}
static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
struct trace_export *export;
preempt_disable_notrace();
export = rcu_dereference_raw_check(ftrace_exports_list);
while (export) {
trace_process_export(export, event, flag);
export = rcu_dereference_raw_check(export->next);
}
preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
rcu_assign_pointer(export->next, *list);
/*
* We are entering export into the list but another
* CPU might be walking that list. We need to make sure
* the export->next pointer is valid before another CPU sees
* the export pointer included into the list.
*/
rcu_assign_pointer(*list, export);
}
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
struct trace_export **p;
for (p = list; *p != NULL; p = &(*p)->next)
if (*p == export)
break;
if (*p != export)
return -1;
rcu_assign_pointer(*p, (*p)->next);
return 0;
}
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
ftrace_exports_enable(export);
add_trace_export(list, export);
}
static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
int ret;
ret = rm_trace_export(list, export);
ftrace_exports_disable(export);
return ret;
}
int register_ftrace_export(struct trace_export *export)
{
if (WARN_ON_ONCE(!export->write))
return -1;
mutex_lock(&ftrace_export_lock);
add_ftrace_export(&ftrace_exports_list, export);
mutex_unlock(&ftrace_export_lock);
return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);
int unregister_ftrace_export(struct trace_export *export)
{
int ret;
mutex_lock(&ftrace_export_lock);
ret = rm_ftrace_export(&ftrace_exports_list, export);
mutex_unlock(&ftrace_export_lock);
return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
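/*
* A minimal usage sketch (not part of this file; the "my_" names are
* illustrative only): a caller supplies a write() callback plus the set of
* TRACE_EXPORT_* flags it is interested in, registers the export, and
* unregisters it when the data is no longer wanted.
*
*   static void my_export_write(struct trace_export *export,
*                               const void *entry, unsigned int size)
*   {
*           pr_debug("exported %u bytes of trace data\n", size);
*   }
*
*   static struct trace_export my_export = {
*           .write = my_export_write,
*           .flags = TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
*   };
*
*   register_ftrace_export(&my_export);
*
* and later:
*
*   unregister_ftrace_export(&my_export);
*/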
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS \
(FUNCTION_DEFAULT_FLAGS | \
TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
TRACE_ITER_HASH_PTR)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
* The global_trace is the descriptor that holds the top-level tracing
* buffers for the live tracing.
*/
static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
if (!tr)
tr = &global_trace;
tr->ring_buffer_expanded = true;
}
LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
struct trace_array *tr;
int ret = -ENODEV;
mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) {
tr->ref++;
ret = 0;
break;
}
}
mutex_unlock(&trace_types_lock);
return ret;
}
static void __trace_array_put(struct trace_array *this_tr)
{
WARN_ON(!this_tr->ref);
this_tr->ref--;
}
/**
* trace_array_put - Decrement the reference counter for this trace array.
* @this_tr: pointer to the trace array
*
* NOTE: Use this when we no longer need the trace array returned by
* trace_array_get_by_name(). This ensures the trace array can be later
* destroyed.
*
*/
void trace_array_put(struct trace_array *this_tr)
{
if (!this_tr)
return;
mutex_lock(&trace_types_lock);
__trace_array_put(this_tr);
mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
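/*
* A minimal sketch of the expected get/put pairing (use_the_instance() is
* an illustrative placeholder): code that holds on to a trace_array must
* pin it first and drop the reference when done, so the instance can later
* be destroyed.
*
*   if (trace_array_get(tr) < 0)
*           return -ENODEV;
*
*   use_the_instance(tr);
*
*   trace_array_put(tr);
*/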
int tracing_check_open_get_tr(struct trace_array *tr)
{
int ret;
ret = security_locked_down(LOCKDOWN_TRACEFS);
if (ret)
return ret;
if (tracing_disabled)
return -ENODEV;
if (tr && trace_array_get(tr) < 0)
return -ENODEV;
return 0;
}
int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
!filter_match_preds(call->filter, rec)) {
__trace_event_discard_commit(buffer, event);
return 1;
}
return 0;
}
/**
* trace_find_filtered_pid - check if a pid exists in a filtered_pid list
* @filtered_pids: The list of pids to check
* @search_pid: The PID to find in @filtered_pids
*
* Returns true if @search_pid is found in @filtered_pids, and false otherwise.
*/
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
return trace_pid_list_is_set(filtered_pids, search_pid);
}
/**
* trace_ignore_this_task - should a task be ignored for tracing
* @filtered_pids: The list of pids to check
* @filtered_no_pids: The list of pids not to be traced
* @task: The task that should be ignored if not filtered
*
* Checks if @task should be traced or not from @filtered_pids.
* Returns true if @task should *NOT* be traced.
* Returns false if @task should be traced.
*/
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
struct trace_pid_list *filtered_no_pids,
struct task_struct *task)
{
/*
* If filtered_no_pids is not empty, and the task's pid is listed
* in filtered_no_pids, then return true.
* Otherwise, if filtered_pids is empty, that means we can
* trace all tasks. If it has content, then only trace pids
* within filtered_pids.
*/
return (filtered_pids &&
!trace_find_filtered_pid(filtered_pids, task->pid)) ||
(filtered_no_pids &&
trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
* trace_filter_add_remove_task - Add or remove a task from a pid_list
* @pid_list: The list to modify
* @self: The current task for fork or NULL for exit
* @task: The task to add or remove
*
* If adding a task, if @self is defined, the task is only added if @self
* is also included in @pid_list. This happens on fork and tasks should
* only be added when the parent is listed. If @self is NULL, then the
* @task pid will be removed from the list, which would happen on exit
* of a task.
*/
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
struct task_struct *self,
struct task_struct *task)
{
if (!pid_list)
return;
/* For forks, we only add if the forking task is listed */
if (self) {
if (!trace_find_filtered_pid(pid_list, self->pid))
return;
}
/* "self" is set for forks, and NULL for exits */
if (self)
trace_pid_list_set(pid_list, task->pid);
else
trace_pid_list_clear(pid_list, task->pid);
}
/**
* trace_pid_next - Used for seq_file to get to the next pid of a pid_list
* @pid_list: The pid list to show
* @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
* @pos: The position of the file
*
* This is used by the seq_file "next" operation to iterate the pids
* listed in a trace_pid_list structure.
*
* Returns the pid+1 as we want to display pid of zero, but NULL would
* stop the iteration.
*/
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
long pid = (unsigned long)v;
unsigned int next;
(*pos)++;
/* pid already is +1 of the actual previous bit */
if (trace_pid_list_next(pid_list, pid, &next) < 0)
return NULL;
pid = next;
/* Return pid + 1 to allow zero to be represented */
return (void *)(pid + 1);
}
/**
* trace_pid_start - Used for seq_file to start reading pid lists
* @pid_list: The pid list to show
* @pos: The position of the file
*
* This is used by seq_file "start" operation to start the iteration
* of listing pids.
*
* Returns the pid+1 as we want to display pid of zero, but NULL would
* stop the iteration.
*/
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
unsigned long pid;
unsigned int first;
loff_t l = 0;
if (trace_pid_list_first(pid_list, &first) < 0)
return NULL;
pid = first;
/* Return pid + 1 so that zero can be the exit value */
for (pid++; pid && l < *pos;
pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
;
return (void *)pid;
}
/**
* trace_pid_show - show the current pid in seq_file processing
* @m: The seq_file structure to write into
* @v: A void pointer of the pid (+1) value to display
*
* Can be directly used by seq_file operations to display the current
* pid value.
*/
int trace_pid_show(struct seq_file *m, void *v)
{
unsigned long pid = (unsigned long)v - 1;
seq_printf(m, "%lu\n", pid);
return 0;
}
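/*
* A sketch of how the helpers above are wired into a seq_file (the "my_"
* names are illustrative, and a real implementation takes and releases the
* protection for the pid list in my_pid_start()/my_pid_stop()):
*
*   static void *my_pid_start(struct seq_file *m, loff_t *pos)
*   {
*           return trace_pid_start(my_get_pid_list(m), pos);
*   }
*
*   static void *my_pid_next(struct seq_file *m, void *v, loff_t *pos)
*   {
*           return trace_pid_next(my_get_pid_list(m), v, pos);
*   }
*
*   static void my_pid_stop(struct seq_file *m, void *v)
*   {
*   }
*
*   static const struct seq_operations my_pid_seq_ops = {
*           .start = my_pid_start,
*           .next  = my_pid_next,
*           .stop  = my_pid_stop,
*           .show  = trace_pid_show,
*   };
*/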
/* 128 (PID_BUF_SIZE + 1) should be much more than enough */
#define PID_BUF_SIZE 127
int trace_pid_write(struct trace_pid_list *filtered_pids,
struct trace_pid_list **new_pid_list,
const char __user *ubuf, size_t cnt)
{
struct trace_pid_list *pid_list;
struct trace_parser parser;
unsigned long val;
int nr_pids = 0;
ssize_t read = 0;
ssize_t ret;
loff_t pos;
pid_t pid;
if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
return -ENOMEM;
/*
* Always recreate the array. The write is an all-or-nothing
* operation: a new array is always built when the user adds
* pids, and if the operation fails, the current list is
* not modified.
*/
pid_list = trace_pid_list_alloc();
if (!pid_list) {
trace_parser_put(&parser);
return -ENOMEM;
}
if (filtered_pids) {
/* copy the current bits to the new max */
ret = trace_pid_list_first(filtered_pids, &pid);
while (!ret) {
trace_pid_list_set(pid_list, pid);
ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
nr_pids++;
}
}
ret = 0;
while (cnt > 0) {
pos = 0;
ret = trace_get_user(&parser, ubuf, cnt, &pos);
if (ret < 0)
break;
read += ret;
ubuf += ret;
cnt -= ret;
if (!trace_parser_loaded(&parser))
break;
ret = -EINVAL;
if (kstrtoul(parser.buffer, 0, &val))
break;
pid = (pid_t)val;
if (trace_pid_list_set(pid_list, pid) < 0) {
ret = -1;
break;
}
nr_pids++;
trace_parser_clear(&parser);
ret = 0;
}
trace_parser_put(&parser);
if (ret < 0) {
trace_pid_list_free(pid_list);
return ret;
}
if (!nr_pids) {
/* Cleared the list of pids */
trace_pid_list_free(pid_list);
pid_list = NULL;
}
*new_pid_list = pid_list;
return read;
}
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
if (!buf->buffer)
return trace_clock_local();
ts = ring_buffer_time_stamp(buf->buffer);
ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
}
u64 ftrace_now(int cpu)
{
return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}
/**
* tracing_is_enabled - Show if global_trace has been enabled
*
* Shows if the global trace has been enabled or not. It uses the
* mirror flag "buffer_disabled" to be used in fast paths such as for
* the irqsoff tracer. But it may be inaccurate due to races. If you
* need to know the accurate state, use tracing_is_on() which is a little
* slower, but accurate.
*/
int tracing_is_enabled(void)
{
/*
* For quick access (irqsoff uses this in fast path), just
* return the mirror variable of the state of the ring buffer.
* It's a little racy, but we don't really care.
*/
smp_rmb();
return !global_trace.buffer_disabled;
}
/*
* trace_buf_size is the size in bytes that is allocated
* for a buffer. Note, the number of bytes is always rounded
* to page size.
*
* This number is purposely set to a low number of 16384 entries.
* If a dump on oops happens, it will be much appreciated
* to not have to wait for all that output. Anyway, this can be
* configured at both boot time and run time.
*/
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;
/*
* trace_types_lock is used to protect the trace_types list.
*/
DEFINE_MUTEX(trace_types_lock);
/*
* serialize access to the ring buffer
*
* The ring buffer serializes readers, but that is only low level protection.
* The validity of the events (which are returned by ring_buffer_peek() etc.)
* is not protected by the ring buffer.
*
* The content of events may become garbage if we allow another process to
* consume these events concurrently:
* A) the page of the consumed events may become a normal page
* (not a reader page) in the ring buffer, and this page will be rewritten
* by the event producer.
* B) the page of the consumed events may become a page for splice_read,
* and this page will be returned to the system.
*
* These primitives allow multiple processes to access different CPU ring
* buffers concurrently.
*
* These primitives don't distinguish between read-only and read-consume
* access. Multiple read-only accesses are also serialized.
*/
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
if (cpu == RING_BUFFER_ALL_CPUS) {
/* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock);
} else {
/* gain it for accessing a cpu ring buffer. */
/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */
mutex_lock(&per_cpu(cpu_access_lock, cpu));
}
}
static inline void trace_access_unlock(int cpu)
{
if (cpu == RING_BUFFER_ALL_CPUS) {
up_write(&all_cpu_access_lock);
} else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
up_read(&all_cpu_access_lock);
}
}
static inline void trace_access_lock_init(void)
{
int cpu;
for_each_possible_cpu(cpu)
mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else
static DEFINE_MUTEX(access_lock);
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
static inline void trace_access_unlock(int cpu)
{
(void)cpu;
mutex_unlock(&access_lock);
}
static inline void trace_access_lock_init(void)
{
}
#endif
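/*
* A sketch of the expected usage pattern for these primitives (the body is
* illustrative): every consumer brackets its access to a cpu buffer with
* the lock for that cpu, or passes RING_BUFFER_ALL_CPUS when it needs to
* touch every buffer at once.
*
*   trace_access_lock(cpu);
*   consume_events_from_cpu_buffer(cpu);
*   trace_access_unlock(cpu);
*/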
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned int trace_ctx,
int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
unsigned int trace_ctx,
int skip, struct pt_regs *regs);
#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned int trace_ctx,
int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
unsigned long trace_ctx,
int skip, struct pt_regs *regs)
{
}
#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
int type, unsigned int trace_ctx)
{
struct trace_entry *ent = ring_buffer_event_data(event);
tracing_generic_entry_update(ent, type, trace_ctx);
}
static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned int trace_ctx)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL)
trace_event_setup(event, type, trace_ctx);
return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
ring_buffer_record_on(tr->array_buffer.buffer);
/*
* This flag is looked at when buffers haven't been allocated
* yet, or by some tracers (like irqsoff), that just want to
* know if the ring buffer has been disabled, but it can handle
* races where it gets disabled while we still do a record.
* As the check is in the fast path of the tracers, it is more
* important to be fast than accurate.
*/
tr->buffer_disabled = 0;
/* Make the flag seen by readers */
smp_wmb();
}
/**
* tracing_on - enable tracing buffers
*
* This function enables tracing buffers that may have been
* disabled with tracing_off.
*/
void tracing_on(void)
{
tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
__this_cpu_write(trace_taskinfo_save, true);
/* If this is the temp buffer, we need to commit fully */
if (this_cpu_read(trace_buffered_event) == event) {
/* Length is in event->array[0] */
ring_buffer_write(buffer, event->array[0], &event->array[1]);
/* Release the temp buffer */
this_cpu_dec(trace_buffered_event_cnt);
/* ring_buffer_unlock_commit() enables preemption */
preempt_enable_notrace();
} else
ring_buffer_unlock_commit(buffer);
}
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
const char *str, int size)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct print_entry *entry;
unsigned int trace_ctx;
int alloc;
if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;
if (unlikely(tracing_selftest_running && tr == &global_trace))
return 0;
if (unlikely(tracing_disabled))
return 0;
alloc = sizeof(*entry) + size + 2; /* possible \n added */
trace_ctx = tracing_gen_ctx();
buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
trace_ctx);
if (!event) {
size = 0;
goto out;
}
entry = ring_buffer_event_data(event);
entry->ip = ip;
memcpy(&entry->buf, str, size);
/* Add a newline if necessary */
if (entry->buf[size - 1] != '\n') {
entry->buf[size] = '\n';
entry->buf[size + 1] = '\0';
} else
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
out:
ring_buffer_nest_end(buffer);
return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
/**
* __trace_puts - write a constant string into the trace buffer.
* @ip: The address of the caller
* @str: The constant string to write
* @size: The size of the string.
*/
int __trace_puts(unsigned long ip, const char *str, int size)
{
return __trace_array_puts(&global_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
* __trace_bputs - write the pointer to a constant string into trace buffer
* @ip: The address of the caller
* @str: The constant string to write to the buffer to
*/
int __trace_bputs(unsigned long ip, const char *str)
{
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
unsigned int trace_ctx;
int size = sizeof(struct bputs_entry);
int ret = 0;
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
trace_ctx = tracing_gen_ctx();
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->str = str;
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
ret = 1;
out:
ring_buffer_nest_end(buffer);
return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
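/*
* Callers normally do not use __trace_puts()/__trace_bputs() directly; the
* trace_puts() macro picks between them, using the pointer-only
* __trace_bputs() when the string is a build-time constant and falling back
* to __trace_puts() otherwise. A minimal sketch:
*
*   trace_puts("reached the interesting spot\n");
*/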
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
void *cond_data)
{
struct tracer *tracer = tr->current_trace;
unsigned long flags;
if (in_nmi()) {
trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
trace_array_puts(tr, "*** snapshot is being ignored ***\n");
return;
}
if (!tr->allocated_snapshot) {
trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
trace_array_puts(tr, "*** stopping trace here! ***\n");
tracer_tracing_off(tr);
return;
}
/* Note, a snapshot cannot be used when the tracer uses it */
if (tracer->use_max_tr) {
trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
return;
}
if (tr->mapped) {
trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
return;
}
local_irq_save(flags);
update_max_tr(tr, current, smp_processor_id(), cond_data);
local_irq_restore(flags);
}
void tracing_snapshot_instance(struct trace_array *tr)
{
tracing_snapshot_instance_cond(tr, NULL);
}
/**
* tracing_snapshot - take a snapshot of the current buffer.
*
* This causes a swap between the snapshot buffer and the current live
* tracing buffer. You can use this to take snapshots of the live
* trace when some condition is triggered, but continue to trace.
*
* Note, make sure to allocate the snapshot, either with
* tracing_snapshot_alloc(), or manually
* with: echo 1 > /sys/kernel/tracing/snapshot
*
* If the snapshot buffer is not allocated, this will stop tracing,
* basically making a permanent snapshot.
*/
void tracing_snapshot(void)
{
struct trace_array *tr = &global_trace;
tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
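/*
* A minimal usage sketch (interesting_condition() is illustrative): call
* tracing_snapshot_alloc() once during setup, from a context that may
* sleep, then take snapshots at the point of interest while tracing
* continues.
*
*   tracing_snapshot_alloc();
*
*   if (interesting_condition())
*           tracing_snapshot();
*/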
/**
* tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
* @tr: The tracing instance to snapshot
* @cond_data: The data to be tested conditionally, and possibly saved
*
* This is the same as tracing_snapshot() except that the snapshot is
* conditional - the snapshot will only happen if the
* cond_snapshot.update() implementation receiving the cond_data
* returns true, which means that the trace array's cond_snapshot
* update() operation used the cond_data to determine whether the
* snapshot should be taken, and if it was, presumably saved it along
* with the snapshot.
*/
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
* tracing_cond_snapshot_data - get the user data associated with a snapshot
* @tr: The tracing instance
*
* When the user enables a conditional snapshot using
* tracing_snapshot_cond_enable(), the user-defined cond_data is saved
* with the snapshot. This accessor is used to retrieve it.
*
* Should not be called from cond_snapshot.update(), since it takes
* the tr->max_lock lock, which the code calling
* cond_snapshot.update() has already done.
*
* Returns the cond_data associated with the trace array's snapshot.
*/
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
void *cond_data = NULL;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
cond_data = tr->cond_snapshot->cond_data;
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
int order;
int ret;
if (!tr->allocated_snapshot) {
/* Make the snapshot buffer have the same order as main buffer */
order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
if (ret < 0)
return ret;
/* allocate spare buffer */
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->array_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0)
return ret;
tr->allocated_snapshot = true;
}
return 0;
}
static void free_snapshot(struct trace_array *tr)
{
/*
* We don't free the ring buffer. Instead, we resize it because
* the max_tr ring buffer has some state (e.g. ring->clock) and
* we want to preserve it.
*/
ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
tr->allocated_snapshot = false;
}
static int tracing_arm_snapshot_locked(struct trace_array *tr)
{
int ret;
lockdep_assert_held(&trace_types_lock);
spin_lock(&tr->snapshot_trigger_lock);
if (tr->snapshot == UINT_MAX || tr->mapped) {
spin_unlock(&tr->snapshot_trigger_lock);
return -EBUSY;
}
tr->snapshot++;
spin_unlock(&tr->snapshot_trigger_lock);
ret = tracing_alloc_snapshot_instance(tr);
if (ret) {
spin_lock(&tr->snapshot_trigger_lock);
tr->snapshot--;
spin_unlock(&tr->snapshot_trigger_lock);
}
return ret;
}
int tracing_arm_snapshot(struct trace_array *tr)
{
int ret;
mutex_lock(&trace_types_lock);
ret = tracing_arm_snapshot_locked(tr);
mutex_unlock(&trace_types_lock);
return ret;
}
void tracing_disarm_snapshot(struct trace_array *tr)
{
spin_lock(&tr->snapshot_trigger_lock);
if (!WARN_ON(!tr->snapshot))
tr->snapshot--;
spin_unlock(&tr->snapshot_trigger_lock);
}
/**
* tracing_alloc_snapshot - allocate snapshot buffer.
*
* This only allocates the snapshot buffer if it isn't already
* allocated - it doesn't also take a snapshot.
*
* This is meant to be used in cases where the snapshot buffer needs
* to be set up for events that can't sleep but need to be able to
* trigger a snapshot.
*/
int tracing_alloc_snapshot(void)
{
struct trace_array *tr = &global_trace;
int ret;
ret = tracing_alloc_snapshot_instance(tr);
WARN_ON(ret < 0);
return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
* tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
*
* This is similar to tracing_snapshot(), but it will allocate the
* snapshot buffer if it isn't already allocated. Use this only
* where it is safe to sleep, as the allocation may sleep.
*
* This causes a swap between the snapshot buffer and the current live
* tracing buffer. You can use this to take snapshots of the live
* trace when some condition is triggered, but continue to trace.
*/
void tracing_snapshot_alloc(void)
{
int ret;
ret = tracing_alloc_snapshot();
if (ret < 0)
return;
tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
/**
* tracing_snapshot_cond_enable - enable conditional snapshot for an instance
* @tr: The tracing instance
* @cond_data: User data to associate with the snapshot
* @update: Implementation of the cond_snapshot update function
*
* Check whether the conditional snapshot for the given instance has
* already been enabled, or if the current tracer is already using a
* snapshot; if so, return -EBUSY, else create a cond_snapshot and
* save the cond_data and update function inside.
*
* Returns 0 if successful, error otherwise.
*/
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
cond_update_fn_t update)
{
struct cond_snapshot *cond_snapshot;
int ret = 0;
cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
if (!cond_snapshot)
return -ENOMEM;
cond_snapshot->cond_data = cond_data;
cond_snapshot->update = update;
mutex_lock(&trace_types_lock);
if (tr->current_trace->use_max_tr) {
ret = -EBUSY;
goto fail_unlock;
}
/*
* The cond_snapshot can only change to NULL without the
* trace_types_lock. We don't care if we race with it going
* to NULL, but we want to make sure that it's not set to
* something other than NULL when we get here, which we can
* do safely with only holding the trace_types_lock and not
* having to take the max_lock.
*/
if (tr->cond_snapshot) {
ret = -EBUSY;
goto fail_unlock;
}
ret = tracing_arm_snapshot_locked(tr);
if (ret)
goto fail_unlock;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot;
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
mutex_unlock(&trace_types_lock);
return ret;
fail_unlock:
mutex_unlock(&trace_types_lock);
kfree(cond_snapshot);
return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
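/*
* A sketch of the conditional snapshot flow (the "my_" names and the test
* are illustrative): update() is handed the cond_data passed to
* tracing_snapshot_cond() and returns true when the snapshot should really
* be taken, while the cond_data given to tracing_snapshot_cond_enable() can
* later be fetched with tracing_cond_snapshot_data().
*
*   static bool my_update(struct trace_array *tr, void *cond_data)
*   {
*           return my_condition_holds(cond_data);
*   }
*
*   tracing_snapshot_cond_enable(tr, my_data, my_update);
*
* then, from the point of interest:
*
*   tracing_snapshot_cond(tr, my_data);
*
* and finally tracing_snapshot_cond_disable(tr) when done.
*/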
/**
* tracing_snapshot_cond_disable - disable conditional snapshot for an instance
* @tr: The tracing instance
*
* Check whether the conditional snapshot for the given instance is
* enabled; if so, free the cond_snapshot associated with it,
* otherwise return -EINVAL.
*
* Returns 0 if successful, error otherwise.
*/
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
int ret = 0;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (!tr->cond_snapshot)
ret = -EINVAL;
else {
kfree(tr->cond_snapshot);
tr->cond_snapshot = NULL;
}
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
tracing_disarm_snapshot(tr);
return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
/* Give warning */
tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr) do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
ring_buffer_record_off(tr->array_buffer.buffer);
/*
* This flag is looked at when buffers haven't been allocated
* yet, or by some tracers (like irqsoff), that just want to
* know if the ring buffer has been disabled, but it can handle
* races where it gets disabled while we still do a record.
* As the check is in the fast path of the tracers, it is more
* important to be fast than accurate.
*/
tr->buffer_disabled = 1;
/* Make the flag seen by readers */
smp_wmb();
}
/**
* tracing_off - turn off tracing buffers
*
* This function stops the tracing buffers from recording data.
* It does not disable any overhead the tracers themselves may
* be causing. This function simply causes all recording to
* the ring buffers to fail.
*/
void tracing_off(void)
{
tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
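/*
* A common debugging pattern (the check is illustrative): freeze the ring
* buffers the moment a problem is detected, so the events leading up to it
* can be read afterwards from the trace file.
*
*   if (suspicious_state_detected())
*           tracing_off();
*/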
void disable_trace_on_warning(void)
{
if (__disable_trace_on_warning) {
trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
"Disabling tracing due to warning\n");
tracing_off();
}
}
/**
* tracer_tracing_is_on - show real state of ring buffer enabled
* @tr: the trace array to know if its ring buffer is enabled
*
* Shows the real state of the ring buffer, whether it is enabled or not.
*/
bool tracer_tracing_is_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
return !tr->buffer_disabled;
}
/**
* tracing_is_on - show state of ring buffers enabled
*/
int tracing_is_on(void)
{
return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
if (!str)
return 0;
buf_size = memparse(str, &str);
/*
* nr_entries can not be zero and the startup
* tests require some buffer space. Therefore
* ensure we have at least 4096 bytes of buffer.
*/
trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
unsigned long threshold;
int ret;
if (!str)
return 0;
ret = kstrtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
return nsecs / 1000;
}
/*
* TRACE_FLAGS is defined as a tuple matching bit masks with strings.
* It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
* matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
* of strings in the order that the evals (enum) were defined.
*/
#undef C
#define C(a, b) b
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
TRACE_FLAGS
NULL
};
static struct {
u64 (*func)(void);
const char *name;
int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
{ trace_clock_jiffies, "uptime", 0 },
{ trace_clock, "perf", 1 },
{ ktime_get_mono_fast_ns, "mono", 1 },
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
{ ktime_get_boot_fast_ns, "boot", 1 },
{ ktime_get_tai_fast_ns, "tai", 1 },
ARCH_TRACE_CLOCKS
};
bool trace_clock_in_ns(struct trace_array *tr)
{
if (trace_clocks[tr->clock_id].in_ns)
return true;
return false;
}
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
int trace_parser_get_init(struct trace_parser *parser, int size)
{
memset(parser, 0, sizeof(*parser));
parser->buffer = kmalloc(size, GFP_KERNEL);
if (!parser->buffer)
return 1;
parser->size = size;
return 0;
}
/*
* trace_parser_put - frees the buffer for trace parser
*/
void trace_parser_put(struct trace_parser *parser)
{
kfree(parser->buffer);
parser->buffer = NULL;
}
/*
* trace_get_user - reads the user input string separated by space
* (matched by isspace(ch))
*
* For each string found the 'struct trace_parser' is updated,
* and the function returns.
*
* Returns number of bytes read.
*
* See kernel/trace/trace.h for 'struct trace_parser' details.
*/
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char ch;
size_t read = 0;
ssize_t ret;
if (!*ppos)
trace_parser_clear(parser);
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
/*
* If the parser is not finished with the last write,
* continue reading the user input without skipping spaces.
*/
if (!parser->cont) {
/* skip white space */
while (cnt && isspace(ch)) {
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
parser->idx = 0;
/* only spaces were written */
if (isspace(ch) || !ch) {
*ppos += read;
ret = read;
goto out;
}
}
/* read the non-space input */
while (cnt && !isspace(ch) && ch) {
if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
else {
ret = -EINVAL;
goto out;
}
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
/* We either got finished input or we have to wait for another call. */
if (isspace(ch) || !ch) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
} else if (parser->idx < parser->size - 1) {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
/* Make sure the parsed string always terminates with '\0'. */
parser->buffer[parser->idx] = 0;
} else {
ret = -EINVAL;
goto out;
}
*ppos += read;
ret = read;
out:
return ret;
}
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
if (trace_seq_used(s) <= s->readpos)
return -EBUSY;
len = trace_seq_used(s) - s->readpos;
if (cnt > len)
cnt = len;
memcpy(buf, s->buffer + s->readpos, cnt);
s->readpos += cnt;
return cnt;
}
unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;
#ifdef LATENCY_FS_NOTIFY
static struct workqueue_struct *fsnotify_wq;
static void latency_fsnotify_workfn(struct work_struct *work)
{
struct trace_array *tr = container_of(work, struct trace_array,
fsnotify_work);
fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}
static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
struct trace_array *tr = container_of(iwork, struct trace_array,
fsnotify_irqwork);
queue_work(fsnotify_wq, &tr->fsnotify_work);
}
static void trace_create_maxlat_file(struct trace_array *tr,
struct dentry *d_tracer)
{
INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
tr->d_max_latency = trace_create_file("tracing_max_latency",
TRACE_MODE_WRITE,
d_tracer, tr,
&tracing_max_lat_fops);
}
__init static int latency_fsnotify_init(void)
{
fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!fsnotify_wq) {
pr_err("Unable to allocate tr_max_lat_wq\n");
return -ENOMEM;
}
return 0;
}
late_initcall_sync(latency_fsnotify_init);
void latency_fsnotify(struct trace_array *tr)
{
if (!fsnotify_wq)
return;
/*
* We cannot call queue_work(&tr->fsnotify_work) from here because it's
* possible that we are called from __schedule() or do_idle(), which
* could cause a deadlock.
*/
irq_work_queue(&tr->fsnotify_irqwork);
}
#else /* !LATENCY_FS_NOTIFY */
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
d_tracer, tr, &tracing_max_lat_fops)
#endif
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
* for later retrieval via /sys/kernel/tracing/tracing_max_latency)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct array_buffer *trace_buf = &tr->array_buffer;
struct array_buffer *max_buf = &tr->max_buffer;
struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
max_buf->cpu = cpu;
max_buf->time_start = data->preempt_timestamp;
max_data->saved_latency = tr->max_latency;
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
/*
* If tsk == current, then use current_uid(), as that does not use
* RCU. The irq tracer can be called out of RCU scope.
*/
if (tsk == current)
max_data->uid = current_uid();
else
max_data->uid = task_uid(tsk);
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
max_data->policy = tsk->policy;
max_data->rt_priority = tsk->rt_priority;
/* record this tasks comm */
tracing_record_cmdline(tsk);
latency_fsnotify(tr);
}
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
* @tsk: the task with the latency
* @cpu: The cpu that initiated the trace.
* @cond_data: User data associated with a conditional snapshot
*
* Flip the buffers between the @tr and the max_tr and record information
* about which task was the cause of this latency.
*/
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
void *cond_data)
{
if (tr->stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
WARN_ON_ONCE(tr->current_trace != &nop_trace);
return;
}
arch_spin_lock(&tr->max_lock);
/* Inherit the recordable setting from array_buffer */
if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
ring_buffer_record_on(tr->max_buffer.buffer);
else
ring_buffer_record_off(tr->max_buffer.buffer);
#ifdef CONFIG_TRACER_SNAPSHOT
if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
arch_spin_unlock(&tr->max_lock);
return;
}
#endif
swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
/* Any waiters on the old snapshot buffer need to wake up */
ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
/**
* update_max_tr_single - only copy one trace over, and reset the rest
* @tr: tracer
* @tsk: task with the latency
* @cpu: the cpu of the buffer to copy.
*
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int ret;
if (tr->stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
WARN_ON_ONCE(tr->current_trace != &nop_trace);
return;
}
arch_spin_lock(&tr->max_lock);
ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
if (ret == -EBUSY) {
/*
* We failed to swap the buffer due to a commit taking
* place on this CPU. We fail to record, but we reset
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
* Another reason is that a resize is in progress.
*/
trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
"Failed to swap buffers due to commit or resize in progress\n");
}
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
struct pipe_wait {
struct trace_iterator *iter;
int wait_index;
};
static bool wait_pipe_cond(void *data)
{
struct pipe_wait *pwait = data;
struct trace_iterator *iter = pwait->iter;
if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
return true;
return iter->closed;
}
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
struct pipe_wait pwait;
int ret;
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return 0;
pwait.wait_index = atomic_read_acquire(&iter->wait_index);
pwait.iter = iter;
ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
wait_pipe_cond, &pwait);
#ifdef CONFIG_TRACER_MAX_TRACE
/*
* Make sure this is still the snapshot buffer, as if a snapshot were
* to happen, this would now be the main buffer.
*/
if (iter->snapshot)
iter->array_buffer = &iter->tr->max_buffer;
#endif
return ret;
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;
struct trace_selftests {
struct list_head list;
struct tracer *type;
};
static LIST_HEAD(postponed_selftests);
static int save_selftest(struct tracer *type)
{
struct trace_selftests *selftest;
selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
if (!selftest)
return -ENOMEM;
selftest->type = type;
list_add(&selftest->list, &postponed_selftests);
return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
struct trace_array *tr = &global_trace;
struct tracer *saved_tracer = tr->current_trace;
int ret;
if (!type->selftest || tracing_selftest_disabled)
return 0;
/*
* If a tracer registers early in boot up (before scheduling is
* initialized and such), then do not run its selftests yet.
* Instead, run it a little later in the boot process.
*/
if (!selftests_can_run)
return save_selftest(type);
if (!tracing_is_on()) {
pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
type->name);
return 0;
}
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
* tracer to be this tracer. The tracer can then run some
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
tracing_reset_online_cpus(&tr->array_buffer);
tr->current_trace = type;
#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
/* If we expanded the buffers, make sure the max is expanded too */
if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
tr->allocated_snapshot = true;
}
#endif
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
/* the test is responsible for resetting too */
tr->current_trace = saved_tracer;
if (ret) {
printk(KERN_CONT "FAILED!\n");
/* Add the warning after printing 'FAILED' */
WARN_ON(1);
return -1;
}
/* Only reset on passing, to avoid touching corrupted buffers */
tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
tr->allocated_snapshot = false;
/* Shrink the max buffer again */
if (tr->ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
}
#endif
printk(KERN_CONT "PASSED\n");
return 0;
}
static int do_run_tracer_selftest(struct tracer *type)
{
int ret;
/*
* Tests can take a long time, especially if they are run one after the
* other, as does happen during bootup when all the tracers are
* registered. This could cause the soft lockup watchdog to trigger.
*/
cond_resched();
tracing_selftest_running = true;
ret = run_tracer_selftest(type);
tracing_selftest_running = false;
return ret;
}
static __init int init_trace_selftests(void)
{
struct trace_selftests *p, *n;
struct tracer *t, **last;
int ret;
selftests_can_run = true;
mutex_lock(&trace_types_lock);
if (list_empty(&postponed_selftests))
goto out;
pr_info("Running postponed tracer tests:\n");
tracing_selftest_running = true;
list_for_each_entry_safe(p, n, &postponed_selftests, list) {
/*
* This loop can take minutes when sanitizers are enabled, so
* let's make sure we allow RCU processing.
*/
cond_resched();
ret = run_tracer_selftest(p->type);
/* If the test fails, then warn and remove from available_tracers */
if (ret < 0) {
WARN(1, "tracer: %s failed selftest, disabling\n",
p->type->name);
last = &trace_types;
for (t = trace_types; t; t = t->next) {
if (t == p->type) {
*last = t->next;
break;
}
last = &t->next;
}
}
list_del(&p->list);
kfree(p);
}
tracing_selftest_running = false;
out:
mutex_unlock(&trace_types_lock);
return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);
static void __init apply_trace_boot_options(void);
/**
* register_tracer - register a tracer with the ftrace system.
* @type: the plugin for the tracer
*
* Register a new plugin tracer.
*/
int __init register_tracer(struct tracer *type)
{
struct tracer *t;
int ret = 0;
if (!type->name) {
pr_info("Tracer must have a name\n");
return -1;
}
if (strlen(type->name) >= MAX_TRACER_SIZE) {
pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
return -1;
}
if (security_locked_down(LOCKDOWN_TRACEFS)) {
pr_warn("Can not register tracer %s due to lockdown\n",
type->name);
return -EPERM;
}
mutex_lock(&trace_types_lock);
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
pr_info("Tracer %s already registered\n",
type->name);
ret = -1;
goto out;
}
}
if (!type->set_flag)
type->set_flag = &dummy_set_flag;
if (!type->flags) {
/* allocate a dummy tracer_flags */
type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
if (!type->flags) {
ret = -ENOMEM;
goto out;
}
type->flags->val = 0;
type->flags->opts = dummy_tracer_opt;
} else
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
/* store the tracer for __set_tracer_option */
type->flags->trace = type;
ret = do_run_tracer_selftest(type);
if (ret < 0)
goto out;
type->next = trace_types;
trace_types = type;
add_tracer_options(&global_trace, type);
out:
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
goto out_unlock;
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
goto out_unlock;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
/* Do we want this tracer to start on bootup? */
tracing_set_tracer(&global_trace, type->name);
default_bootup_tracer = NULL;
apply_trace_boot_options();
/* disable other selftests, since this will break them. */
disable_tracing_selftest("running a tracer");
out_unlock:
return ret;
}
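/*
* A minimal registration sketch (the "my_" names are illustrative; real
* tracers fill in many more callbacks): tracers are built in and register
* themselves from an __init function.
*
*   static struct tracer my_tracer __read_mostly = {
*           .name  = "my_tracer",
*           .init  = my_tracer_init,
*           .reset = my_tracer_reset,
*   };
*
*   static __init int init_my_tracer(void)
*   {
*           return register_tracer(&my_tracer);
*   }
*   core_initcall(init_my_tracer);
*/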
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
struct trace_buffer *buffer = buf->buffer;
if (!buffer)
return;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_rcu();
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
}
void tracing_reset_online_cpus(struct array_buffer *buf)
{
struct trace_buffer *buffer = buf->buffer;
if (!buffer)
return;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_rcu();
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
ring_buffer_reset_online_cpus(buffer);
ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus_unlocked(void)
{
struct trace_array *tr;
lockdep_assert_held(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->clear_trace)
continue;
tr->clear_trace = false;
tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
tracing_reset_online_cpus(&tr->max_buffer);
#endif
}
}
void tracing_reset_all_online_cpus(void)
{
mutex_lock(&trace_types_lock);
tracing_reset_all_online_cpus_unlocked();
mutex_unlock(&trace_types_lock);
}
int is_tracing_stopped(void)
{
return global_trace.stop_count;
}
static void tracing_start_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
if (tracing_disabled)
return;
raw_spin_lock_irqsave(&tr->start_lock, flags);
if (--tr->stop_count) {
if (WARN_ON_ONCE(tr->stop_count < 0)) {
/* Someone screwed up their debugging */
tr->stop_count = 0;
}
goto out;
}
/* Prevent the buffers from switching */
arch_spin_lock(&tr->max_lock);
buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#endif
arch_spin_unlock(&tr->max_lock);
out:
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
* tracing_start - quick start of the tracer
*
* If tracing is enabled but was stopped by tracing_stop,
* this will start the tracer back up.
*/
void tracing_start(void)
{
return tracing_start_tr(&global_trace);
}
static void tracing_stop_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
raw_spin_lock_irqsave(&tr->start_lock, flags);
if (tr->stop_count++)
goto out;
/* Prevent the buffers from switching */
arch_spin_lock(&tr->max_lock);
buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#endif
arch_spin_unlock(&tr->max_lock);
out:
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
* tracing_stop - quick stop of the tracer
*
* Lightweight way to stop tracing. Use in conjunction with
* tracing_start.
*/
void tracing_stop(void)
{
return tracing_stop_tr(&global_trace);
}
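/*
* Illustrative pairing (an added note, not from the original file):
* callers that need to quiesce the buffers temporarily wrap the
* critical region with the stop/start pair:
*
*	tracing_stop();
*	... inspect or dump the trace buffers ...
*	tracing_start();
*/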
/*
* Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
* overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
* simplifies those functions and keeps them in sync.
*/
enum print_line_t trace_handle_return(struct trace_seq *s)
{
return trace_seq_has_overflowed(s) ?
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
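/*
* Illustrative sketch (added, not part of the original file): a typical
* trace_event output callback builds its line in iter->seq and then
* collapses the overflow check with trace_handle_return(). The event
* name below is made up for the example.
*
*	static enum print_line_t
*	print_example_event(struct trace_iterator *iter, int flags,
*			    struct trace_event *event)
*	{
*		trace_seq_printf(&iter->seq, "example: %d\n", 42);
*		return trace_handle_return(&iter->seq);
*	}
*/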
static unsigned short migration_disable_value(void)
{
#if defined(CONFIG_SMP)
return current->migration_disabled;
#else
return 0;
#endif
}
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
unsigned int trace_flags = irqs_status;
unsigned int pc;
pc = preempt_count();
if (pc & NMI_MASK)
trace_flags |= TRACE_FLAG_NMI;
if (pc & HARDIRQ_MASK)
trace_flags |= TRACE_FLAG_HARDIRQ;
if (in_serving_softirq())
trace_flags |= TRACE_FLAG_SOFTIRQ;
if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
trace_flags |= TRACE_FLAG_BH_OFF;
if (tif_need_resched())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
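/*
* Added note (not in the original): the value returned above packs the
* tracing context into a single word:
*
*	bits 16-31: TRACE_FLAG_* bits (NMI/hardirq/softirq/resched state)
*	bits  4-7:  migration-disable depth, clamped to 15
*	bits  0-3:  preemption depth (pc & 0xff), clamped to 15
*
* so a single trace_ctx argument can carry both the flags and the
* counters through the reserve/commit paths below.
*/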
struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned int trace_ctx)
{
return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;
/**
* trace_buffered_event_enable - enable buffering events
*
* When events are being filtered, it is quicker to write the event
* data into a temporary buffer first if there is a likely chance
* that it will not be committed. Discarding an event from the ring
* buffer is not as fast as committing one, and is much slower than
* copying the data and committing it in one go.
*
* When an event is to be filtered, per-CPU buffers are allocated to
* write the event data into. If the event is filtered and discarded,
* it is simply dropped; otherwise the entire data is committed in
* one shot.
*/
void trace_buffered_event_enable(void)
{
struct ring_buffer_event *event;
struct page *page;
int cpu;
WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
if (trace_buffered_event_ref++)
return;
for_each_tracing_cpu(cpu) {
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
/* This is just an optimization and can handle failures */
if (!page) {
pr_err("Failed to allocate event buffer\n");
break;
}
event = page_address(page);
memset(event, 0, sizeof(*event));
per_cpu(trace_buffered_event, cpu) = event;
preempt_disable();
if (cpu == smp_processor_id() &&
__this_cpu_read(trace_buffered_event) !=
per_cpu(trace_buffered_event, cpu))
WARN_ON_ONCE(1);
preempt_enable();
}
}
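/*
* Illustrative usage (an assumption, not taken from this file): a
* caller installing an event filter brackets the filter's lifetime
* with the enable/disable pair while holding event_mutex:
*
*	mutex_lock(&event_mutex);
*	trace_buffered_event_enable();
*	... install the filter ...
*	mutex_unlock(&event_mutex);
*
* and on removal:
*
*	mutex_lock(&event_mutex);
*	... remove the filter ...
*	trace_buffered_event_disable();
*	mutex_unlock(&event_mutex);
*
* The reference count lets several filters share the same per-CPU pages.
*/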
static void enable_trace_buffered_event(void *data)
{
/* Probably not needed, but do it anyway */
smp_rmb();
this_cpu_dec(trace_buffered_event_cnt);
}
static void disable_trace_buffered_event(void *data)
{
this_cpu_inc(trace_buffered_event_cnt);
}
/**
* trace_buffered_event_disable - disable buffering events
*
* When a filter is removed, it is faster to not use the buffered
* events, and to commit directly into the ring buffer. Free up
* the temp buffers when there are no more users. This requires
* special synchronization with current events.
*/
void trace_buffered_event_disable(void)
{
int cpu;
WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
if (WARN_ON_ONCE(!trace_buffered_event_ref))
return;
if (--trace_buffered_event_ref)
return;
/* For each CPU, set the buffer as used. */
on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
NULL, true);
/* Wait for all current users to finish */
synchronize_rcu();
for_each_tracing_cpu(cpu) {
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
per_cpu(trace_buffered_event, cpu) = NULL;
}
/*
* Wait for all CPUs that may have started checking whether they can
* use their event buffer only after the previous synchronize_rcu()
* call and that still read a valid pointer from trace_buffered_event.
* It must be ensured that they do not see the cleared
* trace_buffered_event_cnt, or they could wrongly decide to use the
* pointed-to buffer, which is now freed.
*/
synchronize_rcu();
/* For each CPU, relinquish the buffer */
on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
true);
}
static struct trace_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned int trace_ctx)
{
struct ring_buffer_event *entry;
struct trace_array *tr = trace_file->tr;
int val;
*current_rb = tr->array_buffer.buffer;
if (!tr->no_filter_buffering_ref &&
(trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
preempt_disable_notrace();
/*
* Filtering is on, so try to use the per cpu buffer first.
* This buffer will simulate a ring_buffer_event,
* where the type_len is zero and the array[0] will
* hold the full length.
* (see include/linux/ring_buffer.h for details on
* how the ring_buffer_event is structured).
*
* Using a temp buffer during filtering and copying it
* on a matched filter is quicker than writing directly
* into the ring buffer and then discarding it when
* it doesn't match. That is because the discard
* requires several atomic operations to get right.
* Copying on a match and doing nothing on a failed match
* is still quicker than skipping the copy on a match but
* having to discard out of the ring buffer on a failed match.
*/
if ((entry = __this_cpu_read(trace_buffered_event))) {
int max_len = PAGE_SIZE - struct_size(entry, array, 1);
val = this_cpu_inc_return(trace_buffered_event_cnt);
/*
* Preemption is disabled, but interrupts and NMIs
* can still come in now. If that happens after
* the above increment, then it will have to go
* back to the old method of allocating the event
* on the ring buffer, and if the filter fails, it
* will have to call ring_buffer_discard_commit()
* to remove it.
*
* Need to also check the unlikely case that the
* length is bigger than the temp buffer size.
* If that happens, then the reserve is pretty much
* guaranteed to fail, as the ring buffer currently
* only allows events less than a page. But that may
* change in the future, so let the ring buffer reserve
* handle the failure in that case.
*/
if (val == 1 && likely(len <= max_len)) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
/* Return with preemption disabled */
return entry;
}
this_cpu_dec(trace_buffered_event_cnt);
}
/* __trace_buffer_lock_reserve() disables preemption */
preempt_enable_notrace();
}
entry = __trace_buffer_lock_reserve(*current_rb, type, len,
trace_ctx);
/*
* If tracing is off, but we have triggers enabled,
* we still need to look at the event data. Use the temp_buffer
* to store the trace event for the trigger to use. It is recursion
* safe and will not be recorded anywhere.
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
entry = __trace_buffer_lock_reserve(*current_rb, type, len,
trace_ctx);
}
return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
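/*
* Sketch of the expected calling pattern (an added illustration; the
* generated TRACE_EVENT code is not part of this file): reserve, fill
* in the entry, then commit; the commit side pairs with the preemption
* handling noted in the reserve path above.
*
*	fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer,
*							 trace_file, type,
*							 len, trace_ctx);
*	if (!fbuffer->event)
*		return;
*	entry = ring_buffer_event_data(fbuffer->event);
*	... fill in the entry fields ...
*	trace_event_buffer_commit(fbuffer);
*/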
static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);
static void output_printk(struct trace_event_buffer *fbuffer)
{
struct trace_event_call *event_call;
struct trace_event_file *file;
struct trace_event *event;
unsigned long flags;
struct trace_iterator *iter = tracepoint_print_iter;
/* We should never get here if iter is NULL */
if (WARN_ON_ONCE(!iter))
return;
event_call = fbuffer->trace_file->event_call;
if (!event_call || !event_call->event.funcs ||
!event_call->event.funcs->trace)
return;
file = fbuffer->trace_file;
if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
(unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
!filter_match_preds(file->filter, fbuffer->entry)))
return;
event = &fbuffer->trace_file->event_call->event;
raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
trace_seq_init(&iter->seq);
iter->ent = fbuffer->entry;
event_call->event.funcs->trace(iter, 0, event);
trace_seq_putc(&iter->seq, 0);
printk("%s", iter->seq.buffer);
raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos)
{
int save_tracepoint_printk;
int ret;
mutex_lock(&tracepoint_printk_mutex);
save_tracepoint_printk = tracepoint_printk;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
/*
* This will force exiting early, as tracepoint_printk
* is always zero when tracepoint_print_iter is not allocated.
*/
if (!tracepoint_print_iter)
tracepoint_printk = 0;
if (save_tracepoint_printk == tracepoint_printk)
goto out;
if (tracepoint_printk)
static_key_enable(&tracepoint_printk_key.key);
else
static_key_disable(&tracepoint_printk_key.key);
out:
mutex_unlock(&tracepoint_printk_mutex);
return ret;
}
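/*
* Usage note (an assumption about the sysctl wiring, not stated in this
* file): the handler above backs kernel.tracepoint_printk, so the
* printk mirror is normally toggled from user space with:
*
*	echo 1 > /proc/sys/kernel/tracepoint_printk
*	echo 0 > /proc/sys/kernel/tracepoint_printk
*
* The static key keeps the disabled case off the event fast path.
*/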
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
enum event_trigger_type tt = ETT_NONE;
struct trace_event_file *file = fbuffer->trace_file;
if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
fbuffer->entry, &tt))
goto discard;
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
if (static_branch_unlikely(&trace_event_exports_enabled))
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
discard:
if (tt)
event_triggers_post_call(file, tt);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
/*
* Skip 3:
*
* trace_buffer_unlock_commit_regs()
* trace_event_buffer_commit()
* trace_event_raw_event_xxx()
*/
# define STACK_SKIP 3
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned int trace_ctx,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
/*
* If regs is not set, then skip the necessary functions.
* Note, we can still get here via blktrace, wakeup tracer
* and mmiotrace, but that's ok if they lose a function or
* two. They are not that meaningful.
*/
ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
ftrace_trace_userstack(tr, buffer, trace_ctx);
}
/*
* Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
*/
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
__buffer_unlock_commit(buffer, event);
}
void
trace_function(struct trace_array *tr, unsigned long ip, unsigned long
parent_ip, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_function;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->parent_ip = parent_ip;
if (!call_filter_check_discard(call, entry, buffer, event)) {
if (static_branch_unlikely(&trace_function_exports_enabled))
ftrace_exports(event, TRACE_EXPORT_FUNCTION);
__buffer_unlock_commit(buffer, event);
}
}
#ifdef CONFIG_STACKTRACE
/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING 4
#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
struct ftrace_stack {
unsigned long calls[FTRACE_KSTACK_ENTRIES];
};
struct ftrace_stacks {
struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
};
static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
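/*
* Added note (not in the original): with the definitions above, each CPU
* gets FTRACE_KSTACK_NESTING contexts of FTRACE_KSTACK_ENTRIES slots
* each. Assuming 4 KiB pages and 64-bit longs, that is 4096 / 4 = 1024
* entries per context, i.e. 8 KiB per context and 32 KiB of per-CPU
* stack storage.
*/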
static void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned int trace_ctx,
int skip, struct pt_regs *regs)
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
unsigned int size, nr_entries;
struct ftrace_stack *fstack;
struct stack_entry *entry;
int stackidx;
/*
* Add one, for this function and the call to stack_trace_save().
* If regs is set, then these functions will not be in the way.
*/
#ifndef CONFIG_UNWINDER_ORC
if (!regs)
skip++;
#endif
preempt_disable_notrace();
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
/* This should never happen. If it does, yell once and skip */
if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
goto out;
/*
* The above __this_cpu_inc_return() is 'atomic' cpu local. An
* interrupt will either see the value pre increment or post
* increment. If the interrupt happens pre increment it will have
* restored the counter when it returns. We just need a barrier to
* keep gcc from moving things around.
*/
barrier();
fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
size = ARRAY_SIZE(fstack->calls);
if (regs) {
nr_entries = stack_trace_save_regs(regs, fstack->calls,
size, skip);
} else {
nr_entries = stack_trace_save(fstack->calls, size, skip);
}
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
struct_size(entry, caller, nr_entries),
trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->size = nr_entries;
memcpy(&entry->caller, fstack->calls,
flex_array_size(entry, caller, nr_entries));
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
barrier();
__this_cpu_dec(ftrace_stack_reserve);
preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
unsigned int trace_ctx,
int skip, struct pt_regs *regs)
{
if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
}
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
int skip)
{
struct trace_buffer *buffer = tr->array_buffer.buffer;
if (rcu_is_watching()) {
__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
return;
}
if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
return;
/*
* When an NMI triggers, RCU is enabled via ct_nmi_enter(),
* but if the above rcu_is_watching() failed, then the NMI
* triggered someplace critical, and ct_irq_enter() should
* not be called from NMI.
*/
if (unlikely(in_nmi()))
return;
ct_irq_enter_irqson();
__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
ct_irq_exit_irqson();
}
/**
* trace_dump_stack - record a stack back trace in the trace buffer
* @skip: Number of functions to skip (helper handlers)
*/
void trace_dump_stack(int skip)
{
if (tracing_disabled || tracing_selftest_running)
return;
#ifndef CONFIG_UNWINDER_ORC
/* Skip 1 to skip this function. */
skip++;
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);
static void
ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
* NMIs cannot handle page faults, even with fixups.
* Saving the user stack can (and often does) fault.
*/
if (unlikely(in_nmi()))
return;
/*
* Prevent recursion, since the user stack tracing may
* trigger other kernel events.
*/
preempt_disable();
if (__this_cpu_read(user_stack_count))
goto out;
__this_cpu_inc(user_stack_count);
event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), trace_ctx);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
entry->tgid = current->tgid;
memset(&entry->caller, 0, sizeof(entry->caller));
stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
out:
preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
unsigned int trace_ctx)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
#endif /* CONFIG_STACKTRACE */
static inline void
func_repeats_set_delta_ts(struct func_repeats_entry *entry,
unsigned long long delta)
{
entry->bottom_delta_ts = delta & U32_MAX;
entry->top_delta_ts = (delta >> 32);
}
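/*
* Added note (not in the original): a reader can reconstruct the 64-bit
* delta from the two 32-bit halves stored above:
*
*	delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
*/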
void trace_last_func_repeats(struct trace_array *tr,
struct trace_func_repeats *last_info,
unsigned int trace_ctx)
{
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct func_repeats_entry *entry;
struct ring_buffer_event *event;
u64 delta;
event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
sizeof(*entry), trace_ctx);
if (!event)
return;
delta = ring_buffer_event_time_stamp(buffer, event) -
last_info->ts_last_call;
entry = ring_buffer_event_data(event);
entry->ip = last_info->ip;
entry->parent_ip = last_info->parent_ip;
entry->count = last_info->count;
func_repeats_set_delta_ts(entry, delta);
__buffer_unlock_commit(buffer, event);
}
/* created for use with alloc_percpu */
struct trace_buffer_struct {
int nesting;
char buffer[4][TRACE_BUF_SIZE];
};
static struct trace_buffer_struct __percpu *trace_percpu_buffer;
/*
* This allows for lockless recording. If we're nested too deeply, then
* this returns NULL.
*/
static char *get_trace_buf(void)
{
struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
if (!trace_percpu_buffer || buffer->nesting >= 4)
return NULL;
buffer->nesting++;
/* Interrupts must see nesting incremented before we use the buffer */
barrier();
return &buffer->buffer[buffer->nesting - 1][0];
}
static void put_trace_buf(void)
{
/* Don't let the decrement of nesting leak before this */
barrier();
this_cpu_dec(trace_percpu_buffer->nesting);
}
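/*
* Illustrative note (added): the nesting counter gives each context on a
* CPU its own line buffer, so a sequence such as
*
*	process context:	get_trace_buf() -> buffer[0]
*	  nested IRQ:		get_trace_buf() -> buffer[1]
*	    nested NMI:		get_trace_buf() -> buffer[2]
*
* never clobbers a buffer that is still in use. Callers are expected to
* disable preemption around the get/put pair, as trace_vbprintk() below
* does.
*/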
static int alloc_percpu_trace_buffer(void)
{
struct trace_buffer_struct __percpu *buffers;
if (trace_percpu_buffer)
return 0;
buffers = alloc_percpu(struct trace_buffer_struct);
if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
return -ENOMEM;
trace_percpu_buffer = buffers;
return 0;
}
static int buffers_allocated;
void trace_printk_init_buffers(void)
{
if (buffers_allocated)
return;
if (alloc_percpu_trace_buffer())
return;
/* trace_printk() is for debug use only. Don't use it in production. */
pr_warn("\n");
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
pr_warn("** **\n");
pr_warn("** This means that this is a DEBUG kernel and it is **\n");
pr_warn("** unsafe for production use. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging **\n");
pr_warn("** the kernel, report this immediately to your vendor! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
/* Expand the buffers to set size */
tracing_update_buffers(&global_trace);
buffers_allocated = 1;
/*
* trace_printk_init_buffers() can be called by modules.
* If that happens, then we need to start cmdline recording
* directly here. If the global_trace.array_buffer.buffer is already
* allocated here, then this was called by module code.
*/
if (global_trace.array_buffer.buffer)
tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
void trace_printk_start_comm(void)
{
/* Start tracing comms if trace printk is set */
if (!buffers_allocated)
return;
tracing_start_cmdline_record();
}
static void trace_printk_start_stop_comm(int enabled)
{
if (!buffers_allocated)
return;
if (enabled)
tracing_start_cmdline_record();
else
tracing_stop_cmdline_record();
}
/**
* trace_vbprintk - write binary msg to tracing buffer
* @ip: The address of the caller
* @fmt: The string format to write to the buffer
* @args: Arguments for @fmt
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
struct trace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
unsigned int trace_ctx;
char *tbuffer;
int len = 0, size;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
len = 0;
goto out_nobuffer;
}
len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out_put;
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->fmt = fmt;
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
}
out:
ring_buffer_nest_end(buffer);
out_put:
put_trace_buf();
out_nobuffer:
preempt_enable_notrace();
unpause_graph_tracing();
return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
__printf(3, 0)
static int
__trace_array_vprintk(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, va_list args)
{
struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
int len = 0, size;
struct print_entry *entry;
unsigned int trace_ctx;
char *tbuffer;
if (tracing_disabled)
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
len = 0;
goto out_nobuffer;
}
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
}
out:
ring_buffer_nest_end(buffer);
put_trace_buf();
out_nobuffer:
preempt_enable_notrace();
unpause_graph_tracing();
return len;
}
__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
if (tracing_selftest_running && tr == &global_trace)
return 0;
return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
/**
* trace_array_printk - Print a message to a specific instance
* @tr: The instance trace_array descriptor
* @ip: The instruction pointer that this is called from.
* @fmt: The format to print (printf format)
*
* If a subsystem sets up its own instance, it has the right to
* printk strings into its tracing instance buffer using this
* function. Note, this function will not write into the top level
* buffer (use trace_printk() for that), as the top level buffer
* should only hold events that can be individually disabled.
* trace_printk() is only used for debugging a kernel, and should
* never be incorporated into normal use.
*
* trace_array_printk() can be used, as it will not add noise to the
* top level tracing buffer.
*
* Note, trace_array_init_printk() must be called on @tr before this
* can be used.
*/
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
int ret;
va_list ap;