blob: 7e62c0a18456ebbad8fc66a83a9f2578a081e201 [file] [log] [blame]
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Steven Rostedtad8d75f2009-04-14 19:39:12 -040013#include <trace/events/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled;
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +010019static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex);
Steven Rostedt5fec6dd2009-03-17 19:59:53 -040021static int sched_stopped;
Steven Rostedt35e8e302008-05-12 21:20:42 +020022
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020023
/*
 * tracing_sched_switch_trace - record one context-switch event
 * @tr:    trace array whose ring buffer receives the event
 * @prev:  task being scheduled out
 * @next:  task being scheduled in
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_CTX entry in @tr's ring buffer, fills in pid/prio/state
 * for both tasks plus the CPU @next runs on, then commits the event unless
 * the event filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	/* NULL means the buffer is full or recording is disabled */
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);

	/* commit only if the event filter did not discard the entry */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
51
Ingo Molnare309b412008-05-12 21:20:51 +020052static void
Steven Rostedt38516ab2010-04-20 17:04:50 -040053probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
Steven Rostedt35e8e302008-05-12 21:20:42 +020054{
Steven Rostedt35e8e302008-05-12 21:20:42 +020055 struct trace_array_cpu *data;
56 unsigned long flags;
Steven Rostedt35e8e302008-05-12 21:20:42 +020057 int cpu;
Steven Rostedt38697052008-10-01 13:14:09 -040058 int pc;
Steven Rostedt35e8e302008-05-12 21:20:42 +020059
Zhaoleidcef7882009-03-31 15:26:14 +080060 if (unlikely(!sched_ref))
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040061 return;
62
Steven Rostedt41bc8142008-05-22 11:49:22 -040063 tracing_record_cmdline(prev);
64 tracing_record_cmdline(next);
65
Zhaoleidcef7882009-03-31 15:26:14 +080066 if (!tracer_enabled || sched_stopped)
Steven Rostedt35e8e302008-05-12 21:20:42 +020067 return;
68
Steven Rostedt38697052008-10-01 13:14:09 -040069 pc = preempt_count();
Steven Rostedt18cef372008-05-12 21:20:44 +020070 local_irq_save(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020071 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040072 data = ctx_trace->data[cpu];
Steven Rostedt35e8e302008-05-12 21:20:42 +020073
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -040074 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -050075 tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
Steven Rostedt35e8e302008-05-12 21:20:42 +020076
Steven Rostedt18cef372008-05-12 21:20:44 +020077 local_irq_restore(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020078}
79
/*
 * tracing_sched_wakeup_trace - record one wakeup event
 * @tr:    trace array whose ring buffer receives the event
 * @wakee: task being woken up
 * @curr:  task performing the wakeup
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_WAKE entry, stores @curr in the prev_* fields and
 * @wakee in the next_* fields, and commits it unless filtered out.
 * Note: the kernel and user stack traces below are recorded even when
 * the filter discards the wakeup entry itself.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	/* NULL means the buffer is full or recording is disabled */
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
109
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200110static void
Steven Rostedt38516ab2010-04-20 17:04:50 -0400111probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200112{
Ingo Molnar57422792008-05-12 21:20:51 +0200113 struct trace_array_cpu *data;
114 unsigned long flags;
Steven Rostedt38697052008-10-01 13:14:09 -0400115 int cpu, pc;
Ingo Molnar57422792008-05-12 21:20:51 +0200116
Zhaoleidcef7882009-03-31 15:26:14 +0800117 if (unlikely(!sched_ref))
118 return;
119
120 tracing_record_cmdline(current);
121
122 if (!tracer_enabled || sched_stopped)
Ingo Molnar57422792008-05-12 21:20:51 +0200123 return;
124
Steven Rostedt38697052008-10-01 13:14:09 -0400125 pc = preempt_count();
Ingo Molnar57422792008-05-12 21:20:51 +0200126 local_irq_save(flags);
127 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -0400128 data = ctx_trace->data[cpu];
Ingo Molnar57422792008-05-12 21:20:51 +0200129
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -0400130 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500131 tracing_sched_wakeup_trace(ctx_trace, wakee, current,
Steven Rostedt38697052008-10-01 13:14:09 -0400132 flags, pc);
Ingo Molnar57422792008-05-12 21:20:51 +0200133
Ingo Molnar57422792008-05-12 21:20:51 +0200134 local_irq_restore(flags);
135}
136
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200137static int tracing_sched_register(void)
138{
139 int ret;
140
Steven Rostedt38516ab2010-04-20 17:04:50 -0400141 ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200142 if (ret) {
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -0400143 pr_info("wakeup trace: Couldn't activate tracepoint"
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200144 " probe to kernel_sched_wakeup\n");
145 return ret;
146 }
147
Steven Rostedt38516ab2010-04-20 17:04:50 -0400148 ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200149 if (ret) {
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -0400150 pr_info("wakeup trace: Couldn't activate tracepoint"
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200151 " probe to kernel_sched_wakeup_new\n");
152 goto fail_deprobe;
153 }
154
Steven Rostedt38516ab2010-04-20 17:04:50 -0400155 ret = register_trace_sched_switch(probe_sched_switch, NULL);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200156 if (ret) {
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -0400157 pr_info("sched trace: Couldn't activate tracepoint"
Wenji Huang73d8b8b2009-02-17 01:10:02 -0500158 " probe to kernel_sched_switch\n");
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200159 goto fail_deprobe_wake_new;
160 }
161
162 return ret;
163fail_deprobe_wake_new:
Steven Rostedt38516ab2010-04-20 17:04:50 -0400164 unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200165fail_deprobe:
Steven Rostedt38516ab2010-04-20 17:04:50 -0400166 unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200167 return ret;
168}
169
/*
 * Detach all three scheduler tracepoint probes, in the reverse order of
 * their registration in tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
176
Ingo Molnarf2252932008-05-22 10:37:48 +0200177static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200178{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100179 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500180 if (!(sched_ref++))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200181 tracing_sched_register();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100182 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200183}
184
Ingo Molnarf2252932008-05-22 10:37:48 +0200185static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200186{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100187 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500188 if (!(--sched_ref))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200189 tracing_sched_unregister();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100190 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200191}
192
/*
 * Start recording comm/pid mappings by taking a reference on the sched
 * tracepoint probes (which call tracing_record_cmdline()).
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
197
/*
 * Stop recording comm/pid mappings by dropping the reference taken in
 * tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
202
Steven Rostedt75f5c472008-11-07 22:36:02 -0500203/**
Steven Rostedte168e052008-11-07 22:36:02 -0500204 * tracing_start_sched_switch_record - start tracing context switches
205 *
206 * Turns on context switch tracing for a tracer.
207 */
208void tracing_start_sched_switch_record(void)
209{
210 if (unlikely(!ctx_trace)) {
211 WARN_ON(1);
212 return;
213 }
214
215 tracing_start_sched_switch();
216
217 mutex_lock(&sched_register_mutex);
218 tracer_enabled++;
219 mutex_unlock(&sched_register_mutex);
220}
221
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	/* more stops than starts indicates unbalanced callers */
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
236
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
249