ftrace: sched special

Introduce ftrace_special(), a lightweight helper that records up to
three arbitrary values in the trace buffer, for ad-hoc instrumentation
of the scheduler. Use it in the wake-affine and wakeup-preemption paths
of sched_fair.c, and prefix the TRACE_SPECIAL output format with '#' so
these entries stand out from ordinary trace lines.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
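---
A minimal usage sketch (editor's illustration, not part of this patch):
any kernel path can hand ftrace_special() whatever three values are of
interest, for example:

	/* hypothetical call site: tag a sample with its source line */
	ftrace_special(__LINE__, nr_running(), 0);

Each call shows up in the trace output as '# <arg1> <arg2> <arg3>'.
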
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5b186be..360ca99 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2138,6 +2138,8 @@
 ftrace_wake_up_task(void *rq, struct task_struct *wakee,
 		    struct task_struct *curr);
 extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
 static inline void
 ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
@@ -2155,6 +2157,10 @@
 static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 {
 }
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
 #endif
 
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd3..dc1856f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,6 +1061,9 @@
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
+	/* debug: dump the waker's and wakee's avg_overlap for this decision */
+	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
+	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1238,6 +1241,8 @@
 	if (unlikely(se == pse))
 		return;
 
+	/* debug: log the wakee's pid and the current task's last_wakeup stamp */
+	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3a40324..b87a264 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1251,7 +1251,7 @@
 				 comm);
 		break;
 	case TRACE_SPECIAL:
-		trace_seq_printf(s, " %ld %ld %ld\n",
+		trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
@@ -1335,7 +1335,7 @@
 			return 0;
 		break;
 	case TRACE_SPECIAL:
-		ret = trace_seq_printf(s, " %ld %ld %ld\n",
+		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
@@ -1400,7 +1400,7 @@
 		break;
 	case TRACE_SPECIAL:
 	case TRACE_STACK:
-		ret = trace_seq_printf(s, " %ld %ld %ld\n",
+		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5a217e8..bddf676 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -103,6 +103,36 @@
 	wakeup_sched_wakeup(wakee, curr);
 }
 
+/*
+ * ftrace_special - record up to three ad-hoc values in the trace buffer
+ *
+ * Runs with interrupts disabled; the per-cpu ->disabled counter avoids
+ * recursion so only the outermost caller on this CPU records an entry.
+ */
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	struct trace_array *tr = ctx_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (!tracer_enabled)
+		return;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		__trace_special(tr, data, arg1, arg2, arg3);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;