ftrace: make some tracers reentrant
Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, the debugging tracers) can also be made reentrant.
Note: never make the function tracer reentrant; that can cause
recursion problems all over the kernel. The function tracer
must keep its reentrancy protection.
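
For illustration, a minimal userspace sketch of the pattern change,
using C11 atomics. The trace_data struct, probe_old()/probe_new() and
the printf() record stand-in are hypothetical names for this sketch,
not the kernel code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-CPU trace_array_cpu. */
    struct trace_data {
            atomic_long disabled;   /* nonzero: tracing paused */
    };

    /* Old pattern: the atomic counter doubles as a recursion guard;
     * only the first, non-nested entry records an event. */
    static void probe_old(struct trace_data *data)
    {
            long disabled = atomic_fetch_add(&data->disabled, 1) + 1;

            if (disabled == 1)
                    printf("record event\n");  /* tracing_*_trace() stand-in */
            atomic_fetch_sub(&data->disabled, 1);
    }

    /* New pattern: the ring buffer is reentrant, so a plain read of
     * the pause flag is enough; no inc/dec pair is needed. */
    static void probe_new(struct trace_data *data)
    {
            if (!atomic_load(&data->disabled))
                    printf("record event\n");
    }

    int main(void)
    {
            static struct trace_data data;          /* zero-initialized */

            probe_new(&data);                       /* records: enabled */
            atomic_fetch_add(&data.disabled, 1);    /* pause tracing */
            probe_new(&data);                       /* skipped: paused */
            return 0;
    }

With the old pattern, the inc/dec pair both paused tracing and caught
recursion; with the reentrant ring buffer, ->disabled is only a pause
flag, so the probes just read it.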
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index c7fa08a..b8f56be 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -24,7 +24,6 @@
{
struct trace_array_cpu *data;
unsigned long flags;
- long disabled;
int cpu;
int pc;
@@ -41,12 +40,10 @@
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = ctx_trace->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1))
+ if (likely(!atomic_read(&data->disabled)))
tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
- atomic_dec(&data->disabled);
local_irq_restore(flags);
}
@@ -55,7 +52,6 @@
{
struct trace_array_cpu *data;
unsigned long flags;
- long disabled;
int cpu, pc;
if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = ctx_trace->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1))
+ if (likely(!atomic_read(&data->disabled)))
tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
flags, pc);
- atomic_dec(&data->disabled);
local_irq_restore(flags);
}