// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point)	trace_##point
#else
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif
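
/*
 * For example, trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1) expands to
 * trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1) on architectures with
 * noinstr tooling, and to
 * "if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1)"
 * otherwise.
 */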

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

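/*
 * Full hardirqs-on hook: if this CPU previously traced IRQs as disabled,
 * emit the irq_enable tracepoint and notify the irqsoff tracer, then do
 * the lockdep hardirqs-on bookkeeping.
 */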
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

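/*
 * Full hardirqs-off hook: do the lockdep hardirqs-off bookkeeping first,
 * then notify the irqsoff tracer and emit the irq_disable tracepoint
 * unless this CPU has already traced IRQs as disabled.
 */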
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

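/*
 * Hook called when preemption is re-enabled: emit the preempt_enable
 * tracepoint and notify the preemptoff tracer.
 */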
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}

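/*
 * Hook called when preemption is disabled: emit the preempt_disable
 * tracepoint and notify the preemptoff tracer.
 */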
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif /* CONFIG_TRACE_PREEMPT_TOGGLE */