/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

/*
 * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
 * "return address pointer" which can be used to uniquely identify a return
 * address which has been overwritten.
 *
 * On arm64 we use the address of the caller's frame record, which remains the
 * same for the lifetime of the instrumented function, unlike the return
 * address in the LR.
 */
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define NR_FTRACE_PLTS		1

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. Defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER tells the stack tracer to expect to find
 * the return address on the stack after the local variables have been set
 * up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it does.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

unsigned long ftrace_call_adjust(unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;

#define arch_ftrace_get_regs(regs) NULL

/*
 * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
 * stack alignment
 */
struct ftrace_regs {
	/* x0 - x8 */
	unsigned long regs[9];

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long direct_tramp;
#else
	unsigned long __unused;
#endif

	unsigned long fp;
	unsigned long lr;

	unsigned long sp;
	unsigned long pc;
};
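
/*
 * The layout above is expected to match the register save area built by the
 * ftrace trampoline (ftrace_caller in arch/arm64/kernel/entry-ftrace.S); the
 * two must be kept in sync.
 */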

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
	return fregs->pc;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long pc)
{
	fregs->pc = pc;
}

static __always_inline unsigned long
ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return fregs->sp;
}

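/*
 * Per the AAPCS64 procedure call standard, only the first eight arguments are
 * passed in registers (x0 - x7); higher argument indices are not available
 * here and read as zero.
 */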
static __always_inline unsigned long
ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
{
	if (n < 8)
		return fregs->regs[n];
	return 0;
}

static __always_inline unsigned long
ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return fregs->regs[0];
}

static __always_inline void
ftrace_regs_set_return_value(struct ftrace_regs *fregs,
			     unsigned long ret)
{
	fregs->regs[0] = ret;
}

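/*
 * Make the instrumented function return immediately to its caller: execution
 * resumes at the saved LR, with x0 holding whatever return value has been
 * placed there (e.g. via ftrace_regs_set_return_value()).
 */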
static __always_inline void
ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	fregs->pc = fregs->lr;
}

int ftrace_regs_query_register_offset(const char *name);

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr)
{
	/*
	 * The ftrace trampoline will return to this address instead of the
	 * instrumented function.
	 */
	fregs->direct_tramp = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in bogus syscalls being reported, or
 * even a hang, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have the __arm64_ prefix, we must skip
	 * it. However, as described above, we decided to ignore compat
	 * syscalls, so we don't care about the __arm64_compat_ prefix here.
	 */
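	/* 8 == strlen("__arm64_") */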
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#endif /* __ASM_FTRACE_H */