AKASHI Takahiro | 819e50e | 2014-04-30 18:54:33 +0900 | [diff] [blame] | 1 | /* |
| 2 | * arch/arm64/kernel/entry-ftrace.S |
| 3 | * |
| 4 | * Copyright (C) 2013 Linaro Limited |
| 5 | * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. |
| 10 | */ |
| 11 | |
| 12 | #include <linux/linkage.h> |
Arnd Bergmann | f705d95 | 2017-02-14 22:32:58 +0100 | [diff] [blame] | 13 | #include <asm/assembler.h> |
AKASHI Takahiro | 819e50e | 2014-04-30 18:54:33 +0900 | [diff] [blame] | 14 | #include <asm/ftrace.h> |
| 15 | #include <asm/insn.h> |
| 16 | |
| 17 | /* |
| 18 | * Gcc with -pg will put the following code in the beginning of each function: |
| 19 | * mov x0, x30 |
| 20 | * bl _mcount |
| 21 | * [function's body ...] |
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
| 24 | * |
| 25 | * Please note that x0 as an argument will not be used here because we can |
| 26 | * get lr(x30) of instrumented function at any time by winding up call stack |
| 27 | * as long as the kernel is compiled without -fomit-frame-pointer. |
| 28 | * (or CONFIG_FRAME_POINTER, this is forced on arm64) |
| 29 | * |
| 30 | * stack layout after mcount_enter in _mcount(): |
| 31 | * |
| 32 | * current sp/fp => 0:+-----+ |
| 33 | * in _mcount() | x29 | -> instrumented function's fp |
| 34 | * +-----+ |
| 35 | * | x30 | -> _mcount()'s lr (= instrumented function's pc) |
| 36 | * old sp => +16:+-----+ |
| 37 | * when instrumented | | |
| 38 | * function calls | ... | |
| 39 | * _mcount() | | |
| 40 | * | | |
| 41 | * instrumented => +xx:+-----+ |
| 42 | * function's fp | x29 | -> parent's fp |
| 43 | * +-----+ |
| 44 | * | x30 | -> instrumented function's lr (= parent's pc) |
| 45 | * +-----+ |
| 46 | * | ... | |
| 47 | */ |
| 48 | |
	/*
	 * Open a minimal stack frame: push the live x29/x30 pair and point
	 * x29 at it, so the mcount_get_* helpers below can unwind back to
	 * the instrumented function and its parent (see layout above).
	 */
	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm
| 53 | |
	/* Tear down the frame built by mcount_enter and return to caller. */
	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm
| 58 | |
	/*
	 * \rd = \rn - AARCH64_INSN_SIZE: convert a return address (the
	 * instruction following a "bl") into the address of the preceding
	 * call instruction itself.
	 */
	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm
| 62 | |
/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg]		// parent's fp (x29 slot of that frame)
	.endm
| 68 | |
/* for instrumented function */
	.macro mcount_get_pc0 reg
	/* x30 still holds the return address into the instrumented function */
	mcount_adjust_addr	\reg, x30
	.endm
| 73 | |
	/* instrumented function's pc, read from the x30 saved in our frame */
	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]		// _mcount()'s lr
	mcount_adjust_addr	\reg, \reg
	.endm
| 78 | |
	/*
	 * instrumented function's lr (= parent's pc), read from the x30
	 * slot of the instrumented function's own stack frame
	 */
	.macro mcount_get_lr reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg, #8]	// its saved x30 (return address)
	mcount_adjust_addr	\reg, \reg
	.endm
| 84 | |
	/*
	 * address of the saved-x30 slot in the instrumented function's
	 * frame; passed to prepare_ftrace_return() as &lr so the return
	 * path can be redirected to return_to_handler()
	 */
	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
	.endm
| 89 | |
AKASHI Takahiro | bd7d38d | 2014-04-30 10:54:34 +0100 | [diff] [blame] | 90 | #ifndef CONFIG_DYNAMIC_FTRACE |
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
ENTRY(_mcount)
	mcount_enter

	ldr_l	x2, ftrace_trace_function
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
skip_ftrace_call:			// return;
	mcount_exit			// }
#else
	mcount_exit			// return;
					// }
skip_ftrace_call:
	// NB: reached only via the b.eq above, so x0 still holds
	// &ftrace_stub from the adr; the first cmp below relies on that.
	ldr_l	x2, ftrace_graph_return
	cmp	x0, x2			// if ((ftrace_graph_return
	b.ne	ftrace_graph_caller	//      != ftrace_stub)

	ldr_l	x2, ftrace_graph_entry	//   || (ftrace_graph_entry
	adr_l	x0, ftrace_graph_entry_stub //  != ftrace_graph_entry_stub))
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();

	mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
ENDPROC(_mcount)
| 130 | |
AKASHI Takahiro | bd7d38d | 2014-04-30 10:54:34 +0100 | [diff] [blame] | 131 | #else /* CONFIG_DYNAMIC_FTRACE */ |
/*
 * _mcount() is the hook emitted when the kernel is built with -pg, but
 * with dynamic ftrace every "bl _mcount" call site is rewritten to a NOP
 * at kernel start up, and later patched per function: NOP -> branch to
 * ftrace_caller() when tracing is enabled, and back to NOP when disabled.
 * This function itself is therefore never expected to do any work.
 */
ENTRY(_mcount)
	ret
ENDPROC(_mcount)
| 141 | |
/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 *
 * ftrace_call and ftrace_graph_call below are live patch sites: ftrace
 * rewrites the nop at each label at runtime.
 */
ENTRY(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr

	.global ftrace_call
ftrace_call:				// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.global ftrace_graph_call
ftrace_graph_call:			// ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

	mcount_exit
ENDPROC(ftrace_caller)
| 171 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 172 | |
/*
 * Default no-op tracer; its address doubles as the "no tracer installed"
 * sentinel that _mcount() compares the function pointers against.
 */
ENTRY(ftrace_stub)
	ret
ENDPROC(ftrace_stub)
| 176 | |
| 177 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
/*
 * Save the AAPCS64 result registers x0-x7 so the instrumented function's
 * return value(s) survive the call made between save/restore.
 */
	.macro save_return_regs
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]
	.endm
| 186 | |
/* Restore x0-x7 saved by save_return_regs and pop the 64-byte area. */
	.macro restore_return_regs
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64
	.endm
| 195 | |
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when function_graph tracer is
 * selected.
 * This function w/ prepare_ftrace_return() fakes link register's value on
 * the call stack in order to intercept instrumented function's return path
 * and run return_to_handler() later on its exit.
 *
 * Entered by a plain branch (not bl) with the mcount_enter frame still
 * live, so the mcount_exit here unwinds that frame and returns to the
 * instrumented function.
 */
ENTRY(ftrace_graph_caller)
	mcount_get_lr_addr	  x0	//     pointer to function's saved lr
	mcount_get_pc		  x1	//     function's pc
	mcount_get_parent_fp	  x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)

	mcount_exit
ENDPROC(ftrace_graph_caller)
| 213 | |
/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller()
 * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
 */
ENTRY(return_to_handler)
	save_return_regs		// preserve x0-x7 (function's return value)
	mov	x0, x29			// parent's fp
	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address
	restore_return_regs
	ret
END(return_to_handler)
| 229 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |