// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
                                         struct pt_regs *regs)
{
        unwind_init_common(state, current);

        state->fp = regs->regs[29];
        state->pc = regs->pc;
}
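
/*
 * For illustration, this is how a regs-based unwind is kicked off (a sketch
 * of what arch_stack_walk() below does when given a pt_regs; print_entry is
 * a hypothetical callback, not part of this file):
 *
 *      static bool print_entry(void *cookie, unsigned long pc)
 *      {
 *              pr_info("%pS\n", (void *)pc);
 *              return true;
 *      }
 *
 *      struct unwind_state state;
 *
 *      unwind_init_from_regs(&state, regs);
 *      unwind(&state, print_entry, NULL);
 */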

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
        unwind_init_common(state, current);

        state->fp = (unsigned long)__builtin_frame_address(1);
        state->pc = (unsigned long)__builtin_return_address(0);
}
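
/*
 * The noinline requirement exists because __builtin_frame_address(1) and
 * __builtin_return_address(0) resolve relative to the function this helper
 * is inlined into; if that function were itself inlined, they would resolve
 * to the wrong frame. A conforming caller looks like this (a sketch;
 * start_stack_walk is a hypothetical example, not part of this file):
 *
 *      static noinline void start_stack_walk(stack_trace_consume_fn fn,
 *                                            void *cookie)
 *      {
 *              struct unwind_state state;
 *
 *              unwind_init_from_caller(&state);
 *              unwind(&state, fn, cookie);
 *      }
 */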

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
                                         struct task_struct *task)
{
        unwind_init_common(state, task);

        state->fp = thread_saved_fp(task);
        state->pc = thread_saved_pc(task);
}
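
/*
 * For example, a caller might pin the task's stack memory around the walk
 * (a sketch; note that try_get_task_stack() only keeps the stack memory
 * alive, it does not prevent the task from being woken, so the caller must
 * separately ensure the task stays blocked):
 *
 *      if (try_get_task_stack(tsk)) {
 *              struct unwind_state state;
 *
 *              unwind_init_from_task(&state, tsk);
 *              unwind(&state, fn, cookie);
 *              put_task_stack(tsk);
 *      }
 */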

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context: otherwise the task could be migrated, and the per-cpu stack
 * bounds we check would belong to a different CPU.
 */
static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
        if (info)
                info->type = STACK_TYPE_UNKNOWN;

        if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
        if (on_irq_stack(sp, size, info))
                return true;
        if (on_overflow_stack(sp, size, info))
                return true;
        if (on_sdei_stack(sp, size, info))
                return true;

        return false;
}
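
/*
 * For illustration, a frame can be classified explicitly (a sketch; the
 * size of 16 is the size of one AArch64 frame record):
 *
 *      struct stack_info info;
 *
 *      if (on_accessible_stack(current, state.fp, 16, &info))
 *              pr_info("fp %lx on stack type %d [%lx..%lx)\n",
 *                      state.fp, info.type, info.low, info.high);
 */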

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
        struct task_struct *tsk = state->task;
        unsigned long fp = state->fp;
        struct stack_info info;
        int err;

        /* Final frame; nothing to unwind */
        if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                return -ENOENT;

        err = unwind_next_common(state, &info, on_accessible_stack, NULL);
        if (err)
                return err;

        state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
            (state->pc == (unsigned long)return_to_handler)) {
                unsigned long orig_pc;
                /*
                 * This is a case where the function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * So replace it with the original value.
                 */
                orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
                                                (void *)state->fp);
                if (WARN_ON_ONCE(state->pc == orig_pc))
                        return -EINVAL;
                state->pc = orig_pc;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(state->pc))
                state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

        return 0;
}
NOKPROBE_SYMBOL(unwind_next);
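
/*
 * For reference, the frame records that unwind_next_common() follows are the
 * two-word records AAPCS64 mandates when frame pointers are enabled; fp (x29)
 * points at a record laid out like this (an illustrative struct, not one
 * defined by this file):
 *
 *      struct frame_record {
 *              u64 fp;         // caller's x29: location of record B
 *              u64 lr;         // saved x30: return address into the caller
 *      };
 *
 * Stepping from record A to record B loads state->fp from A->fp and
 * state->pc from A->lr.
 */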

static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry, void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}
NOKPROBE_SYMBOL(unwind);
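
/*
 * The walk stops when consume_entry returns false or unwind_next() fails.
 * For example, a callback that captures at most a fixed number of entries
 * (a sketch; struct trace_buf and save_entry are hypothetical):
 *
 *      struct trace_buf {
 *              unsigned long *entries;
 *              unsigned int len, max;
 *      };
 *
 *      static bool save_entry(void *cookie, unsigned long pc)
 *      {
 *              struct trace_buf *t = cookie;
 *
 *              t->entries[t->len++] = pc;
 *              return t->len < t->max;
 *      }
 */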

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
        char *loglvl = arg;
        printk("%s %pSb\n", loglvl, (void *)where);
        return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs && user_mode(regs))
                return;

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        printk("%sCall trace:\n", loglvl);
        arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

        put_task_stack(tsk);
}
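
/*
 * For example, dumping the current task's kernel backtrace at KERN_INFO
 * (a sketch):
 *
 *      dump_backtrace(NULL, NULL, KERN_INFO);
 */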

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        /*
         * Prevent the compiler from tail-calling dump_backtrace(), so that
         * show_stack() retains its own frame record while the trace is taken.
         */
        barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                      void *cookie, struct task_struct *task,
                                      struct pt_regs *regs)
{
        struct unwind_state state;

        if (regs) {
                if (task != current)
                        return;
                unwind_init_from_regs(&state, regs);
        } else if (task == current) {
                unwind_init_from_caller(&state);
        } else {
                unwind_init_from_task(&state, task);
        }

        unwind(&state, consume_entry, cookie);
}
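
/*
 * arch_stack_walk() is normally reached via the generic stacktrace API
 * rather than called directly. For example (a sketch):
 *
 *      unsigned long entries[16];
 *      unsigned int nr;
 *
 *      nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr, 0);
 */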