// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);
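
/*
 * Generic fatal-exception path: serialize oops output across harts via
 * die_lock, dump state, run the die notifier chain, then honour the
 * panic policy or kill the current task.
 */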
void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;
        int ret;
        long cause;

        oops_enter();
        spin_lock_irq(&die_lock);
        console_verbose();
        bust_spinlocks(1);

        pr_emerg("%s [#%d]\n", str, ++die_counter);
        print_modules();
        if (regs)
                show_regs(regs);

        cause = regs ? regs->cause : -1;
        ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

        if (kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die_lock);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (ret != NOTIFY_STOP)
                make_task_dead(SIGSEGV);
}
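
/*
 * Deliver a fault signal for a trap taken in user mode, optionally
 * logging the unhandled signal first (subject to printk rate limiting).
 */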
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
        struct task_struct *tsk = current;

        if (show_unhandled_signals && unhandled_signal(tsk, signo)
            && printk_ratelimit()) {
                pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
                        tsk->comm, task_pid_nr(tsk), signo, code, addr);
                print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
                pr_cont("\n");
                __show_regs(regs);
        }

        force_sig_fault(signo, code, (void __user *)addr);
}
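
/*
 * Common trap handling: user-mode traps become signals; kernel-mode
 * traps get one chance at an exception-table fixup before we die().
 */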
static void do_trap_error(struct pt_regs *regs, int signo, int code,
                          unsigned long addr, const char *str)
{
        current->thread.bad_cause = regs->cause;
        if (user_mode(regs)) {
                do_trap(regs, signo, code, addr);
        } else {
                if (!fixup_exception(regs))
                        die(regs, str);
        }
}
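
/*
 * On XIP kernels the regular text runs from read-only storage, so the
 * trap handlers are grouped into a RAM-backed section where alternative
 * patching can still rewrite them.
 */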
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __section(".xip.traps")
#else
#define __trap_section
#endif
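
/* Generate a trivial handler that funnels the trap into do_trap_error(). */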
#define DO_ERROR_INFO(name, signo, code, str)                                \
asmlinkage __visible __trap_section void name(struct pt_regs *regs)          \
{                                                                            \
        do_trap_error(regs, signo, code, regs->epc, "Oops - " str);          \
}

DO_ERROR_INFO(do_trap_unknown,
        SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
        SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
        SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
        SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_fault,
        SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
        SIGBUS, BUS_ADRALN, "load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
        SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
#else
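/*
 * When running in M-mode there is no firmware underneath to emulate
 * misaligned accesses, so the kernel tries to fix them up itself and
 * only signals an error if the emulation fails.
 */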
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
        if (!handle_misaligned_load(regs))
                return;

        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                      "Oops - load address misaligned");
}

asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
        if (!handle_misaligned_store(regs))
                return;

        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                      "Oops - store (or AMO) address misaligned");
}
#endif

DO_ERROR_INFO(do_trap_store_fault,
        SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
        SIGILL, ILL_ILLTRP, "environment call from U-mode");
DO_ERROR_INFO(do_trap_ecall_s,
        SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");
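
/*
 * Return the length in bytes of the (possibly compressed) instruction
 * at @pc, or 0 if it cannot be read.
 */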
static inline unsigned long get_break_insn_length(unsigned long pc)
{
        bug_insn_t insn;

        if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;

        return GET_INSN_LENGTH(insn);
}
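
/*
 * ebreak dispatch: give kprobes/uprobes first refusal, then handle user
 * breakpoints, KGDB, and finally in-kernel WARN()/BUG() sites via
 * report_bug().
 */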
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
        if (kprobe_single_step_handler(regs))
                return;

        if (kprobe_breakpoint_handler(regs))
                return;
#endif
#ifdef CONFIG_UPROBES
        if (uprobe_single_step_handler(regs))
                return;

        if (uprobe_breakpoint_handler(regs))
                return;
#endif
        current->thread.bad_cause = regs->cause;

        if (user_mode(regs))
                force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
        else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause,
                            SIGTRAP) == NOTIFY_STOP)
                return;
#endif
        else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
                regs->epc += get_break_insn_length(regs->epc);
        else
                die(regs, "Kernel BUG");
}
NOKPROBE_SYMBOL(do_trap_break);

#ifdef CONFIG_GENERIC_BUG
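/*
 * Sanity-check a reported BUG() address: it must be a readable kernel
 * address that actually contains a 32-bit or compressed 16-bit ebreak.
 */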
int is_valid_bugaddr(unsigned long pc)
{
        bug_insn_t insn;

        if (pc < VMALLOC_START)
                return 0;
        if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;
        if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
                return (insn == __BUG_INSN_32);
        else
                return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/*
 * Extra stack space that allows us to provide panic messages when the kernel
 * has overflowed its stack.
 */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
                overflow_stack)__aligned(16);

/*
 * A temporary stack for use by handle_kernel_stack_overflow. This is used so
 * we can call into C code to get the per-hart overflow stack. Usage of this
 * stack must be protected by spin_shadow_stack.
 */
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);

/*
 * A pseudo spinlock to protect the shadow stack from being used by multiple
 * harts concurrently. This isn't a real spinlock because the lock side must
 * be taken without a valid stack and with only a single register; it's only
 * taken while panicking anyway, so the performance and error checking a
 * proper spinlock would give us don't matter.
 */
unsigned long spin_shadow_stack;
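
/*
 * Return the top of this hart's overflow stack (stacks grow down);
 * called from the assembly overflow path, which is still running on the
 * shadow stack at this point.
 */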
asmlinkage unsigned long get_overflow_stack(void)
{
        return (unsigned long)this_cpu_ptr(overflow_stack) +
                OVERFLOW_STACK_SIZE;
}
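
/*
 * C-level kernel stack overflow handler, entered on the per-hart
 * overflow stack: release the shadow-stack lock, report what happened,
 * and panic.
 */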
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

        /*
         * We're done with the shadow stack by this point, as we're on the
         * overflow stack. Tell any other concurrent overflowing harts that
         * they can proceed with panicking by releasing the pseudo-spinlock.
         *
         * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
         */
        smp_store_release(&spin_shadow_stack, 0);

        console_verbose();

        pr_emerg("Insufficient stack space to handle exception!\n");
        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);
        panic("Kernel stack overflow");
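
        /* panic() should not return; idle quietly if it somehow does. */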
        for (;;)
                wait_for_interrupt();
}
#endif /* CONFIG_VMAP_STACK */