// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/context_tracking.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs' backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
static unsigned int warn_limit __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
unsigned long panic_print;
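
/*
 * Example (illustrative): panic_print is a bitmask of the flags above,
 * so panic_print=0x41 (PANIC_PRINT_TASK_INFO | PANIC_PRINT_ALL_CPU_BT)
 * dumps task state plus an all-CPU backtrace on panic. It can be set
 * via the panic_print= boot parameter or the module parameter exposed
 * by core_param() at the bottom of this file.
 */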

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
	{
		.procname	= "oops_all_cpu_backtrace",
		.data		= &sysctl_oops_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif
	{
		.procname	= "warn_limit",
		.data		= &warn_limit,
		.maxlen		= sizeof(warn_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif
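
/*
 * Example (illustrative): the "warn_limit" sysctl above lets an
 * administrator panic the machine once it has warned too often, e.g.
 *
 *	sysctl -w kernel.warn_limit=100
 *
 * The default of 0 disables the limit; see check_panic_on_warn() below.
 */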

static atomic_t warn_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
}

static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);

static __init int kernel_panic_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_panic_sysfs_init);
#endif

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}
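
/*
 * Example (illustrative, not from this file): an architecture wanting a
 * lower-power halt could override the weak stub above roughly as
 * follows; arch_cpu_deep_idle() is a hypothetical helper:
 *
 *	void panic_smp_self_stop(void)
 *	{
 *		local_irq_disable();
 *		while (1)
 *			arch_cpu_deep_idle();
 *	}
 */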

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
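
/*
 * Example (illustrative, not from this file): the typical caller is an
 * architecture NMI handler, which must not re-enter panic() on a CPU
 * that is already panicking. A sketch, where fatal_hw_error() is a
 * hypothetical check:
 *
 *	static void arch_fatal_nmi_handler(struct pt_regs *regs)
 *	{
 *		if (fatal_hw_error())
 *			nmi_panic(regs, "Fatal hardware error in NMI");
 *		// reached only if this CPU had already panicked
 *	}
 */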

static void panic_print_sys_info(bool console_flush)
{
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

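/*
 * Called from the WARN machinery: panic immediately when panic_on_warn
 * is set; otherwise count this warning against the kernel.warn_limit
 * sysctl. For example, with warn_limit=1 the very first warning panics
 * the machine, while the default of 0 only keeps the count.
 */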
void check_panic_on_warn(const char *origin)
{
	unsigned int limit;

	if (panic_on_warn)
		panic("%s: panic_on_warn set ...\n", origin);

	limit = READ_ONCE(warn_limit);
	if (atomic_inc_return(&warn_count) >= limit && limit)
		panic("%s: system warned too often (kernel.warn_limit is %d)",
		      origin, limit);
}

/*
 * Helper that triggers the NMI backtrace (if set in panic_print)
 * and then performs the secondary CPUs' shutdown - we cannot have
 * the NMI backtrace after the CPUs are off!
 */
static void panic_other_cpus_shutdown(bool crash_kexec)
{
	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately may not be hardened to work in a panic
	 * situation. If we want to do crash dump after notifier calls
	 * and kmsg_dump, we will need architecture dependent extra
	 * bits in addition to stopping other CPUs, hence we rely on
	 * crash_smp_send_stop() for that.
	 */
	if (!crash_kexec)
		smp_send_stop();
	else
		crash_smp_send_stop();
}

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
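 *
 * Example (illustrative): a driver that detects unrecoverable state
 * corruption might call
 *
 *	panic("mydrv: controller state corrupted (status 0x%x)", status);
 *
 * where the arguments follow printk()/printf() formatting.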
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_cpu check in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
	[ TAINT_TEST ]			= { 'N', ' ', true },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
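 *
 * Example (illustrative): on a kernel tainted by a proprietary module
 * and a prior warning the result resembles "Tainted: P        W",
 * one column per taint flag, while an untainted kernel yields
 * "Not tainted".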
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
			       t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
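 *
 * Example (illustrative): the WARN handling in this file records the
 * event without disabling lock debugging via
 *
 *	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);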
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
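 *
 * Example (illustrative): booting with pause_on_oops=60 stalls any
 * later-oopsing CPU for 60 seconds so the first oops stays on screen.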
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	check_panic_on_warn("kernel");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		warn_rcu_exit(rcu);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */
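/*
 * Example (illustrative): with debugfs mounted at /sys/kernel/debug,
 *
 *	echo 1 > /sys/kernel/debug/clear_warn_once
 *
 * re-arms all WARN_ONCE()/WARN_ON_ONCE() sites so they can fire again.
 */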

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

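/*
 * Handle the "oops=" boot parameter; for example, booting with
 * oops=panic sets panic_on_oops so that every oops escalates to a
 * panic.
 */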
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

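/*
 * Handle the "panic_on_taint=" boot parameter: a hex bitmask of TAINT_*
 * bits that should panic the kernel as soon as they are set. Example
 * (illustrative): panic_on_taint=0x20 panics when TAINT_BAD_PAGE is
 * set, and appending ",nousertaint" keeps taints forced from userspace
 * via /proc/sys/kernel/tainted from triggering that panic.
 */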
static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%s\n",
		panic_on_taint, str_enabled_disabled(panic_on_taint_nousertaint));

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);