// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)
int rcu_exp_jiffies_till_stall_check(void)
{
	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
	int exp_stall_delay_delta = 0;
	int till_stall_check;

	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
	if (!cpu_stall_timeout)
		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());

	// Limit check must be consistent with the Kconfig limits for
	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
	// The minimum clamped value is "2UL", because at least one full
	// tick has to be guaranteed.
	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 300UL * HZ);

	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));

#ifdef CONFIG_PROVE_RCU
	/* Add an extra ~25% of till_stall_check. */
	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
#endif

	return till_stall_check + exp_stall_delay_delta;
}
EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
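
/*
 * Illustrative arithmetic, not from the source: assuming HZ=1000 and a
 * hypothetical CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=20 (so that
 * rcu_exp_cpu_stall_timeout is 20 ms), msecs_to_jiffies(20) yields 20
 * jiffies, which clamp() leaves alone.  Under CONFIG_PROVE_RCU the extra
 * delta is (20 * 25) / 100 + 1 == 6, so this returns 26 jiffies; without
 * CONFIG_PROVE_RCU it returns 20.
 */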

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
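
/*
 * Worked example (illustrative, not from the source): booting with
 * rcupdate.rcu_cpu_stall_timeout=500 is silently clamped to the
 * 300-second Kconfig maximum, so with HZ=250 this returns
 * 300 * 250 == 75000 jiffies, plus another 5 * HZ when
 * CONFIG_PROVE_RCU is enabled.
 */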

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
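
/*
 * Hypothetical usage sketch (not part of this file): a caller that
 * normally defers freeing through call_rcu() but, when the grace period
 * looks stalled, avoids allocating more memory behind it and instead
 * waits and frees directly.  The rh field and free_cb callback below
 * are illustrative names only:
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();		// Don't queue more memory.
 *		kfree(p);
 *	} else {
 *		call_rcu(&p->rh, free_cb);	// Normal deferred free.
 *	}
 */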

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
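
/*
 * Illustrative sysctl interplay (assumed values, not from the source):
 * with the corresponding sysctls set to panic_on_rcu_stall=1 and
 * max_rcu_stall_to_panic=3, the first two stall warnings print normally,
 * the static cpu_stall counter reaches the limit on the third stall, and
 * only then does panic("RCU Stall\n") fire.  With
 * max_rcu_stall_to_panic left at zero, the very first stall panics.
 */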

/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
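
/*
 * Example of the resulting console output (illustrative values only):
 *
 *	Tasks blocked on level-0 rcu_node (CPUs 0-15): P3421/1:b..l P99
 *
 * Here P3421 was successfully sampled by check_slow_task(): reader
 * nesting depth 1, "b" means ->rcu_read_unlock_special.b.blocked is set,
 * the "." slots mean need_qs and exp_hint are clear, and "l" means the
 * task is still on its rcu_node's ->blkd_tasks list.  P99 was running
 * when sampled, so only its PID is printed.
 */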

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
{
	int cpu;
	struct task_struct *rcuc;
	unsigned long j;

	rcuc = rdp->rcu_cpu_kthread_task;
	if (!rcuc)
		return false;

	cpu = task_cpu(rcuc);
	if (cpu_is_offline(cpu) || idle_cpu(cpu))
		return false;

	j = jiffies - READ_ONCE(rdp->rcuc_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

static void print_cpu_stat_info(int cpu)
{
	struct rcu_snap_record rsr, *rsrp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);

	if (!rcu_cpu_stall_cputime)
		return;

	rsrp = &rdp->snap_record;
	if (rsrp->gp_seq != rdp->gp_seq)
		return;

	rsr.cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
	rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
	rsr.cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);

	pr_err("\t         hardirqs   softirqs   csw/system\n");
	pr_err("\t number: %8ld %10d %12lld\n",
	       kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
	       kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
	       nr_context_switches_cpu(cpu) - rsrp->nr_csw);
	pr_err("\tcputime: %8lld %10lld %12lld   ==> %d(ms)\n",
	       div_u64(rsr.cputime_irq - rsrp->cputime_irq, NSEC_PER_MSEC),
	       div_u64(rsr.cputime_softirq - rsrp->cputime_softirq, NSEC_PER_MSEC),
	       div_u64(rsr.cputime_system - rsrp->cputime_system, NSEC_PER_MSEC),
	       jiffies_to_msecs(jiffies - rsrp->jiffies));
}
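
/*
 * Sample of the statistics this prints (illustrative numbers, emitted
 * only when the rcu_cpu_stall_cputime parameter is set and the snapshot
 * was taken during the current grace period):
 *
 *	         hardirqs   softirqs   csw/system
 *	 number:      624         45            0
 *	cputime:       69          1          2425   ==> 2500(ms)
 *
 * Reading: over the last 2500 ms of this grace period, the CPU took 624
 * hardirqs and 45 softirqs, did zero context switches, and spent nearly
 * all of its measured CPU time in system mode, which points at a
 * kernel-mode loop rather than a starved or idle CPU.
 */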

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;
	bool rcuc_starved;
	unsigned long j;
	char buf[32];

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
	if (rcuc_starved)
		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(cpu) & 0xffff,
	       ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       rcuc_starved ? buf : "",
	       falsepositive ? " (false positive?)" : "");

	print_cpu_stat_info(cpu);
}
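
/*
 * Decoding the per-CPU line this produces (illustrative values):
 *
 *	0-...!: (1 GPs behind) idle=1f5/1/0x0 softirq=94555/94555 fqs=0
 *
 * The four flag characters come straight from the expressions above:
 * "O" would mean cpu_online() is false ("." here: online), "o" and "N"
 * would flag the CPU's bit being clear in ->qsmaskinit and
 * ->qsmaskinitnext respectively, and the fourth slot is "?" without
 * CONFIG_IRQ_WORK, a 0-9 digit roughly counting GPs that the posted
 * irq_work has remained pending, "!" if the last irq_work ran during an
 * earlier GP, or "." if it ran during this one.  The remaining fields
 * give GP awareness, the low 16 bits of the context-tracking dynticks
 * snapshot plus the two nesting counts, RCU_SOFTIRQ counts at GP start
 * versus now, and the number of force-quiescent-state scans this GP.
 */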

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer. */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_err("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_err("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
	       jiffies - gps,
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	bool didstall = false;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;
	}
	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
		WRITE_ONCE(rcu_state.jiffies_stall, jn);
	}
}
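
/*
 * Rough timeline of the above (illustrative, assuming the default
 * 21-second rcu_cpu_stall_timeout and a grace period starting at time T):
 *
 *	T           record_gp_stall_check_time() sets ->jiffies_stall
 *	T+21s       a CPU whose own qsmask bit is still set self-reports
 *	            via print_cpu_stall()
 *	T+21s+RAT   otherwise, some other CPU wins the cmpxchg() after
 *	            RCU_STALL_RAT_DELAY and runs print_other_cpu_stall()
 *	afterwards  either way, ->jiffies_stall is pushed out roughly
 *	            three further stall periods so slow consoles can drain
 */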

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);