// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
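/*
 * Worked example of the mask above (for illustration only):
 * RCUTORTURE_MAX_EXTEND is 0x01 | 0x02 | 0x04 | 0x08 | 0x10 = 0x1f, so the
 * default "extendables" module parameter lets readers be extended by any
 * combination of bh-disable, irq-disable, preempt-disable,
 * rcu_read_lock_bh(), and rcu_read_lock_sched(), while the nested-RCU-reader
 * bits (0x20 and 0x40) are excluded and remain under the control of the
 * reader-extension code itself.
 */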

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
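/*
 * Example usage of these module parameters (illustrative only; the values
 * shown are arbitrary):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * or, when rcutorture is built in, the equivalent boot-line form:
 *
 *	rcutorture.torture_type=srcu rcutorture.nreaders=8
 */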

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;
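// Rough idea of the handshake (informal summary; the reader-side code that
// drives it appears later in this file): each reader bumps its own
// ->rtc_myloops, a reader can assign checking work to a peer by filling in
// ->rtc_chkrdr/->rtc_chkloops and publishing through ->rtc_assigner, and
// ->rtc_ready is set with smp_store_release() only after a grace period
// (see rcu_torture_pipe_update_one() below), so the eventual check observes
// a consistent snapshot.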

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_EXP	6
#define RTWS_COND_SYNC		7
#define RTWS_COND_SYNC_EXP	8
#define RTWS_POLL_GET		9
#define RTWS_POLL_GET_EXP	10
#define RTWS_POLL_WAIT		11
#define RTWS_POLL_WAIT_EXP	12
#define RTWS_SYNC		13
#define RTWS_STUTTER		14
#define RTWS_STOPPING		15
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_EXP",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
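/*
 * Typical element lifecycle (informal summary): the writer kthread obtains
 * an element with rcu_torture_alloc(), publishes it via rcu_torture_current,
 * and passes the previous element to cur_ops->deferred_free(); once
 * rcu_torture_cb() (below) sees that the element has aged through the full
 * pipe, the element returns to the freelist via rcu_torture_free().
 */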

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	unsigned long (*get_gp_state)(void);
	unsigned long (*get_gp_completed)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
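/*
 * Note: cur_ops is set at module-init time by matching the torture_type
 * module parameter against each ops structure's ->name field (this happens
 * in rcu_torture_init(), later in this file), after which all of the
 * torture kthreads dispatch through cur_ops rather than calling any one
 * RCU flavor directly.
 */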

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
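/*
 * Worked example of the probabilities above (illustrative, assuming
 * nrealreaders == 4): the long mdelay() fires roughly once per
 * 4 * 2000 * 300 = 2,400,000 calls, the short udelay() roughly once per
 * 4 * 2 * 200 = 1,600 calls, and the voluntary preemption point roughly
 * once per 4 * 500 = 2,000 calls, so most reader passes add no delay at all.
 */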

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_completed = get_completed_synchronize_rcu,
	.start_gp_poll = start_poll_synchronize_rcu,
	.poll_gp_state = poll_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
| 533 | static void rcu_busted_torture_deferred_free(struct rcu_torture *p) |
| 534 | { |
| 535 | /* This is a deliberate bug for testing purposes only! */ |
| 536 | rcu_torture_cb(&p->rtort_rcu); |
| 537 | } |
| 538 | |
| 539 | static void synchronize_rcu_busted(void) |
| 540 | { |
| 541 | /* This is a deliberate bug for testing purposes only! */ |
| 542 | } |
| 543 | |
| 544 | static void |
Boqun Feng | b6a4ae7 | 2015-07-29 13:29:38 +0800 | [diff] [blame] | 545 | call_rcu_busted(struct rcu_head *head, rcu_callback_t func) |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 546 | { |
| 547 | /* This is a deliberate bug for testing purposes only! */ |
| 548 | func(head); |
| 549 | } |
| 550 | |
| 551 | static struct rcu_torture_ops rcu_busted_ops = { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 552 | .ttype = INVALID_RCU_FLAVOR, |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 553 | .init = rcu_sync_torture_init, |
| 554 | .readlock = rcu_torture_read_lock, |
| 555 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
| 556 | .readunlock = rcu_torture_read_unlock, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 557 | .readlock_held = torture_readlock_not_held, |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 558 | .get_gp_seq = rcu_no_completed, |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 559 | .deferred_free = rcu_busted_torture_deferred_free, |
| 560 | .sync = synchronize_rcu_busted, |
| 561 | .exp_sync = synchronize_rcu_busted, |
| 562 | .call = call_rcu_busted, |
| 563 | .cb_barrier = NULL, |
| 564 | .fqs = NULL, |
| 565 | .stats = NULL, |
| 566 | .irq_capable = 1, |
Paul E. McKenney | b3c9831 | 2017-06-06 16:39:00 -0700 | [diff] [blame] | 567 | .name = "busted" |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 568 | }; |
| 569 | |
| 570 | /* |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 571 | * Definitions for srcu torture testing. |
| 572 | */ |
| 573 | |
Lai Jiangshan | cda4dc8 | 2012-10-13 01:14:17 +0800 | [diff] [blame] | 574 | DEFINE_STATIC_SRCU(srcu_ctl); |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 575 | static struct srcu_struct srcu_ctld; |
| 576 | static struct srcu_struct *srcu_ctlp = &srcu_ctl; |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 577 | |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 578 | static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 579 | { |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 580 | return srcu_read_lock(srcu_ctlp); |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 581 | } |
| 582 | |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 583 | static void |
| 584 | srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 585 | { |
| 586 | long delay; |
| 587 | const long uspertick = 1000000 / HZ; |
| 588 | const long longdelay = 10; |
| 589 | |
| 590 | /* We want there to be long-running readers, but not all the time. */ |
| 591 | |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 592 | delay = torture_random(rrsp) % |
| 593 | (nrealreaders * 2 * longdelay * uspertick); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 594 | if (!delay && in_task()) { |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 595 | schedule_timeout_interruptible(longdelay); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 596 | rtrsp->rt_delay_jiffies = longdelay; |
| 597 | } else { |
| 598 | rcu_read_delay(rrsp, rtrsp); |
| 599 | } |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 600 | } |
| 601 | |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 602 | static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 603 | { |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 604 | srcu_read_unlock(srcu_ctlp, idx); |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 605 | } |
| 606 | |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 607 | static int torture_srcu_read_lock_held(void) |
| 608 | { |
| 609 | return srcu_read_lock_held(srcu_ctlp); |
| 610 | } |
| 611 | |
Paul E. McKenney | 6b80da4 | 2014-11-21 14:19:26 -0800 | [diff] [blame] | 612 | static unsigned long srcu_torture_completed(void) |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 613 | { |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 614 | return srcu_batches_completed(srcu_ctlp); |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 615 | } |
| 616 | |
Lai Jiangshan | 9059c94 | 2012-03-19 16:12:14 +0800 | [diff] [blame] | 617 | static void srcu_torture_deferred_free(struct rcu_torture *rp) |
| 618 | { |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 619 | call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); |
Lai Jiangshan | 9059c94 | 2012-03-19 16:12:14 +0800 | [diff] [blame] | 620 | } |
| 621 | |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 622 | static void srcu_torture_synchronize(void) |
| 623 | { |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 624 | synchronize_srcu(srcu_ctlp); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 625 | } |
| 626 | |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 627 | static unsigned long srcu_torture_get_gp_state(void) |
| 628 | { |
| 629 | return get_state_synchronize_srcu(srcu_ctlp); |
| 630 | } |
| 631 | |
| 632 | static unsigned long srcu_torture_start_gp_poll(void) |
| 633 | { |
| 634 | return start_poll_synchronize_srcu(srcu_ctlp); |
| 635 | } |
| 636 | |
| 637 | static bool srcu_torture_poll_gp_state(unsigned long oldstate) |
| 638 | { |
| 639 | return poll_state_synchronize_srcu(srcu_ctlp, oldstate); |
| 640 | } |
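/*
 * For reference, the polled grace-period pattern exercised through the three
 * wrappers above looks roughly like this (sketch only, not code used by the
 * test itself):
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(srcu_ctlp);
 *	// ... do other work ...
 *	if (poll_state_synchronize_srcu(srcu_ctlp, cookie))
 *		pr_info("A full SRCU grace period has elapsed.\n");
 *
 * get_state_synchronize_srcu() is the passive variant: it returns a cookie
 * without forcing a new grace period to start.
 */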

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
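/*
 * Why the loop above can stand in for a grace period (informal reasoning,
 * valid only in the CONFIG_PREEMPT=n case this flavor targets): the trivial
 * readers below merely disable preemption, so forcing this task to run on
 * every online CPU in turn guarantees that each CPU has context-switched,
 * and therefore that every reader that started before the call has finished.
 */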
| 757 | |
| 758 | static int rcu_torture_read_lock_trivial(void) __acquires(RCU) |
| 759 | { |
| 760 | preempt_disable(); |
| 761 | return 0; |
| 762 | } |
| 763 | |
| 764 | static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) |
| 765 | { |
| 766 | preempt_enable(); |
| 767 | } |
| 768 | |
| 769 | static struct rcu_torture_ops trivial_ops = { |
| 770 | .ttype = RCU_TRIVIAL_FLAVOR, |
| 771 | .init = rcu_sync_torture_init, |
| 772 | .readlock = rcu_torture_read_lock_trivial, |
| 773 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
| 774 | .readunlock = rcu_torture_read_unlock_trivial, |
| 775 | .readlock_held = torture_readlock_not_held, |
| 776 | .get_gp_seq = rcu_no_completed, |
| 777 | .sync = synchronize_rcu_trivial, |
| 778 | .exp_sync = synchronize_rcu_trivial, |
| 779 | .fqs = NULL, |
| 780 | .stats = NULL, |
| 781 | .irq_capable = 1, |
| 782 | .name = "trivial" |
| 783 | }; |
| 784 | |
Paul E. McKenney | 3b6e1dd | 2022-03-17 15:18:27 -0700 | [diff] [blame] | 785 | #ifdef CONFIG_TASKS_RCU |
| 786 | |
Paul E. McKenney | 40c1278 | 2022-03-17 13:29:59 -0700 | [diff] [blame] | 787 | /* |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 788 | * Definitions for RCU-tasks torture testing. |
| 789 | */ |
| 790 | |
| 791 | static int tasks_torture_read_lock(void) |
| 792 | { |
| 793 | return 0; |
| 794 | } |
| 795 | |
| 796 | static void tasks_torture_read_unlock(int idx) |
| 797 | { |
| 798 | } |
| 799 | |
| 800 | static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) |
| 801 | { |
| 802 | call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); |
| 803 | } |
| 804 | |
Paul E. McKenney | 9cf8fc6 | 2020-03-06 14:00:46 -0800 | [diff] [blame] | 805 | static void synchronize_rcu_mult_test(void) |
| 806 | { |
| 807 | synchronize_rcu_mult(call_rcu_tasks, call_rcu); |
| 808 | } |
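
/*
 * Editorial note: synchronize_rcu_mult() waits for all of the listed
 * grace-period flavors concurrently rather than one after the other, so
 * the call above waits for both an RCU-tasks grace period and a vanilla
 * RCU grace period to elapse.  It is wired up as this flavor's ->exp_sync
 * below, RCU-tasks having no expedited synchronous primitive of its own.
 */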
| 809 | |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 810 | static struct rcu_torture_ops tasks_ops = { |
| 811 | .ttype = RCU_TASKS_FLAVOR, |
| 812 | .init = rcu_sync_torture_init, |
| 813 | .readlock = tasks_torture_read_lock, |
| 814 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
| 815 | .readunlock = tasks_torture_read_unlock, |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 816 | .get_gp_seq = rcu_no_completed, |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 817 | .deferred_free = rcu_tasks_torture_deferred_free, |
| 818 | .sync = synchronize_rcu_tasks, |
Paul E. McKenney | 9cf8fc6 | 2020-03-06 14:00:46 -0800 | [diff] [blame] | 819 | .exp_sync = synchronize_rcu_mult_test, |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 820 | .call = call_rcu_tasks, |
| 821 | .cb_barrier = rcu_barrier_tasks, |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 822 | .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 823 | .fqs = NULL, |
| 824 | .stats = NULL, |
| 825 | .irq_capable = 1, |
Paul E. McKenney | 5eabea5 | 2019-04-12 09:02:46 -0700 | [diff] [blame] | 826 | .slow_gps = 1, |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 827 | .name = "tasks" |
| 828 | }; |
| 829 | |
Paul E. McKenney | 3b6e1dd | 2022-03-17 15:18:27 -0700 | [diff] [blame] | 830 | #define TASKS_OPS &tasks_ops, |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 831 | |
Paul E. McKenney | 3b6e1dd | 2022-03-17 15:18:27 -0700 | [diff] [blame] | 832 | #else // #ifdef CONFIG_TASKS_RCU |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 833 | |
Paul E. McKenney | 3b6e1dd | 2022-03-17 15:18:27 -0700 | [diff] [blame] | 834 | #define TASKS_OPS |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 835 | |
Paul E. McKenney | 3b6e1dd | 2022-03-17 15:18:27 -0700 | [diff] [blame] | 836 | #endif // #else #ifdef CONFIG_TASKS_RCU |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 837 | |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 838 | |
Paul E. McKenney | 4c3f7b0 | 2022-03-17 16:16:45 -0700 | [diff] [blame] | 839 | #ifdef CONFIG_TASKS_RUDE_RCU |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 840 | |
Paul E. McKenney | 3d6e43c | 2020-03-03 15:02:50 -0800 | [diff] [blame] | 841 | /* |
| 842 | * Definitions for rude RCU-tasks torture testing. |
| 843 | */ |
| 844 | |
| 845 | static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) |
| 846 | { |
| 847 | call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); |
| 848 | } |
| 849 | |
| 850 | static struct rcu_torture_ops tasks_rude_ops = { |
| 851 | .ttype = RCU_TASKS_RUDE_FLAVOR, |
| 852 | .init = rcu_sync_torture_init, |
| 853 | .readlock = rcu_torture_read_lock_trivial, |
| 854 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
| 855 | .readunlock = rcu_torture_read_unlock_trivial, |
| 856 | .get_gp_seq = rcu_no_completed, |
| 857 | .deferred_free = rcu_tasks_rude_torture_deferred_free, |
| 858 | .sync = synchronize_rcu_tasks_rude, |
| 859 | .exp_sync = synchronize_rcu_tasks_rude, |
| 860 | .call = call_rcu_tasks_rude, |
| 861 | .cb_barrier = rcu_barrier_tasks_rude, |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 862 | .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 863 | .cbflood_max = 50000, |
Paul E. McKenney | 3d6e43c | 2020-03-03 15:02:50 -0800 | [diff] [blame] | 864 | .fqs = NULL, |
| 865 | .stats = NULL, |
| 866 | .irq_capable = 1, |
| 867 | .name = "tasks-rude" |
| 868 | }; |
| 869 | |
Paul E. McKenney | 4c3f7b0 | 2022-03-17 16:16:45 -0700 | [diff] [blame] | 870 | #define TASKS_RUDE_OPS &tasks_rude_ops, |
| 871 | |
| 872 | #else // #ifdef CONFIG_TASKS_RUDE_RCU |
| 873 | |
| 874 | #define TASKS_RUDE_OPS |
| 875 | |
| 876 | #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU |
| 877 | |
| 878 | |
Paul E. McKenney | 40c1278 | 2022-03-17 13:29:59 -0700 | [diff] [blame] | 879 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 880 | |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 881 | /* |
| 882 | * Definitions for tracing RCU-tasks torture testing. |
| 883 | */ |
| 884 | |
| 885 | static int tasks_tracing_torture_read_lock(void) |
| 886 | { |
| 887 | rcu_read_lock_trace(); |
| 888 | return 0; |
| 889 | } |
| 890 | |
| 891 | static void tasks_tracing_torture_read_unlock(int idx) |
| 892 | { |
| 893 | rcu_read_unlock_trace(); |
| 894 | } |
| 895 | |
| 896 | static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) |
| 897 | { |
| 898 | call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); |
| 899 | } |
| 900 | |
| 901 | static struct rcu_torture_ops tasks_tracing_ops = { |
| 902 | .ttype = RCU_TASKS_TRACING_FLAVOR, |
| 903 | .init = rcu_sync_torture_init, |
| 904 | .readlock = tasks_tracing_torture_read_lock, |
| 905 | .read_delay = srcu_read_delay, /* just reuse srcu's version. */ |
| 906 | .readunlock = tasks_tracing_torture_read_unlock, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 907 | .readlock_held = rcu_read_lock_trace_held, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 908 | .get_gp_seq = rcu_no_completed, |
| 909 | .deferred_free = rcu_tasks_tracing_torture_deferred_free, |
| 910 | .sync = synchronize_rcu_tasks_trace, |
| 911 | .exp_sync = synchronize_rcu_tasks_trace, |
| 912 | .call = call_rcu_tasks_trace, |
| 913 | .cb_barrier = rcu_barrier_tasks_trace, |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 914 | .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 915 | .cbflood_max = 50000, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 916 | .fqs = NULL, |
| 917 | .stats = NULL, |
| 918 | .irq_capable = 1, |
| 919 | .slow_gps = 1, |
| 920 | .name = "tasks-tracing" |
| 921 | }; |
| 922 | |
Paul E. McKenney | 40c1278 | 2022-03-17 13:29:59 -0700 | [diff] [blame] | 923 | #define TASKS_TRACING_OPS &tasks_tracing_ops, |
| 924 | |
| 925 | #else // #ifdef CONFIG_TASKS_TRACE_RCU |
| 926 | |
| 927 | #define TASKS_TRACING_OPS |
| 928 | |
| 929 | #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU |
| 930 | |
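/*
 * Editorial note: the TASKS_OPS, TASKS_RUDE_OPS, and TASKS_TRACING_OPS
 * macros above expand either to "&<flavor>_ops," (trailing comma included)
 * or to nothing, so the list of candidate flavors into which they are
 * expanded automatically contains only the flavors that the current
 * kernel configuration actually provides.
 */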
| 931 | |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 932 | static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) |
| 933 | { |
| 934 | if (!cur_ops->gp_diff) |
| 935 | return new - old; |
| 936 | return cur_ops->gp_diff(new, old); |
| 937 | } |
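
/*
 * Editorial note: some flavors pack grace-period state into the low-order
 * bits of their sequence counters, so plain subtraction would overstate
 * the number of elapsed grace periods; such flavors supply ->gp_diff and
 * everyone else gets simple subtraction.  Illustrative use, with
 * hypothetical local variables:
 *
 *	unsigned long gp_start = cur_ops->get_gp_seq();
 *	...
 *	nr_gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gp_start);
 */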
| 938 | |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 939 | /* |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 940 | * RCU torture priority-boost testing. Runs one real-time thread per |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 941 | * CPU for moderate bursts, repeatedly starting grace periods and waiting |
| 942 | * for them to complete. If a given grace period takes too long, we assume |
| 943 | * that priority inversion has occurred. |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 944 | */ |
| 945 | |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 946 | static int old_rt_runtime = -1; |
| 947 | |
| 948 | static void rcu_torture_disable_rt_throttle(void) |
| 949 | { |
| 950 | /* |
| 951 | * Disable RT throttling so that rcutorture's boost threads don't get |
| 952 | * throttled. This is only possible if rcutorture is built-in; otherwise, |
| 953 | * the user must do this manually by setting the sched_rt_period_us and |
| 954 | * sched_rt_runtime sysctls. |
| 955 | */ |
| 956 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) |
| 957 | return; |
| 958 | |
| 959 | old_rt_runtime = sysctl_sched_rt_runtime; |
| 960 | sysctl_sched_rt_runtime = -1; |
| 961 | } |
| 962 | |
| 963 | static void rcu_torture_enable_rt_throttle(void) |
| 964 | { |
| 965 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) |
| 966 | return; |
| 967 | |
| 968 | sysctl_sched_rt_runtime = old_rt_runtime; |
| 969 | old_rt_runtime = -1; |
| 970 | } |
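
/*
 * Editorial note: when rcutorture is built as a module, the IS_BUILTIN()
 * checks above cause both functions to return without touching
 * sysctl_sched_rt_runtime.  In that case a tester who wants unthrottled
 * boost kthreads must disable RT throttling by hand, for example with
 * the illustrative shell command:
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 */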
| 971 | |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 972 | static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 973 | { |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 974 | int cpu; |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 975 | static int dbg_done; |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 976 | unsigned long end = jiffies; |
Paul E. McKenney | bcd4af4 | 2021-04-08 10:46:55 -0700 | [diff] [blame] | 977 | bool gp_done; |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 978 | unsigned long j; |
| 979 | static unsigned long last_persist; |
| 980 | unsigned long lp; |
| 981 | unsigned long mininterval = test_boost_duration * HZ - HZ / 2; |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 982 | |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 983 | if (end - *start > mininterval) { |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 984 | // Recheck after checking time to avoid false positives. |
| 985 | smp_mb(); // Time check before grace-period check. |
| 986 | if (cur_ops->poll_gp_state(gp_state)) |
| 987 | return false; // passed, though perhaps just barely |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 988 | if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { |
| 989 | // At most one persisted message per boost test. |
| 990 | j = jiffies; |
| 991 | lp = READ_ONCE(last_persist); |
| 992 | if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) |
| 993 | pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); |
| 994 | return false; // passed on a technicality |
| 995 | } |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 996 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); |
| 997 | n_rcu_torture_boost_failure++; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 998 | if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { |
| 999 | pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1000 | current->rt_priority, gp_state, end - *start); |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 1001 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | bcd4af4 | 2021-04-08 10:46:55 -0700 | [diff] [blame] | 1002 | // Recheck after print to flag grace period ending during splat. |
| 1003 | gp_done = cur_ops->poll_gp_state(gp_state); |
| 1004 | pr_info("Boost inversion: GP %lu %s.\n", gp_state, |
| 1005 | gp_done ? "ended already" : "still pending"); |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1006 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1007 | } |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1008 | |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 1009 | return true; // failed |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1010 | } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { |
| 1011 | *start = jiffies; |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1012 | } |
| 1013 | |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 1014 | return false; // passed |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1015 | } |
| 1016 | |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1017 | static int rcu_torture_boost(void *arg) |
| 1018 | { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1019 | unsigned long endtime; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1020 | unsigned long gp_state; |
| 1021 | unsigned long gp_state_time; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1022 | unsigned long oldstarttime; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1023 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1024 | VERBOSE_TOROUT_STRING("rcu_torture_boost started"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1025 | |
| 1026 | /* Set real-time priority. */ |
Peter Zijlstra | 8b70098 | 2020-04-22 13:10:04 +0200 | [diff] [blame] | 1027 | sched_set_fifo_low(current); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1028 | |
| 1029 | /* Each pass through the following loop does one boost-test cycle. */ |
| 1030 | do { |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 1031 | bool failed = false; // Test failed already in this test interval |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1032 | bool gp_initiated = false; |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1033 | |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1034 | if (kthread_should_stop()) |
| 1035 | goto checkwait; |
| 1036 | |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1037 | /* Wait for the next test interval. */ |
Paul E. McKenney | a47f9f1 | 2021-12-13 11:05:07 -0800 | [diff] [blame] | 1038 | oldstarttime = READ_ONCE(boost_starttime); |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1039 | while (time_before(jiffies, oldstarttime)) { |
Paul E. McKenney | 0e11c8e | 2013-01-10 16:21:07 -0800 | [diff] [blame] | 1040 | schedule_timeout_interruptible(oldstarttime - jiffies); |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1041 | if (stutter_wait("rcu_torture_boost")) |
| 1042 | sched_set_fifo_low(current); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1043 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1044 | goto checkwait; |
| 1045 | } |
| 1046 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1047 | // Do one boost-test interval. |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1048 | endtime = oldstarttime + test_boost_duration * HZ; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1049 | while (time_before(jiffies, endtime)) { |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1050 | // Has current GP gone too long? |
| 1051 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1052 | failed = rcu_torture_boost_failed(gp_state, &gp_state_time); |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1053 | // If we don't have a grace period in flight, start one. |
| 1054 | if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { |
| 1055 | gp_state = cur_ops->start_gp_poll(); |
| 1056 | gp_initiated = true; |
| 1057 | gp_state_time = jiffies; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1058 | } |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1059 | if (stutter_wait("rcu_torture_boost")) { |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1060 | sched_set_fifo_low(current); |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1061 | // If the grace period already ended, |
| 1062 | // we don't know when that happened, so |
| 1063 | // start over. |
| 1064 | if (cur_ops->poll_gp_state(gp_state)) |
| 1065 | gp_initiated = false; |
| 1066 | } |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1067 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1068 | goto checkwait; |
| 1069 | } |
| 1070 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1071 | // In case the grace period extended beyond the end of the loop. |
| 1072 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1073 | rcu_torture_boost_failed(gp_state, &gp_state_time); |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1074 | |
| 1075 | /* |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1076 | * Set the start time of the next test interval. |
| 1077 | * Yes, this is vulnerable to long delays, but such |
| 1078 | * delays simply cause a false negative for the next |
| 1079 | * interval. Besides, we are running at RT priority, |
| 1080 | * so delays should be relatively rare. |
| 1081 | */ |
Paul E. McKenney | a47f9f1 | 2021-12-13 11:05:07 -0800 | [diff] [blame] | 1082 | while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1083 | if (mutex_trylock(&boost_mutex)) { |
Paul E. McKenney | 8c7ec02 | 2021-04-07 20:00:00 -0700 | [diff] [blame] | 1084 | if (oldstarttime == boost_starttime) { |
Paul E. McKenney | a47f9f1 | 2021-12-13 11:05:07 -0800 | [diff] [blame] | 1085 | WRITE_ONCE(boost_starttime, |
| 1086 | jiffies + test_boost_interval * HZ); |
Paul E. McKenney | 8c7ec02 | 2021-04-07 20:00:00 -0700 | [diff] [blame] | 1087 | n_rcu_torture_boosts++; |
| 1088 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1089 | mutex_unlock(&boost_mutex); |
| 1090 | break; |
| 1091 | } |
| 1092 | schedule_timeout_uninterruptible(1); |
| 1093 | } |
| 1094 | |
| 1095 | /* Go do the stutter. */ |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1096 | checkwait: if (stutter_wait("rcu_torture_boost")) |
| 1097 | sched_set_fifo_low(current); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1098 | } while (!torture_must_stop()); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1099 | |
| 1100 | /* Clean up and exit. */ |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1101 | while (!kthread_should_stop()) { |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1102 | torture_shutdown_absorb("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1103 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1104 | } |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1105 | torture_kthread_stopping("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1106 | return 0; |
| 1107 | } |
| 1108 | |
| 1109 | /* |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1110 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
| 1111 | * bursts of calls to force_quiescent_state(), increasing the probability |
| 1112 | * of occurrence of some important types of race conditions. |
| 1113 | */ |
| 1114 | static int |
| 1115 | rcu_torture_fqs(void *arg) |
| 1116 | { |
| 1117 | unsigned long fqs_resume_time; |
| 1118 | int fqs_burst_remaining; |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1119 | int oldnice = task_nice(current); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1120 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1121 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1122 | do { |
| 1123 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1124 | while (time_before(jiffies, fqs_resume_time) && |
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 1125 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1126 | schedule_timeout_interruptible(1); |
| 1127 | } |
| 1128 | fqs_burst_remaining = fqs_duration; |
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 1129 | while (fqs_burst_remaining > 0 && |
| 1130 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1131 | cur_ops->fqs(); |
| 1132 | udelay(fqs_holdoff); |
| 1133 | fqs_burst_remaining -= fqs_holdoff; |
| 1134 | } |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1135 | if (stutter_wait("rcu_torture_fqs")) |
| 1136 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1137 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1138 | torture_kthread_stopping("rcu_torture_fqs"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1139 | return 0; |
| 1140 | } |
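
/*
 * Editorial note: the burst shape follows directly from the code above:
 * the kthread sleeps for fqs_stutter seconds, then issues ->fqs() calls
 * spaced fqs_holdoff microseconds apart until roughly fqs_duration
 * microseconds' worth of calls have been made, and repeats.  With a
 * fqs_duration of zero the burst loop never executes and the kthread
 * merely sleeps.
 */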
| 1141 | |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1142 | // Used by writers to randomly choose from the available grace-period primitives. |
| 1143 | static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1144 | static int nsynctypes; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1145 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1146 | /* |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1147 | * Determine which grace-period primitives are available. |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1148 | */ |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1149 | static void rcu_torture_write_types(void) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1150 | { |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1151 | bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_exp1 = gp_exp; |
| 1152 | bool gp_poll_exp1 = gp_poll_exp, gp_normal1 = gp_normal, gp_poll1 = gp_poll; |
| 1153 | bool gp_sync1 = gp_sync; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1154 | |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1155 | /* Initialize synctype[] array. If none set, take default. */ |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1156 | if (!gp_cond1 && !gp_cond_exp1 && !gp_exp1 && !gp_poll_exp && |
| 1157 | !gp_normal1 && !gp_poll1 && !gp_sync1) |
| 1158 | gp_cond1 = gp_cond_exp1 = gp_exp1 = gp_poll_exp1 = |
| 1159 | gp_normal1 = gp_poll1 = gp_sync1 = true; |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1160 | if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1161 | synctype[nsynctypes++] = RTWS_COND_GET; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1162 | pr_info("%s: Testing conditional GPs.\n", __func__); |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1163 | } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1164 | pr_alert("%s: gp_cond without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1165 | } |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1166 | if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { |
| 1167 | synctype[nsynctypes++] = RTWS_COND_GET_EXP; |
| 1168 | pr_info("%s: Testing conditional expedited GPs.\n", __func__); |
| 1169 | } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { |
| 1170 | pr_alert("%s: gp_cond_exp without primitives.\n", __func__); |
| 1171 | } |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1172 | if (gp_exp1 && cur_ops->exp_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1173 | synctype[nsynctypes++] = RTWS_EXP_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1174 | pr_info("%s: Testing expedited GPs.\n", __func__); |
| 1175 | } else if (gp_exp && !cur_ops->exp_sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1176 | pr_alert("%s: gp_exp without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1177 | } |
| 1178 | if (gp_normal1 && cur_ops->deferred_free) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1179 | synctype[nsynctypes++] = RTWS_DEF_FREE; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1180 | pr_info("%s: Testing asynchronous GPs.\n", __func__); |
| 1181 | } else if (gp_normal && !cur_ops->deferred_free) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1182 | pr_alert("%s: gp_normal without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1183 | } |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1184 | if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) { |
| 1185 | synctype[nsynctypes++] = RTWS_POLL_GET; |
| 1186 | pr_info("%s: Testing polling GPs.\n", __func__); |
| 1187 | } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { |
| 1188 | pr_alert("%s: gp_poll without primitives.\n", __func__); |
| 1189 | } |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1190 | if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { |
| 1191 | synctype[nsynctypes++] = RTWS_POLL_GET_EXP; |
| 1192 | pr_info("%s: Testing polling expedited GPs.\n", __func__); |
| 1193 | } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { |
| 1194 | pr_alert("%s: gp_poll_exp without primitives.\n", __func__); |
| 1195 | } |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1196 | if (gp_sync1 && cur_ops->sync) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1197 | synctype[nsynctypes++] = RTWS_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1198 | pr_info("%s: Testing normal GPs.\n", __func__); |
| 1199 | } else if (gp_sync && !cur_ops->sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1200 | pr_alert("%s: gp_sync without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1201 | } |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1202 | } |
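
/*
 * Editorial note: the gp_cond, gp_cond_exp, gp_exp, gp_normal, gp_poll,
 * gp_poll_exp, and gp_sync module parameters select which grace-period
 * primitives the writers exercise.  If none are set, every primitive that
 * the current flavor supports is used.  An illustrative boot-time
 * restriction to polled and conditional grace periods for a built-in
 * rcutorture might look like:
 *
 *	rcutorture.gp_poll=1 rcutorture.gp_cond=1
 */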
| 1203 | |
| 1204 | /* |
| 1205 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
| 1206 | * for that pointed to by rcu_torture_current, freeing the old structure |
| 1207 | * after a series of grace periods (the "pipeline"). |
| 1208 | */ |
| 1209 | static int |
| 1210 | rcu_torture_writer(void *arg) |
| 1211 | { |
| 1212 | bool boot_ended; |
| 1213 | bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); |
| 1214 | unsigned long cookie; |
| 1215 | int expediting = 0; |
| 1216 | unsigned long gp_snap; |
| 1217 | int i; |
| 1218 | int idx; |
| 1219 | int oldnice = task_nice(current); |
| 1220 | struct rcu_torture *rp; |
| 1221 | struct rcu_torture *old_rp; |
| 1222 | static DEFINE_TORTURE_RANDOM(rand); |
| 1223 | bool stutter_waited; |
| 1224 | |
| 1225 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
| 1226 | if (!can_expedite) |
| 1227 | pr_alert("%s" TORTURE_FLAG |
| 1228 | " GP expediting controlled from boot/sysfs for %s.\n", |
| 1229 | torture_type, cur_ops->name); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1230 | if (WARN_ONCE(nsynctypes == 0, |
David Vernet | 80dcee6 | 2022-03-07 14:46:57 -0800 | [diff] [blame] | 1231 | "%s: No update-side primitives.\n", __func__)) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1232 | /* |
| 1233 | * No update-side primitives, so don't try updating. |
| 1234 | * The resulting test won't be testing much, hence the |
| 1235 | * above WARN_ONCE(). |
| 1236 | */ |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1237 | rcu_torture_writer_state = RTWS_STOPPING; |
| 1238 | torture_kthread_stopping("rcu_torture_writer"); |
David Vernet | 80dcee6 | 2022-03-07 14:46:57 -0800 | [diff] [blame] | 1239 | return 0; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1240 | } |
| 1241 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1242 | do { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1243 | rcu_torture_writer_state = RTWS_FIXED_DELAY; |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1244 | torture_hrtimeout_us(500, 1000, &rand); |
Paul E. McKenney | a71fca5 | 2009-09-18 10:28:19 -0700 | [diff] [blame] | 1245 | rp = rcu_torture_alloc(); |
| 1246 | if (rp == NULL) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1247 | continue; |
| 1248 | rp->rtort_pipe_count = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1249 | rcu_torture_writer_state = RTWS_DELAY; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1250 | udelay(torture_random(&rand) & 0x3ff); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1251 | rcu_torture_writer_state = RTWS_REPLACE; |
Paul E. McKenney | 0ddea0e | 2010-09-19 21:06:14 -0700 | [diff] [blame] | 1252 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 1253 | current == writer_task); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1254 | rp->rtort_mbtest = 1; |
| 1255 | rcu_assign_pointer(rcu_torture_current, rp); |
Paul E. McKenney | 9b2619a | 2009-09-23 09:50:43 -0700 | [diff] [blame] | 1256 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 1257 | if (old_rp) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1258 | i = old_rp->rtort_pipe_count; |
| 1259 | if (i > RCU_TORTURE_PIPE_LEN) |
| 1260 | i = RCU_TORTURE_PIPE_LEN; |
| 1261 | atomic_inc(&rcu_torture_wcount[i]); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1262 | WRITE_ONCE(old_rp->rtort_pipe_count, |
| 1263 | old_rp->rtort_pipe_count + 1); |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1264 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { |
| 1265 | idx = cur_ops->readlock(); |
| 1266 | cookie = cur_ops->get_gp_state(); |
| 1267 | WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && |
| 1268 | cur_ops->poll_gp_state(cookie), |
| 1269 | "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", |
| 1270 | __func__, |
| 1271 | rcu_torture_writer_state_getname(), |
| 1272 | rcu_torture_writer_state, |
| 1273 | cookie, cur_ops->get_gp_state()); |
Paul E. McKenney | d0eac20 | 2022-04-13 16:14:02 -0700 | [diff] [blame] | 1274 | if (cur_ops->get_gp_completed) { |
| 1275 | cookie = cur_ops->get_gp_completed(); |
| 1276 | WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); |
| 1277 | } |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1278 | cur_ops->readunlock(idx); |
| 1279 | } |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1280 | switch (synctype[torture_random(&rand) % nsynctypes]) { |
| 1281 | case RTWS_DEF_FREE: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1282 | rcu_torture_writer_state = RTWS_DEF_FREE; |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1283 | cur_ops->deferred_free(old_rp); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1284 | break; |
| 1285 | case RTWS_EXP_SYNC: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1286 | rcu_torture_writer_state = RTWS_EXP_SYNC; |
Paul E. McKenney | e4333cb | 2022-04-14 09:09:11 -0700 | [diff] [blame] | 1287 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1288 | cookie = cur_ops->get_gp_state(); |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1289 | cur_ops->exp_sync(); |
Paul E. McKenney | e4333cb | 2022-04-14 09:09:11 -0700 | [diff] [blame] | 1290 | cur_ops->exp_sync(); |
| 1291 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1292 | WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1293 | rcu_torture_pipe_update(old_rp); |
| 1294 | break; |
| 1295 | case RTWS_COND_GET: |
| 1296 | rcu_torture_writer_state = RTWS_COND_GET; |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1297 | gp_snap = cur_ops->get_gp_state(); |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1298 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1299 | rcu_torture_writer_state = RTWS_COND_SYNC; |
| 1300 | cur_ops->cond_sync(gp_snap); |
| 1301 | rcu_torture_pipe_update(old_rp); |
| 1302 | break; |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1303 | case RTWS_COND_GET_EXP: |
| 1304 | rcu_torture_writer_state = RTWS_COND_GET_EXP; |
| 1305 | gp_snap = cur_ops->get_gp_state_exp(); |
| 1306 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
| 1307 | rcu_torture_writer_state = RTWS_COND_SYNC_EXP; |
| 1308 | cur_ops->cond_sync_exp(gp_snap); |
| 1309 | rcu_torture_pipe_update(old_rp); |
| 1310 | break; |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1311 | case RTWS_POLL_GET: |
| 1312 | rcu_torture_writer_state = RTWS_POLL_GET; |
| 1313 | gp_snap = cur_ops->start_gp_poll(); |
| 1314 | rcu_torture_writer_state = RTWS_POLL_WAIT; |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1315 | while (!cur_ops->poll_gp_state(gp_snap)) |
| 1316 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1317 | &rand); |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1318 | rcu_torture_pipe_update(old_rp); |
| 1319 | break; |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1320 | case RTWS_POLL_GET_EXP: |
| 1321 | rcu_torture_writer_state = RTWS_POLL_GET_EXP; |
| 1322 | gp_snap = cur_ops->start_gp_poll_exp(); |
| 1323 | rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; |
| 1324 | while (!cur_ops->poll_gp_state_exp(gp_snap)) |
| 1325 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1326 | &rand); |
| 1327 | rcu_torture_pipe_update(old_rp); |
| 1328 | break; |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1329 | case RTWS_SYNC: |
| 1330 | rcu_torture_writer_state = RTWS_SYNC; |
Paul E. McKenney | e4333cb | 2022-04-14 09:09:11 -0700 | [diff] [blame] | 1331 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1332 | cookie = cur_ops->get_gp_state(); |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1333 | cur_ops->sync(); |
Paul E. McKenney | e4333cb | 2022-04-14 09:09:11 -0700 | [diff] [blame] | 1334 | cur_ops->sync(); |
| 1335 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1336 | WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1337 | rcu_torture_pipe_update(old_rp); |
| 1338 | break; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1339 | default: |
| 1340 | WARN_ON_ONCE(1); |
| 1341 | break; |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1342 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1343 | } |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1344 | WRITE_ONCE(rcu_torture_current_version, |
| 1345 | rcu_torture_current_version + 1); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1346 | /* Cycle through nesting levels of rcu_expedite_gp() calls. */ |
| 1347 | if (can_expedite && |
| 1348 | !(torture_random(&rand) & 0xff & (!!expediting - 1))) { |
| 1349 | WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); |
| 1350 | if (expediting >= 0) |
| 1351 | rcu_expedite_gp(); |
| 1352 | else |
| 1353 | rcu_unexpedite_gp(); |
| 1354 | if (++expediting > 3) |
| 1355 | expediting = -expediting; |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1356 | } else if (!can_expedite) { /* Disabled during boot, recheck. */ |
| 1357 | can_expedite = !rcu_gp_is_expedited() && |
| 1358 | !rcu_gp_is_normal(); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1359 | } |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1360 | rcu_torture_writer_state = RTWS_STUTTER; |
Paul E. McKenney | 12a910e | 2020-11-16 16:01:50 -0800 | [diff] [blame] | 1361 | boot_ended = rcu_inkernel_boot_has_ended(); |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1362 | stutter_waited = stutter_wait("rcu_torture_writer"); |
| 1363 | if (stutter_waited && |
Paul E. McKenney | e22ef8d | 2021-12-17 12:33:53 -0800 | [diff] [blame] | 1364 | !atomic_read(&rcu_fwd_cb_nodelay) && |
Paul E. McKenney | 3432d76 | 2019-04-15 14:50:05 -0700 | [diff] [blame] | 1365 | !cur_ops->slow_gps && |
Paul E. McKenney | 59ee032 | 2019-11-28 18:54:06 -0800 | [diff] [blame] | 1366 | !torture_must_stop() && |
Paul E. McKenney | 12a910e | 2020-11-16 16:01:50 -0800 | [diff] [blame] | 1367 | boot_ended) |
Paul E. McKenney | 474e59b | 2018-08-07 14:34:44 -0700 | [diff] [blame] | 1368 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1369 | if (list_empty(&rcu_tortures[i].rtort_free) && |
| 1370 | rcu_access_pointer(rcu_torture_current) != |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1371 | &rcu_tortures[i]) { |
Anna-Maria Behnsen | 14c0017 | 2022-04-11 17:19:03 +0200 | [diff] [blame] | 1372 | tracing_off(); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1373 | WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); |
Anna-Maria Behnsen | 14c0017 | 2022-04-11 17:19:03 +0200 | [diff] [blame] | 1374 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1375 | } |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1376 | if (stutter_waited) |
| 1377 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1378 | } while (!torture_must_stop()); |
Paul E. McKenney | cae7cc6 | 2020-04-26 19:20:37 -0700 | [diff] [blame] | 1379 | rcu_torture_current = NULL; // Let stats task know that we are done. |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1380 | /* Reset expediting back to unexpedited. */ |
| 1381 | if (expediting > 0) |
| 1382 | expediting = -expediting; |
| 1383 | while (can_expedite && expediting++ < 0) |
| 1384 | rcu_unexpedite_gp(); |
| 1385 | WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1386 | if (!can_expedite) |
| 1387 | pr_alert("%s" TORTURE_FLAG |
| 1388 | " Dynamic grace-period expediting was disabled.\n", |
| 1389 | torture_type); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1390 | rcu_torture_writer_state = RTWS_STOPPING; |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1391 | torture_kthread_stopping("rcu_torture_writer"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1392 | return 0; |
| 1393 | } |
| 1394 | |
| 1395 | /* |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1396 | * RCU torture fake writer kthread. Repeatedly calls sync, with a random |
| 1397 | * delay between calls. |
| 1398 | */ |
| 1399 | static int |
| 1400 | rcu_torture_fakewriter(void *arg) |
| 1401 | { |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1402 | unsigned long gp_snap; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1403 | DEFINE_TORTURE_RANDOM(rand); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1404 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1405 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1406 | set_user_nice(current, MAX_NICE); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1407 | |
David Vernet | 39b3cab | 2022-03-07 14:46:55 -0800 | [diff] [blame] | 1408 | if (WARN_ONCE(nsynctypes == 0, |
| 1409 | "%s: No update-side primitives.\n", __func__)) { |
| 1410 | /* |
| 1411 | * No update-side primitives, so don't try updating. |
| 1412 | * The resulting test won't be testing much, hence the |
| 1413 | * above WARN_ONCE(). |
| 1414 | */ |
| 1415 | torture_kthread_stopping("rcu_torture_fakewriter"); |
| 1416 | return 0; |
| 1417 | } |
| 1418 | |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1419 | do { |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1420 | torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1421 | if (cur_ops->cb_barrier != NULL && |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1422 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1423 | cur_ops->cb_barrier(); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1424 | } else { |
| 1425 | switch (synctype[torture_random(&rand) % nsynctypes]) { |
| 1426 | case RTWS_DEF_FREE: |
| 1427 | break; |
| 1428 | case RTWS_EXP_SYNC: |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1429 | cur_ops->exp_sync(); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1430 | break; |
| 1431 | case RTWS_COND_GET: |
| 1432 | gp_snap = cur_ops->get_gp_state(); |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1433 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1434 | cur_ops->cond_sync(gp_snap); |
| 1435 | break; |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1436 | case RTWS_COND_GET_EXP: |
| 1437 | gp_snap = cur_ops->get_gp_state_exp(); |
| 1438 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
| 1439 | cur_ops->cond_sync_exp(gp_snap); |
| 1440 | break; |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1441 | case RTWS_POLL_GET: |
| 1442 | gp_snap = cur_ops->start_gp_poll(); |
| 1443 | while (!cur_ops->poll_gp_state(gp_snap)) { |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1444 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1445 | &rand); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1446 | } |
| 1447 | break; |
Paul E. McKenney | 11d62f0 | 2022-02-01 07:01:20 -0800 | [diff] [blame] | 1448 | case RTWS_POLL_GET_EXP: |
| 1449 | gp_snap = cur_ops->start_gp_poll_exp(); |
| 1450 | while (!cur_ops->poll_gp_state_exp(gp_snap)) { |
| 1451 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1452 | &rand); |
| 1453 | } |
| 1454 | break; |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1455 | case RTWS_SYNC: |
| 1456 | cur_ops->sync(); |
| 1457 | break; |
| 1458 | default: |
| 1459 | WARN_ON_ONCE(1); |
| 1460 | break; |
| 1461 | } |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1462 | } |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1463 | stutter_wait("rcu_torture_fakewriter"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1464 | } while (!torture_must_stop()); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1465 | |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1466 | torture_kthread_stopping("rcu_torture_fakewriter"); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1467 | return 0; |
| 1468 | } |
| 1469 | |
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1470 | static void rcu_torture_timer_cb(struct rcu_head *rhp) |
| 1471 | { |
| 1472 | kfree(rhp); |
| 1473 | } |
| 1474 | |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1475 | // Set up and carry out testing of RCU's global memory ordering. |
| 1476 | static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, |
| 1477 | struct torture_random_state *trsp) |
| 1478 | { |
| 1479 | unsigned long loops; |
Paul E. McKenney | 1afb95f | 2020-12-19 07:34:35 -0800 | [diff] [blame] | 1480 | int noc = torture_num_online_cpus(); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1481 | int rdrchked; |
| 1482 | int rdrchker; |
| 1483 | struct rcu_torture_reader_check *rtrcp; // Me. |
| 1484 | struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. |
| 1485 | struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. |
| 1486 | struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. |
| 1487 | |
| 1488 | if (myid < 0) |
| 1489 | return; // Don't try this from timer handlers. |
| 1490 | |
| 1491 | // Increment my counter. |
| 1492 | rtrcp = &rcu_torture_reader_mbchk[myid]; |
| 1493 | WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); |
| 1494 | |
| 1495 | // Attempt to assign someone else some checking work. |
| 1496 | rdrchked = torture_random(trsp) % nrealreaders; |
| 1497 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 1498 | rdrchker = torture_random(trsp) % nrealreaders; |
| 1499 | rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; |
| 1500 | if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && |
| 1501 | smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. |
| 1502 | !READ_ONCE(rtp->rtort_chkp) && |
| 1503 | !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. |
| 1504 | rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 1505 | WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); |
| 1506 | rtrcp->rtc_chkrdr = rdrchked; |
| 1507 | WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. |
| 1508 | if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || |
| 1509 | cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) |
| 1510 | (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. |
| 1511 | } |
| 1512 | |
| 1513 | // If assigned some completed work, do it! |
| 1514 | rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); |
| 1515 | if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) |
| 1516 | return; // No work or work not yet ready. |
| 1517 | rdrchked = rtrcp_assigner->rtc_chkrdr; |
| 1518 | if (WARN_ON_ONCE(rdrchked < 0)) |
| 1519 | return; |
| 1520 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 1521 | loops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 1522 | atomic_inc(&n_rcu_torture_mbchk_tries); |
| 1523 | if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) |
| 1524 | atomic_inc(&n_rcu_torture_mbchk_fail); |
| 1525 | rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; |
| 1526 | rtrcp_assigner->rtc_ready = 0; |
| 1527 | smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. |
| 1528 | smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. |
| 1529 | } |
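
/*
 * Editorial summary: each reader keeps a private loop counter.  A reader
 * occasionally snapshots some other reader's counter and records a
 * checking assignment through an rcu_torture structure; the assignment is
 * marked ready (->rtc_ready) only after the associated grace period has
 * ended.  The checker then re-reads the checked reader's counter, and if
 * it is seen to be less than the pre-grace-period snapshot, RCU's global
 * memory-ordering guarantee has been violated and
 * n_rcu_torture_mbchk_fail is incremented.
 */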
| 1530 | |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1531 | /* |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1532 | * Do one extension of an RCU read-side critical section using the |
| 1533 | * current reader state in readstate (set to zero for initial entry |
| 1534 | * to extended critical section), set the new state as specified by |
| 1535 | * newstate (set to zero for final exit from extended critical section), |
| 1536 | * and random-number-generator state in trsp. If this is neither the |
| 1537 | * beginning nor the end of the critical section, and if there was actually a |
| 1538 | * change, do a ->read_delay(). |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1539 | */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1540 | static void rcutorture_one_extend(int *readstate, int newstate, |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1541 | struct torture_random_state *trsp, |
| 1542 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1543 | { |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1544 | unsigned long flags; |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1545 | int idxnew1 = -1; |
| 1546 | int idxnew2 = -1; |
| 1547 | int idxold1 = *readstate; |
| 1548 | int idxold2 = idxold1; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1549 | int statesnew = ~*readstate & newstate; |
| 1550 | int statesold = *readstate & ~newstate; |
| 1551 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1552 | WARN_ON_ONCE(idxold2 < 0); |
| 1553 | WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1554 | rtrsp->rt_readstate = newstate; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1555 | |
| 1556 | /* First, put new protection in place to avoid critical-section gap. */ |
| 1557 | if (statesnew & RCUTORTURE_RDR_BH) |
| 1558 | local_bh_disable(); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1559 | if (statesnew & RCUTORTURE_RDR_RBH) |
| 1560 | rcu_read_lock_bh(); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1561 | if (statesnew & RCUTORTURE_RDR_IRQ) |
| 1562 | local_irq_disable(); |
| 1563 | if (statesnew & RCUTORTURE_RDR_PREEMPT) |
| 1564 | preempt_disable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1565 | if (statesnew & RCUTORTURE_RDR_SCHED) |
| 1566 | rcu_read_lock_sched(); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1567 | if (statesnew & RCUTORTURE_RDR_RCU_1) |
| 1568 | idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; |
| 1569 | if (statesnew & RCUTORTURE_RDR_RCU_2) |
| 1570 | idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1571 | |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1572 | /* |
| 1573 | * Next, remove old protection, in decreasing order of strength |
| 1574 | * to avoid unlock paths that aren't safe in the stronger |
| 1575 | * context. Namely: BH cannot be enabled while interrupts are disabled. |
| 1576 | * Additionally, PREEMPT_RT requires that BH be enabled in preemptible |
| 1577 | * context. |
| 1578 | */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1579 | if (statesold & RCUTORTURE_RDR_IRQ) |
| 1580 | local_irq_enable(); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1581 | if (statesold & RCUTORTURE_RDR_PREEMPT) |
| 1582 | preempt_enable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1583 | if (statesold & RCUTORTURE_RDR_SCHED) |
| 1584 | rcu_read_unlock_sched(); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1585 | if (statesold & RCUTORTURE_RDR_BH) |
| 1586 | local_bh_enable(); |
| 1587 | if (statesold & RCUTORTURE_RDR_RBH) |
| 1588 | rcu_read_unlock_bh(); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1589 | if (statesold & RCUTORTURE_RDR_RCU_2) { |
| 1590 | cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); |
| 1591 | WARN_ON_ONCE(idxnew2 != -1); |
| 1592 | idxold2 = 0; |
| 1593 | } |
| 1594 | if (statesold & RCUTORTURE_RDR_RCU_1) { |
Paul E. McKenney | 340170f | 2021-09-24 21:30:26 -0700 | [diff] [blame] | 1595 | bool lockit; |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1596 | |
Paul E. McKenney | 340170f | 2021-09-24 21:30:26 -0700 | [diff] [blame] | 1597 | lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1598 | if (lockit) |
| 1599 | raw_spin_lock_irqsave(¤t->pi_lock, flags); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1600 | cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); |
| 1601 | WARN_ON_ONCE(idxnew1 != -1); |
| 1602 | idxold1 = 0; |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1603 | if (lockit) |
| 1604 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); |
| 1605 | } |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1606 | |
| 1607 | /* Delay if neither beginning nor end and there was a change. */ |
| 1608 | if ((statesnew || statesold) && *readstate && newstate) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1609 | cur_ops->read_delay(trsp, rtrsp); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1610 | |
| 1611 | /* Update the reader state. */ |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1612 | if (idxnew1 == -1) |
| 1613 | idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; |
| 1614 | WARN_ON_ONCE(idxnew1 < 0); |
| 1615 | if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) |
| 1616 | pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); |
| 1617 | if (idxnew2 == -1) |
| 1618 | idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; |
| 1619 | WARN_ON_ONCE(idxnew2 < 0); |
| 1620 | WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); |
| 1621 | *readstate = idxnew1 | idxnew2 | newstate; |
| 1622 | WARN_ON_ONCE(*readstate < 0); |
| 1623 | if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) |
| 1624 | pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1625 | } |
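
/*
 * Editorial note: the reader state word does double duty.  Most bits
 * record which protections (BH, IRQ, preemption, SRCU, and so on) are
 * currently held, while the bits at RCUTORTURE_RDR_SHIFT_1 and
 * RCUTORTURE_RDR_SHIFT_2 hold the index returned by ->readlock() so that
 * the matching ->readunlock() can be handed the correct value for the
 * outer and nested RCU readers, respectively.
 */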
| 1626 | |
| 1627 | /* Return the biggest extendables mask given current RCU and boot parameters. */ |
| 1628 | static int rcutorture_extend_mask_max(void) |
| 1629 | { |
| 1630 | int mask; |
| 1631 | |
| 1632 | WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); |
| 1633 | mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1634 | mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1635 | return mask; |
| 1636 | } |
| 1637 | |
| 1638 | /* Return a random protection state mask, but with at least one bit set. */ |
| 1639 | static int |
| 1640 | rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) |
| 1641 | { |
| 1642 | int mask = rcutorture_extend_mask_max(); |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1643 | unsigned long randmask1 = torture_random(trsp) >> 8; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1644 | unsigned long randmask2 = randmask1 >> 3; |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1645 | unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; |
| 1646 | unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; |
| 1647 | unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1648 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1649 | WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); |
Paul E. McKenney | a3b0e1e5 | 2019-02-28 15:06:13 -0800 | [diff] [blame] | 1650 | /* Mostly only one bit (need preemption!), sometimes lots of bits. */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1651 | if (!(randmask1 & 0x7)) |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1652 | mask = mask & randmask2; |
| 1653 | else |
| 1654 | mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1655 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1656 | // Can't have nested RCU reader without outer RCU reader. |
| 1657 | if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { |
| 1658 | if (oldmask & RCUTORTURE_RDR_RCU_1) |
| 1659 | mask &= ~RCUTORTURE_RDR_RCU_2; |
| 1660 | else |
| 1661 | mask |= RCUTORTURE_RDR_RCU_1; |
| 1662 | } |
| 1663 | |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1664 | /* |
| 1665 | * Can't enable BH with interrupts disabled. |
| 1666 | */ |
| 1667 | if (mask & RCUTORTURE_RDR_IRQ) |
| 1668 | mask |= oldmask & bhs; |
| 1669 | |
| 1670 | /* |
| 1671 | * Ideally these sequences would be detected in debug builds |
| 1672 | * (regardless of RT), but until then don't stop testing |
| 1673 | * them on non-RT. |
| 1674 | */ |
| 1675 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
| 1676 | /* Can't modify BH in atomic context */ |
| 1677 | if (oldmask & preempts_irq) |
| 1678 | mask &= ~bhs; |
| 1679 | if ((oldmask | mask) & preempts_irq) |
| 1680 | mask |= oldmask & bhs; |
| 1681 | } |
| 1682 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1683 | return mask ?: RCUTORTURE_RDR_RCU_1; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1684 | } |
| 1685 | |
| 1686 | /* |
| 1687 | * Do a randomly selected number of extensions of an existing RCU read-side |
| 1688 | * critical section. |
| 1689 | */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1690 | static struct rt_read_seg * |
| 1691 | rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, |
| 1692 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1693 | { |
| 1694 | int i; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1695 | int j; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1696 | int mask = rcutorture_extend_mask_max(); |
| 1697 | |
| 1698 | WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ |
| 1699 | if (!((mask - 1) & mask)) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1700 | return rtrsp; /* Current RCU reader not extendable. */ |
| 1701 | /* Bias towards larger numbers of loops. */ |
| 1702 | i = (torture_random(trsp) >> 3); |
| 1703 | i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; |
| 1704 | for (j = 0; j < i; j++) { |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1705 | mask = rcutorture_extend_mask(*readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1706 | rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1707 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1708 | return &rtrsp[j]; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1709 | } |
| 1710 | |
| 1711 | /* |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1712 | * Do one read-side critical section, returning false if there was |
| 1713 | * no data to read. Can be invoked both from process context and |
| 1714 | * from a timer handler. |
| 1715 | */ |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1716 | static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1717 | { |
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1718 | unsigned long cookie; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1719 | int i; |
Paul E. McKenney | 917963d | 2014-11-21 17:10:16 -0800 | [diff] [blame] | 1720 | unsigned long started; |
Paul E. McKenney | 6b80da4 | 2014-11-21 14:19:26 -0800 | [diff] [blame] | 1721 | unsigned long completed; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1722 | int newstate; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1723 | struct rcu_torture *p; |
| 1724 | int pipe_count; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1725 | int readstate = 0; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1726 | struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; |
| 1727 | struct rt_read_seg *rtrsp = &rtseg[0]; |
| 1728 | struct rt_read_seg *rtrsp1; |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1729 | unsigned long long ts; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1730 | |
Paul E. McKenney | 7752275 | 2020-06-11 16:43:14 -0700 | [diff] [blame] | 1731 | WARN_ON_ONCE(!rcu_is_watching()); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1732 | newstate = rcutorture_extend_mask(readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1733 | rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); |
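| | // Collect a grace-period cookie just inside the newly started reader; |
| | // it is rechecked against poll_gp_state() before the reader is exited. |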
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1734 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1735 | cookie = cur_ops->get_gp_state(); |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1736 | started = cur_ops->get_gp_seq(); |
Steven Rostedt | e4aa0da | 2013-02-04 13:36:13 -0500 | [diff] [blame] | 1737 | ts = rcu_trace_clock_local(); |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 1738 | p = rcu_dereference_check(rcu_torture_current, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 1739 | !cur_ops->readlock_held || cur_ops->readlock_held()); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1740 | if (p == NULL) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1741 | /* Wait for rcu_torture_writer to get underway */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1742 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1743 | return false; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1744 | } |
| 1745 | if (p->rtort_mbtest == 0) |
| 1746 | atomic_inc(&n_rcu_torture_mberror); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1747 | rcu_torture_reader_do_mbchk(myid, p, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1748 | rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1749 | preempt_disable(); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1750 | pipe_count = READ_ONCE(p->rtort_pipe_count); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1751 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { |
| 1752 | /* Should not happen, but... */ |
| 1753 | pipe_count = RCU_TORTURE_PIPE_LEN; |
| 1754 | } |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1755 | completed = cur_ops->get_gp_seq(); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1756 | if (pipe_count > 1) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1757 | do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, |
| 1758 | ts, started, completed); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1759 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1760 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1761 | __this_cpu_inc(rcu_torture_count[pipe_count]); |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 1762 | completed = rcutorture_seq_diff(completed, started); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1763 | if (completed > RCU_TORTURE_PIPE_LEN) { |
| 1764 | /* Should not happen, but... */ |
| 1765 | completed = RCU_TORTURE_PIPE_LEN; |
| 1766 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1767 | __this_cpu_inc(rcu_torture_batch[completed]); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1768 | preempt_enable(); |
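| | // Still within the reader: if the cookie collected at the start of |
| | // this reader now shows a completed grace period, that grace period |
| | // was too short. |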
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1769 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1770 | WARN_ONCE(cur_ops->poll_gp_state(cookie), |
Paul E. McKenney | 7ac3fdf | 2021-02-25 20:56:10 -0800 | [diff] [blame] | 1771 | "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", |
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1772 | __func__, |
| 1773 | rcu_torture_writer_state_getname(), |
| 1774 | rcu_torture_writer_state, |
| 1775 | cookie, cur_ops->get_gp_state()); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1776 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 902d82e6 | 2021-09-22 20:31:44 -0700 | [diff] [blame] | 1777 | WARN_ON_ONCE(readstate); |
Paul E. McKenney | d685514 | 2020-08-11 10:33:39 -0700 | [diff] [blame] | 1778 | // This next splat is expected behavior if the leakpointer module |
| 1779 | // parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. |
| 1780 | WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1781 | |
| 1782 | /* If error or close call, record the sequence of reader protections. */ |
| 1783 | if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { |
| 1784 | i = 0; |
| 1785 | for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) |
| 1786 | err_segs[i++] = *rtrsp1; |
| 1787 | rt_read_nsegs = i; |
| 1788 | } |
| 1789 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1790 | return true; |
| 1791 | } |
| 1792 | |
Paul E. McKenney | 3025520e | 2018-05-22 11:38:47 -0700 | [diff] [blame] | 1793 | static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); |
| 1794 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1795 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1796 | * RCU torture reader from timer handler. Dereferences rcu_torture_current, |
| 1797 | * incrementing the corresponding element of the pipeline array. The |
| 1798 | * counter in the element should never be greater than 1; otherwise, the |
| 1799 | * RCU implementation is broken. |
| 1800 | */ |
| 1801 | static void rcu_torture_timer(struct timer_list *unused) |
| 1802 | { |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1803 | atomic_long_inc(&n_rcu_torture_timers); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1804 | (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); |
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1805 | |
| 1806 | /* Test call_rcu() invocation from interrupt handler. */ |
| 1807 | if (cur_ops->call) { |
| 1808 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); |
| 1809 | |
| 1810 | if (rhp) |
| 1811 | cur_ops->call(rhp, rcu_torture_timer_cb); |
| 1812 | } |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1813 | } |
| 1814 | |
| 1815 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1816 | * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, |
| 1817 | * incrementing the corresponding element of the pipeline array. The |
| 1818 | * counter in the element should never be greater than 1; otherwise, the |
| 1819 | * RCU implementation is broken. |
| 1820 | */ |
| 1821 | static int |
| 1822 | rcu_torture_reader(void *arg) |
| 1823 | { |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1824 | unsigned long lastsleep = jiffies; |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1825 | long myid = (long)arg; |
| 1826 | int mynumonline = myid; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1827 | DEFINE_TORTURE_RANDOM(rand); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1828 | struct timer_list t; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1829 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1830 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1831 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1832 | if (irqreader && cur_ops->irq_capable) |
Kees Cook | fd30b71 | 2017-10-22 17:58:54 -0700 | [diff] [blame] | 1833 | timer_setup_on_stack(&t, rcu_torture_timer, 0); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1834 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1835 | do { |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1836 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1837 | if (!timer_pending(&t)) |
Paul E. McKenney | 6155fec | 2010-02-22 17:05:04 -0800 | [diff] [blame] | 1838 | mod_timer(&t, jiffies + 1); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1839 | } |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1840 | if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1841 | schedule_timeout_interruptible(HZ); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1842 | if (time_after(jiffies, lastsleep) && !torture_must_stop()) { |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1843 | torture_hrtimeout_us(500, 1000, &rand); |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1844 | lastsleep = jiffies + 10; |
| 1845 | } |
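| | // Reader kthreads numbered beyond the current count of online CPUs |
| | // park here, so the number of active readers tracks CPU hotplug. |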
Paul E. McKenney | 1afb95f | 2020-12-19 07:34:35 -0800 | [diff] [blame] | 1846 | while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1847 | schedule_timeout_interruptible(HZ / 5); |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1848 | stutter_wait("rcu_torture_reader"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1849 | } while (!torture_must_stop()); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1850 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1851 | del_timer_sync(&t); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1852 | destroy_timer_on_stack(&t); |
| 1853 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1854 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1855 | torture_kthread_stopping("rcu_torture_reader"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1856 | return 0; |
| 1857 | } |
| 1858 | |
| 1859 | /* |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 1860 | * Randomly toggle CPUs' callback-offload state. This uses hrtimers to |
| 1861 | * increase race probabilities and fuzzes the interval between toggling. |
| 1862 | */ |
| 1863 | static int rcu_nocb_toggle(void *arg) |
| 1864 | { |
| 1865 | int cpu; |
| 1866 | int maxcpu = -1; |
| 1867 | int oldnice = task_nice(current); |
| 1868 | long r; |
| 1869 | DEFINE_TORTURE_RANDOM(rand); |
| 1870 | ktime_t toggle_delay; |
| 1871 | unsigned long toggle_fuzz; |
| 1872 | ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); |
| 1873 | |
| 1874 | VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); |
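| | // Don't start toggling until the kernel has finished booting, then |
| | // find the highest-numbered online CPU to bound the random choice. |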
| 1875 | while (!rcu_inkernel_boot_has_ended()) |
| 1876 | schedule_timeout_interruptible(HZ / 10); |
| 1877 | for_each_online_cpu(cpu) |
| 1878 | maxcpu = cpu; |
| 1879 | WARN_ON(maxcpu < 0); |
| 1880 | if (toggle_interval > ULONG_MAX) |
| 1881 | toggle_fuzz = ULONG_MAX >> 3; |
| 1882 | else |
| 1883 | toggle_fuzz = toggle_interval >> 3; |
| 1884 | if (toggle_fuzz <= 0) |
| 1885 | toggle_fuzz = NSEC_PER_USEC; |
| 1886 | do { |
| 1887 | r = torture_random(&rand); |
| 1888 | cpu = (r >> 4) % (maxcpu + 1); |
| 1889 | if (r & 0x1) { |
| 1890 | rcu_nocb_cpu_offload(cpu); |
| 1891 | atomic_long_inc(&n_nocb_offload); |
| 1892 | } else { |
| 1893 | rcu_nocb_cpu_deoffload(cpu); |
| 1894 | atomic_long_inc(&n_nocb_deoffload); |
| 1895 | } |
| 1896 | toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; |
| 1897 | set_current_state(TASK_INTERRUPTIBLE); |
| 1898 | schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); |
| 1899 | if (stutter_wait("rcu_nocb_toggle")) |
| 1900 | sched_set_normal(current, oldnice); |
| 1901 | } while (!torture_must_stop()); |
| 1902 | torture_kthread_stopping("rcu_nocb_toggle"); |
| 1903 | return 0; |
| 1904 | } |
| 1905 | |
| 1906 | /* |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1907 | * Print torture statistics. Caller must ensure that there is only |
| 1908 | * one call to this function at a given time!!! This is normally |
| 1909 | * accomplished by relying on the module system to only have one copy |
| 1910 | * of the module loaded, and then by giving the rcu_torture_stats |
| 1911 | * kthread full control (or the init/cleanup functions when the |
| 1912 | * rcu_torture_stats kthread is not running). |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1913 | */ |
Chen Gang | d100895 | 2013-11-07 10:30:25 +0800 | [diff] [blame] | 1914 | static void |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1915 | rcu_torture_stats_print(void) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1916 | { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1917 | int cpu; |
| 1918 | int i; |
| 1919 | long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
| 1920 | long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1921 | struct rcu_torture *rtcp; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1922 | static unsigned long rtcv_snap = ULONG_MAX; |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 1923 | static bool splatted; |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 1924 | struct task_struct *wtp; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1925 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 1926 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1927 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Paul E. McKenney | f042a43 | 2020-01-03 16:27:00 -0800 | [diff] [blame] | 1928 | pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); |
| 1929 | batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1930 | } |
| 1931 | } |
Paul E. McKenney | 8c0666d | 2022-05-20 13:18:16 -0700 | [diff] [blame] | 1932 | for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1933 | if (pipesummary[i] != 0) |
| 1934 | break; |
| 1935 | } |
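| | // At this point, i indexes the deepest pipeline stage in which any |
| | // reader saw a torture element; i > 1 indicates a too-short grace period. |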
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1936 | |
| 1937 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1938 | rtcp = rcu_access_pointer(rcu_torture_current); |
Paul E. McKenney | 354ea05 | 2019-05-25 12:36:53 -0700 | [diff] [blame] | 1939 | pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1940 | rtcp, |
| 1941 | rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1942 | rcu_torture_current_version, |
| 1943 | list_empty(&rcu_torture_freelist), |
| 1944 | atomic_read(&n_rcu_torture_alloc), |
| 1945 | atomic_read(&n_rcu_torture_alloc_fail), |
| 1946 | atomic_read(&n_rcu_torture_free)); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1947 | pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1948 | atomic_read(&n_rcu_torture_mberror), |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1949 | atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), |
SeongJae Park | 472213a | 2016-08-13 15:54:35 +0900 | [diff] [blame] | 1950 | n_rcu_torture_barrier_error, |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1951 | n_rcu_torture_boost_ktrerror, |
| 1952 | n_rcu_torture_boost_rterror); |
| 1953 | pr_cont("rtbf: %ld rtb: %ld nt: %ld ", |
| 1954 | n_rcu_torture_boost_failure, |
| 1955 | n_rcu_torture_boosts, |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1956 | atomic_long_read(&n_rcu_torture_timers)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1957 | torture_onoff_stats(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1958 | pr_cont("barrier: %ld/%ld:%ld ", |
Paul E. McKenney | c9527be | 2020-02-18 13:41:02 -0800 | [diff] [blame] | 1959 | data_race(n_barrier_successes), |
| 1960 | data_race(n_barrier_attempts), |
| 1961 | data_race(n_rcu_torture_barrier_error)); |
Paul E. McKenney | f759081 | 2020-12-21 11:17:16 -0800 | [diff] [blame] | 1962 | pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 1963 | pr_cont("nocb-toggles: %ld:%ld\n", |
| 1964 | atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1965 | |
| 1966 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1967 | if (atomic_read(&n_rcu_torture_mberror) || |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1968 | atomic_read(&n_rcu_torture_mbchk_fail) || |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1969 | n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || |
| 1970 | n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 1971 | i > 1) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1972 | pr_cont("%s", "!!! "); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1973 | atomic_inc(&n_rcu_torture_error); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1974 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1975 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1976 | WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() |
| 1977 | WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread |
| 1978 | WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 1979 | WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1980 | WARN_ON_ONCE(i > 1); // Too-short grace period |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1981 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1982 | pr_cont("Reader Pipe: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1983 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1984 | pr_cont(" %ld", pipesummary[i]); |
| 1985 | pr_cont("\n"); |
| 1986 | |
| 1987 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1988 | pr_cont("Reader Batch: "); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1989 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1990 | pr_cont(" %ld", batchsummary[i]); |
| 1991 | pr_cont("\n"); |
| 1992 | |
| 1993 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1994 | pr_cont("Free-Block Circulation: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1995 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1996 | pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1997 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1998 | pr_cont("\n"); |
| 1999 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2000 | if (cur_ops->stats) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 2001 | cur_ops->stats(); |
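| | // If the writer has made no progress since the previous stats pass |
| | // and stall warnings are not suppressed, dump writer-state and |
| | // grace-period kthread diagnostics. |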
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 2002 | if (rtcv_snap == rcu_torture_current_version && |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 2003 | rcu_access_pointer(rcu_torture_current) && |
| 2004 | !rcu_stall_is_suppressed()) { |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 2005 | int __maybe_unused flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2006 | unsigned long __maybe_unused gp_seq = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 2007 | |
| 2008 | rcutorture_get_gp_data(cur_ops->ttype, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2009 | &flags, &gp_seq); |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 2010 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2011 | &flags, &gp_seq); |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 2012 | wtp = READ_ONCE(writer_task); |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 2013 | pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", |
Paul E. McKenney | 18aff33 | 2015-11-17 13:35:28 -0800 | [diff] [blame] | 2014 | rcu_torture_writer_state_getname(), |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2015 | rcu_torture_writer_state, gp_seq, flags, |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 2016 | wtp == NULL ? ~0U : wtp->__state, |
Paul E. McKenney | 808de39 | 2017-06-19 10:03:22 -0700 | [diff] [blame] | 2017 | wtp == NULL ? -1 : (int)task_cpu(wtp)); |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 2018 | if (!splatted && wtp) { |
| 2019 | sched_show_task(wtp); |
| 2020 | splatted = true; |
| 2021 | } |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 2022 | if (cur_ops->gp_kthread_dbg) |
| 2023 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 2024 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 2025 | } |
| 2026 | rtcv_snap = rcu_torture_current_version; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2027 | } |
| 2028 | |
| 2029 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2030 | * Periodically prints torture statistics, if periodic statistics printing |
| 2031 | * was specified via the stat_interval module parameter. |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2032 | */ |
| 2033 | static int |
| 2034 | rcu_torture_stats(void *arg) |
| 2035 | { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2036 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2037 | do { |
| 2038 | schedule_timeout_interruptible(stat_interval * HZ); |
| 2039 | rcu_torture_stats_print(); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 2040 | torture_shutdown_absorb("rcu_torture_stats"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2041 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2042 | torture_kthread_stopping("rcu_torture_stats"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2043 | return 0; |
| 2044 | } |
| 2045 | |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2046 | /* Test mem_dump_obj() and friends. */ |
| 2047 | static void rcu_torture_mem_dump_obj(void) |
| 2048 | { |
| 2049 | struct rcu_head *rhp; |
| 2050 | struct kmem_cache *kcp; |
| 2051 | static int z; |
| 2052 | |
| 2053 | kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); |
Li Qiong | 1a5ca5e | 2022-06-12 14:48:25 +0800 | [diff] [blame] | 2054 | if (WARN_ON_ONCE(!kcp)) |
| 2055 | return; |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2056 | rhp = kmem_cache_alloc(kcp, GFP_KERNEL); |
Li Qiong | 1a5ca5e | 2022-06-12 14:48:25 +0800 | [diff] [blame] | 2057 | if (WARN_ON_ONCE(!rhp)) { |
| 2058 | kmem_cache_destroy(kcp); |
| 2059 | return; |
| 2060 | } |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2061 | pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); |
| 2062 | pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); |
| 2063 | mem_dump_obj(ZERO_SIZE_PTR); |
| 2064 | pr_alert("mem_dump_obj(NULL):"); |
| 2065 | mem_dump_obj(NULL); |
| 2066 | pr_alert("mem_dump_obj(%px):", &rhp); |
| 2067 | mem_dump_obj(&rhp); |
| 2068 | pr_alert("mem_dump_obj(%px):", rhp); |
| 2069 | mem_dump_obj(rhp); |
| 2070 | pr_alert("mem_dump_obj(%px):", &rhp->func); |
| 2071 | mem_dump_obj(&rhp->func); |
| 2072 | pr_alert("mem_dump_obj(%px):", &z); |
| 2073 | mem_dump_obj(&z); |
| 2074 | kmem_cache_free(kcp, rhp); |
| 2075 | kmem_cache_destroy(kcp); |
| 2076 | rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
Li Qiong | 1a5ca5e | 2022-06-12 14:48:25 +0800 | [diff] [blame] | 2077 | if (WARN_ON_ONCE(!rhp)) |
| 2078 | return; |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2079 | pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); |
| 2080 | pr_alert("mem_dump_obj(kmalloc %px):", rhp); |
| 2081 | mem_dump_obj(rhp); |
| 2082 | pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); |
| 2083 | mem_dump_obj(&rhp->func); |
| 2084 | kfree(rhp); |
| 2085 | rhp = vmalloc(4096); |
Li Qiong | 1a5ca5e | 2022-06-12 14:48:25 +0800 | [diff] [blame] | 2086 | if (WARN_ON_ONCE(!rhp)) |
| 2087 | return; |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2088 | pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); |
| 2089 | pr_alert("mem_dump_obj(vmalloc %px):", rhp); |
| 2090 | mem_dump_obj(rhp); |
| 2091 | pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); |
| 2092 | mem_dump_obj(&rhp->func); |
| 2093 | vfree(rhp); |
| 2094 | } |
| 2095 | |
Paul E. McKenney | eac45e5 | 2018-05-17 11:33:17 -0700 | [diff] [blame] | 2096 | static void |
Steven Rostedt (Red Hat) | e66c33d | 2013-07-12 16:50:28 -0400 | [diff] [blame] | 2097 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 2098 | { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2099 | pr_alert("%s" TORTURE_FLAG |
| 2100 | "--- %s: nreaders=%d nfakewriters=%d " |
| 2101 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
| 2102 | "shuffle_interval=%d stutter=%d irqreader=%d " |
| 2103 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
| 2104 | "test_boost=%d/%d test_boost_interval=%d " |
| 2105 | "test_boost_duration=%d shutdown_secs=%d " |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2106 | "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2107 | "stall_cpu_block=%d " |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 2108 | "n_barrier_cbs=%d " |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2109 | "onoff_interval=%d onoff_holdoff=%d " |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 2110 | "read_exit_delay=%d read_exit_burst=%d " |
| 2111 | "nocbs_nthreads=%d nocbs_toggle=%d\n", |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2112 | torture_type, tag, nrealreaders, nfakewriters, |
| 2113 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
| 2114 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
| 2115 | test_boost, cur_ops->can_boost, |
| 2116 | test_boost_interval, test_boost_duration, shutdown_secs, |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2117 | stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2118 | stall_cpu_block, |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 2119 | n_barrier_cbs, |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2120 | onoff_interval, onoff_holdoff, |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 2121 | read_exit_delay, read_exit_burst, |
| 2122 | nocbs_nthreads, nocbs_toggle); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 2123 | } |
| 2124 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2125 | static int rcutorture_booster_cleanup(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2126 | { |
| 2127 | struct task_struct *t; |
| 2128 | |
| 2129 | if (boost_tasks[cpu] == NULL) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2130 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2131 | mutex_lock(&boost_mutex); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2132 | t = boost_tasks[cpu]; |
| 2133 | boost_tasks[cpu] = NULL; |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 2134 | rcu_torture_enable_rt_throttle(); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2135 | mutex_unlock(&boost_mutex); |
| 2136 | |
| 2137 | /* This must be outside of the mutex, otherwise deadlock! */ |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2138 | torture_stop_kthread(rcu_torture_boost, t); |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2139 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2140 | } |
| 2141 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2142 | static int rcutorture_booster_init(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2143 | { |
| 2144 | int retval; |
| 2145 | |
| 2146 | if (boost_tasks[cpu] != NULL) |
| 2147 | return 0; /* Already created, nothing more to do. */ |
| 2148 | |
Frederic Weisbecker | 3002153 | 2022-06-10 15:03:57 +0200 | [diff] [blame] | 2149 | // Testing RCU priority boosting requires that rcutorture do |
| 2150 | // some serious abuse. Counter this by running ksoftirqd |
| 2151 | // at higher priority. |
| 2152 | if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { |
| 2153 | struct sched_param sp; |
| 2154 | struct task_struct *t; |
| 2155 | |
| 2156 | t = per_cpu(ksoftirqd, cpu); |
| 2157 | WARN_ON_ONCE(!t); |
| 2158 | sp.sched_priority = 2; |
| 2159 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| 2160 | } |
| 2161 | |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2162 | /* Don't allow time recalculation while creating a new task. */ |
| 2163 | mutex_lock(&boost_mutex); |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 2164 | rcu_torture_disable_rt_throttle(); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2165 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); |
Cai Huoqing | 3b9cb4b | 2022-01-14 14:03:02 -0800 | [diff] [blame] | 2166 | boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, |
| 2167 | cpu, "rcu_torture_boost_%u"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2168 | if (IS_ERR(boost_tasks[cpu])) { |
| 2169 | retval = PTR_ERR(boost_tasks[cpu]); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2170 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2171 | n_rcu_torture_boost_ktrerror++; |
| 2172 | boost_tasks[cpu] = NULL; |
| 2173 | mutex_unlock(&boost_mutex); |
| 2174 | return retval; |
| 2175 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2176 | mutex_unlock(&boost_mutex); |
| 2177 | return 0; |
| 2178 | } |
| 2179 | |
Paul E. McKenney | d5f546d | 2011-11-04 11:44:12 -0700 | [diff] [blame] | 2180 | /* |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2181 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then |
| 2182 | * induces a CPU stall for the time specified by stall_cpu. |
| 2183 | */ |
Paul Gortmaker | 49fb4c6 | 2013-06-19 14:52:21 -0400 | [diff] [blame] | 2184 | static int rcu_torture_stall(void *args) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2185 | { |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2186 | int idx; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2187 | unsigned long stop_at; |
| 2188 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2189 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2190 | if (stall_cpu_holdoff > 0) { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2191 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2192 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2193 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2194 | } |
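| | // If requested, stall the RCU grace-period kthread for stall_gp_kthread |
| | // seconds, sleeping here slightly longer while checking for shutdown. |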
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 2195 | if (!kthread_should_stop() && stall_gp_kthread > 0) { |
| 2196 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); |
| 2197 | rcu_gp_set_torture_wait(stall_gp_kthread * HZ); |
| 2198 | for (idx = 0; idx < stall_gp_kthread + 2; idx++) { |
| 2199 | if (kthread_should_stop()) |
| 2200 | break; |
| 2201 | schedule_timeout_uninterruptible(HZ); |
| 2202 | } |
| 2203 | } |
| 2204 | if (!kthread_should_stop() && stall_cpu > 0) { |
| 2205 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); |
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 2206 | stop_at = ktime_get_seconds() + stall_cpu; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2207 | /* RCU CPU stall is expected behavior in following code. */ |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2208 | idx = cur_ops->readlock(); |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2209 | if (stall_cpu_irqsoff) |
| 2210 | local_irq_disable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2211 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2212 | preempt_disable(); |
Stephen Zhang | 0a27fff | 2021-01-23 17:54:17 +0800 | [diff] [blame] | 2213 | pr_alert("%s start on CPU %d.\n", |
| 2214 | __func__, raw_smp_processor_id()); |
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 2215 | while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), |
| 2216 | stop_at)) |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2217 | if (stall_cpu_block) { |
| 2218 | #ifdef CONFIG_PREEMPTION |
| 2219 | preempt_schedule(); |
| 2220 | #else |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2221 | schedule_timeout_uninterruptible(HZ); |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2222 | #endif |
Wander Lairson Costa | 5ff7c9f | 2021-11-10 11:37:45 -0300 | [diff] [blame] | 2223 | } else if (stall_no_softlockup) { |
| 2224 | touch_softlockup_watchdog(); |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2225 | } |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2226 | if (stall_cpu_irqsoff) |
| 2227 | local_irq_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2228 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2229 | preempt_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2230 | cur_ops->readunlock(idx); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2231 | } |
Stephen Zhang | 0a27fff | 2021-01-23 17:54:17 +0800 | [diff] [blame] | 2232 | pr_alert("%s end.\n", __func__); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 2233 | torture_shutdown_absorb("rcu_torture_stall"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2234 | while (!kthread_should_stop()) |
| 2235 | schedule_timeout_interruptible(10 * HZ); |
| 2236 | return 0; |
| 2237 | } |
| 2238 | |
| 2239 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ |
| 2240 | static int __init rcu_torture_stall_init(void) |
| 2241 | { |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 2242 | if (stall_cpu <= 0 && stall_gp_kthread <= 0) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2243 | return 0; |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2244 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2245 | } |
| 2246 | |
Paul E. McKenney | 9fdcb9a | 2018-07-19 13:36:00 -0700 | [diff] [blame] | 2247 | /* State structure for forward-progress self-propagating RCU callback. */ |
| 2248 | struct fwd_cb_state { |
| 2249 | struct rcu_head rh; |
| 2250 | int stop; |
| 2251 | }; |
| 2252 | |
| 2253 | /* |
| 2254 | * Forward-progress self-propagating RCU callback function. Because |
| 2255 | * callbacks run from softirq, this function is an implicit RCU read-side |
| 2256 | * critical section. |
| 2257 | */ |
| 2258 | static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) |
| 2259 | { |
| 2260 | struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); |
| 2261 | |
| 2262 | if (READ_ONCE(fcsp->stop)) { |
| 2263 | WRITE_ONCE(fcsp->stop, 2); |
| 2264 | return; |
| 2265 | } |
| 2266 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); |
| 2267 | } |
| 2268 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2269 | /* State for continuous-flood RCU callbacks. */ |
| 2270 | struct rcu_fwd_cb { |
| 2271 | struct rcu_head rh; |
| 2272 | struct rcu_fwd_cb *rfc_next; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2273 | struct rcu_fwd *rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2274 | int rfc_gps; |
| 2275 | }; |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2276 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2277 | #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ |
| 2278 | #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ |
| 2279 | #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ |
Paul E. McKenney | 2e57bf9 | 2018-10-05 16:43:09 -0700 | [diff] [blame] | 2280 | #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2281 | #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) |
| 2282 | |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2283 | struct rcu_launder_hist { |
| 2284 | long n_launders; |
| 2285 | unsigned long launder_gp_seq; |
| 2286 | }; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2287 | |
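| | /* Per-kthread state for the callback-flood forward-progress test. */ |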
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2288 | struct rcu_fwd { |
| 2289 | spinlock_t rcu_fwd_lock; |
| 2290 | struct rcu_fwd_cb *rcu_fwd_cb_head; |
| 2291 | struct rcu_fwd_cb **rcu_fwd_cb_tail; |
| 2292 | long n_launders_cb; |
| 2293 | unsigned long rcu_fwd_startat; |
| 2294 | struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; |
| 2295 | unsigned long rcu_launder_gp_seq_start; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2296 | int rcu_fwd_id; |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2297 | }; |
| 2298 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2299 | static DEFINE_MUTEX(rcu_fwd_mutex); |
Jason Yan | afbc157 | 2020-04-09 19:42:38 +0800 | [diff] [blame] | 2300 | static struct rcu_fwd *rcu_fwds; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2301 | static unsigned long rcu_fwd_seq; |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2302 | static atomic_long_t rcu_fwd_max_cbs; |
Jason Yan | afbc157 | 2020-04-09 19:42:38 +0800 | [diff] [blame] | 2303 | static bool rcu_fwd_emergency_stop; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2304 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2305 | static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2306 | { |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2307 | unsigned long gps; |
| 2308 | unsigned long gps_old; |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2309 | int i; |
| 2310 | int j; |
| 2311 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2312 | for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) |
| 2313 | if (rfp->n_launders_hist[i].n_launders > 0) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2314 | break; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2315 | pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", |
| 2316 | __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2317 | gps_old = rfp->rcu_launder_gp_seq_start; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2318 | for (j = 0; j <= i; j++) { |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2319 | gps = rfp->n_launders_hist[j].launder_gp_seq; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2320 | pr_cont(" %ds/%d: %ld:%ld", |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2321 | j + 1, FWD_CBS_HIST_DIV, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2322 | rfp->n_launders_hist[j].n_launders, |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2323 | rcutorture_seq_diff(gps, gps_old)); |
| 2324 | gps_old = gps; |
| 2325 | } |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2326 | pr_cont("\n"); |
| 2327 | } |
| 2328 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2329 | /* Callback function for continuous-flood RCU callbacks. */ |
| 2330 | static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) |
| 2331 | { |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2332 | unsigned long flags; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2333 | int i; |
| 2334 | struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); |
| 2335 | struct rcu_fwd_cb **rfcpp; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2336 | struct rcu_fwd *rfp = rfcp->rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2337 | |
| 2338 | rfcp->rfc_next = NULL; |
| 2339 | rfcp->rfc_gps++; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2340 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 2341 | rfcpp = rfp->rcu_fwd_cb_tail; |
| 2342 | rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2343 | WRITE_ONCE(*rfcpp, rfcp); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2344 | WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); |
| 2345 | i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); |
| 2346 | if (i >= ARRAY_SIZE(rfp->n_launders_hist)) |
| 2347 | i = ARRAY_SIZE(rfp->n_launders_hist) - 1; |
| 2348 | rfp->n_launders_hist[i].n_launders++; |
| 2349 | rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); |
| 2350 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2351 | } |
| 2352 | |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2353 | // Give the scheduler a chance, even on nohz_full CPUs. |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2354 | static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2355 | { |
Sebastian Andrzej Siewior | 90326f0 | 2019-10-15 21:18:14 +0200 | [diff] [blame] | 2356 | if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2357 | // Real call_rcu() floods hit userspace, so emulate that. |
| 2358 | if (need_resched() || (iter & 0xfff)) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2359 | schedule(); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2360 | return; |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2361 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2362 | // No userspace emulation: CB invocation throttles call_rcu() |
| 2363 | cond_resched(); |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2364 | } |
| 2365 | |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2366 | /* |
| 2367 | * Free all callbacks on the rcu_fwd_cb_head list, either because the |
| 2368 | * test is over or because we hit an OOM event. |
| 2369 | */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2370 | static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2371 | { |
| 2372 | unsigned long flags; |
| 2373 | unsigned long freed = 0; |
| 2374 | struct rcu_fwd_cb *rfcp; |
| 2375 | |
| 2376 | for (;;) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2377 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 2378 | rfcp = rfp->rcu_fwd_cb_head; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 2379 | if (!rfcp) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2380 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2381 | break; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 2382 | } |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2383 | rfp->rcu_fwd_cb_head = rfcp->rfc_next; |
| 2384 | if (!rfp->rcu_fwd_cb_head) |
| 2385 | rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
| 2386 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2387 | kfree(rfcp); |
| 2388 | freed++; |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2389 | rcu_torture_fwd_prog_cond_resched(freed); |
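| | // On kernels with nohz_full CPUs, supply an explicit momentary |
| | // quiescent state so that this freeing loop does not itself |
| | // extend the current grace period. |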
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2390 | if (tick_nohz_full_enabled()) { |
| 2391 | local_irq_save(flags); |
| 2392 | rcu_momentary_dyntick_idle(); |
| 2393 | local_irq_restore(flags); |
| 2394 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2395 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2396 | return freed; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2397 | } |
| 2398 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2399 | /* Carry out need_resched()/cond_resched() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2400 | static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, |
| 2401 | int *tested, int *tested_tries) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2402 | { |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 2403 | unsigned long cver; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2404 | unsigned long dur; |
Paul E. McKenney | 7c590fc | 2018-08-07 16:42:42 -0700 | [diff] [blame] | 2405 | struct fwd_cb_state fcs; |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 2406 | unsigned long gps; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2407 | int idx; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2408 | int sd; |
| 2409 | int sd4; |
| 2410 | bool selfpropcb = false; |
| 2411 | unsigned long stopat; |
| 2412 | static DEFINE_TORTURE_RANDOM(trs); |
| 2413 | |
Paul E. McKenney | 05b7246 | 2021-12-16 15:36:02 -0800 | [diff] [blame] | 2414 | pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); |
Paul E. McKenney | a7eb937 | 2020-10-09 19:51:55 -0700 | [diff] [blame] | 2415 | if (!cur_ops->sync) |
| 2416 | return; // Cannot do need_resched() forward progress testing without ->sync. |
| 2417 | if (cur_ops->call && cur_ops->cb_barrier) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2418 | init_rcu_head_on_stack(&fcs.rh); |
| 2419 | selfpropcb = true; |
| 2420 | } |
| 2421 | |
| 2422 | /* Tight loop containing cond_resched(). */ |
Paul E. McKenney | e22ef8d | 2021-12-17 12:33:53 -0800 | [diff] [blame] | 2423 | atomic_inc(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2424 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2425 | if (selfpropcb) { |
| 2426 | WRITE_ONCE(fcs.stop, 0); |
| 2427 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); |
| 2428 | } |
| 2429 | cver = READ_ONCE(rcu_torture_current_version); |
| 2430 | gps = cur_ops->get_gp_seq(); |
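| | // Pick a random tight-loop duration that is at least 1/fwd_progress_div
| | // of the RCU CPU stall timeout (->stall_dur()) yet still shorter than
| | // that timeout, so this loop should not itself provoke a stall warning.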
| 2431 | sd = cur_ops->stall_dur() + 1; |
| 2432 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; |
| 2433 | dur = sd4 + torture_random(&trs) % (sd - sd4); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2434 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 2435 | stopat = rfp->rcu_fwd_startat + dur; |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2436 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2437 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2438 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2439 | idx = cur_ops->readlock(); |
| 2440 | udelay(10); |
| 2441 | cur_ops->readunlock(idx); |
| 2442 | if (!fwd_progress_need_resched || need_resched()) |
Paul E. McKenney | fbbd5e3 | 2019-08-15 11:43:53 -0700 | [diff] [blame] | 2443 | cond_resched(); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2444 | } |
| 2445 | (*tested_tries)++; |
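| | // Credit a completed test (not just an attempt) only if the loop ran for
| | // its full duration rather than being cut short by shutdown, an emergency
| | // stop, or a request to stop this kthread.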
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2446 | if (!time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2447 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2448 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2449 | (*tested)++; |
| 2450 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 2451 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
| 2452 | WARN_ON(!cver && gps < 2); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2453 | pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, |
| 2454 | rfp->rcu_fwd_id, dur, cver, gps); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2455 | } |
| 2456 | if (selfpropcb) { |
| 2457 | WRITE_ONCE(fcs.stop, 1); |
| 2458 | cur_ops->sync(); /* Wait for running CB to complete. */ |
Paul E. McKenney | 05b7246 | 2021-12-16 15:36:02 -0800 | [diff] [blame] | 2459 | pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2460 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ |
| 2461 | } |
| 2462 | |
| 2463 | if (selfpropcb) { |
| 2464 | WARN_ON(READ_ONCE(fcs.stop) != 2); |
| 2465 | destroy_rcu_head_on_stack(&fcs.rh); |
| 2466 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2467 | schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ |
Paul E. McKenney | e22ef8d | 2021-12-17 12:33:53 -0800 | [diff] [blame] | 2468 | atomic_dec(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2469 | } |
| 2470 | |
| 2471 | /* Carry out call_rcu() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2472 | static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2473 | { |
| 2474 | unsigned long cver; |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2475 | unsigned long flags; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2476 | unsigned long gps; |
| 2477 | int i; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2478 | long n_launders; |
| 2479 | long n_launders_cb_snap; |
| 2480 | long n_launders_sa; |
| 2481 | long n_max_cbs; |
| 2482 | long n_max_gps; |
| 2483 | struct rcu_fwd_cb *rfcp; |
| 2484 | struct rcu_fwd_cb *rfcpn; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2485 | unsigned long stopat; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2486 | unsigned long stoppedat; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2487 | |
Paul E. McKenney | 05b7246 | 2021-12-16 15:36:02 -0800 | [diff] [blame] | 2488 | pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2489 | if (READ_ONCE(rcu_fwd_emergency_stop)) |
| 2490 | return; /* Get out of the way quickly, no GP wait! */ |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 2491 | if (!cur_ops->call) |
| 2492 | return; /* Can't do call_rcu() fwd prog without ->call. */ |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2493 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2494 | /* Loop continuously posting RCU callbacks. */ |
Paul E. McKenney | e22ef8d | 2021-12-17 12:33:53 -0800 | [diff] [blame] | 2495 | atomic_inc(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2496 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2497 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 2498 | stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2499 | n_launders = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2500 | rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2501 | n_launders_sa = 0; |
| 2502 | n_max_cbs = 0; |
| 2503 | n_max_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2504 | for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) |
| 2505 | rfp->n_launders_hist[i].n_launders = 0; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2506 | cver = READ_ONCE(rcu_torture_current_version); |
| 2507 | gps = cur_ops->get_gp_seq(); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2508 | rfp->rcu_launder_gp_seq_start = gps; |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2509 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2510 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2511 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2512 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
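| | // If at least two callbacks are queued, re-post ("launder") the oldest one;
| | // otherwise allocate a new callback, bounded by ->cbflood_max when the RCU
| | // flavor specifies a limit.  Stop once MIN_FWD_CBS_LAUNDERED callbacks have
| | // each been laundered through at least MIN_FWD_CB_LAUNDERS grace periods.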
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2513 | rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2514 | rfcpn = NULL; |
| 2515 | if (rfcp) |
| 2516 | rfcpn = READ_ONCE(rfcp->rfc_next); |
| 2517 | if (rfcpn) { |
| 2518 | if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && |
| 2519 | ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) |
| 2520 | break; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2521 | rfp->rcu_fwd_cb_head = rfcpn; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2522 | n_launders++; |
| 2523 | n_launders_sa++; |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2524 | } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2525 | rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); |
| 2526 | if (WARN_ON_ONCE(!rfcp)) { |
| 2527 | schedule_timeout_interruptible(1); |
| 2528 | continue; |
| 2529 | } |
| 2530 | n_max_cbs++; |
| 2531 | n_launders_sa = 0; |
| 2532 | rfcp->rfc_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2533 | rfcp->rfc_rfp = rfp; |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2534 | } else { |
| 2535 | rfcp = NULL; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2536 | } |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2537 | if (rfcp) |
| 2538 | cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2539 | rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2540 | if (tick_nohz_full_enabled()) { |
| 2541 | local_irq_save(flags); |
| 2542 | rcu_momentary_dyntick_idle(); |
| 2543 | local_irq_restore(flags); |
| 2544 | } |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2545 | } |
| 2546 | stoppedat = jiffies; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2547 | n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2548 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 2549 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
Paul E. McKenney | 05b7246 | 2021-12-16 15:36:02 -0800 | [diff] [blame] | 2550 | pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2551 | cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2552 | (void)rcu_torture_fwd_prog_cbfree(rfp); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2553 | |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2554 | if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && |
| 2555 | !shutdown_time_arrived()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2556 | WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); |
| 2557 | pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", |
| 2558 | __func__, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2559 | stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2560 | n_launders + n_max_cbs - n_launders_cb_snap, |
| 2561 | n_launders, n_launders_sa, |
| 2562 | n_max_gps, n_max_cbs, cver, gps); |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2563 | atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); |
Paul E. McKenney | 89440d2 | 2021-12-28 15:59:38 -0800 | [diff] [blame] | 2564 | mutex_lock(&rcu_fwd_mutex); // Serialize histograms. |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2565 | rcu_torture_fwd_cb_hist(rfp); |
Paul E. McKenney | 89440d2 | 2021-12-28 15:59:38 -0800 | [diff] [blame] | 2566 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2567 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2568 | schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2569 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e22ef8d | 2021-12-17 12:33:53 -0800 | [diff] [blame] | 2570 | atomic_dec(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2571 | } |
| 2572 | |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2573 | |
| 2574 | /* |
| 2575 | * OOM notifier: dump diagnostics for the current forward-progress tests, set the
| 2576 | * emergency-stop flag, and free forward-progress callbacks to relieve memory pressure.
| 2577 | */ |
| 2578 | static int rcutorture_oom_notify(struct notifier_block *self, |
| 2579 | unsigned long notused, void *nfreed) |
| 2580 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2581 | int i; |
| 2582 | unsigned long ncbs;
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2583 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2584 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2585 | mutex_lock(&rcu_fwd_mutex); |
| 2586 | rfp = rcu_fwds; |
| 2587 | if (!rfp) { |
| 2588 | mutex_unlock(&rcu_fwd_mutex); |
| 2589 | return NOTIFY_OK; |
| 2590 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2591 | WARN(1, "%s invoked upon OOM during forward-progress testing.\n", |
| 2592 | __func__); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2593 | for (i = 0; i < fwd_progress; i++) { |
| 2594 | rcu_torture_fwd_cb_hist(&rfp[i]); |
| 2595 | rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); |
| 2596 | } |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2597 | WRITE_ONCE(rcu_fwd_emergency_stop, true); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2598 | smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ |
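| | // Alternate freeing the list and waiting for callbacks: rcu_torture_fwd_cb_cr()
| | // re-queues each invoked callback onto the list, so every rcu_barrier() below
| | // may expose more entries for the next freeing pass.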
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2599 | ncbs = 0; |
| 2600 | for (i = 0; i < fwd_progress; i++) |
| 2601 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2602 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2603 | rcu_barrier(); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2604 | ncbs = 0; |
| 2605 | for (i = 0; i < fwd_progress; i++) |
| 2606 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2607 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2608 | rcu_barrier(); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2609 | ncbs = 0; |
| 2610 | for (i = 0; i < fwd_progress; i++) |
| 2611 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2612 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2613 | smp_mb(); /* Frees before return to avoid redoing OOM. */ |
| 2614 | (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ |
| 2615 | pr_info("%s returning after OOM processing.\n", __func__); |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2616 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2617 | return NOTIFY_OK; |
| 2618 | } |
| 2619 | |
| 2620 | static struct notifier_block rcutorture_oom_nb = { |
| 2621 | .notifier_call = rcutorture_oom_notify |
| 2622 | }; |
| 2623 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2624 | /* Carry out grace-period forward-progress testing. */ |
| 2625 | static int rcu_torture_fwd_prog(void *args) |
| 2626 | { |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2627 | bool firsttime = true; |
| 2628 | long max_cbs; |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 2629 | int oldnice = task_nice(current); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2630 | unsigned long oldseq = READ_ONCE(rcu_fwd_seq); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2631 | struct rcu_fwd *rfp = args; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2632 | int tested = 0; |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2633 | int tested_tries = 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2634 | |
| 2635 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); |
Paul E. McKenney | 5ab7ab8 | 2018-09-21 18:08:09 -0700 | [diff] [blame] | 2636 | rcu_bind_current_to_nocb(); |
Paul E. McKenney | fecad50 | 2018-07-20 12:18:11 -0700 | [diff] [blame] | 2637 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) |
| 2638 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2639 | do { |
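| | // Kthread 0 leads: it waits out the holdoff, clears the emergency-stop flag,
| | // reports the previous pass's maximum callback count, and advances rcu_fwd_seq.
| | // The other kthreads simply wait for rcu_fwd_seq to advance before starting
| | // their own pass.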
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2640 | if (!rfp->rcu_fwd_id) { |
| 2641 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ); |
| 2642 | WRITE_ONCE(rcu_fwd_emergency_stop, false); |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2643 | if (!firsttime) { |
| 2644 | max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); |
| 2645 | pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); |
| 2646 | } |
| 2647 | firsttime = false; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2648 | WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); |
| 2649 | } else { |
Paul E. McKenney | 02b51a1 | 2021-12-17 15:05:05 -0800 | [diff] [blame] | 2650 | while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2651 | schedule_timeout_interruptible(1); |
| 2652 | oldseq = READ_ONCE(rcu_fwd_seq); |
| 2653 | } |
| 2654 | pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); |
| 2655 | if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) |
Paul E. McKenney | 4355080 | 2019-12-04 15:58:41 -0800 | [diff] [blame] | 2656 | rcu_torture_fwd_prog_cr(rfp); |
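| | // Run the need_resched()/cond_resched() test only if this flavor reports a
| | // nonzero stall duration; under CONFIG_TINY_RCU, additionally require that
| | // in-kernel boot has ended and that enough CPUs are online for this
| | // kthread's ID.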
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2657 | if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && |
| 2658 | (!IS_ENABLED(CONFIG_TINY_RCU) || |
| 2659 | (rcu_inkernel_boot_has_ended() && |
| 2660 | torture_num_online_cpus() > rfp->rcu_fwd_id))) |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2661 | rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2662 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2663 | /* Avoid slow periods, better to test when busy. */ |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 2664 | if (stutter_wait("rcu_torture_fwd_prog")) |
| 2665 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2666 | } while (!torture_must_stop()); |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2667 | /* Short runs might not contain a valid forward-progress attempt. */ |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2668 | if (!rfp->rcu_fwd_id) { |
| 2669 | WARN_ON(!tested && tested_tries >= 5); |
| 2670 | pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); |
| 2671 | } |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2672 | torture_kthread_stopping("rcu_torture_fwd_prog"); |
| 2673 | return 0; |
| 2674 | } |
| 2675 | |
| 2676 | /* If forward-progress checking is requested and feasible, spawn the kthreads. */
| 2677 | static int __init rcu_torture_fwd_prog_init(void) |
| 2678 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2679 | int i; |
| 2680 | int ret = 0; |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2681 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2682 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2683 | if (!fwd_progress) |
| 2684 | return 0; /* Not requested, so don't do it. */ |
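| | // Clamp the number of forward-progress kthreads to the number of CPUs;
| | // a negative fwd_progress asks for one kthread per CPU.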
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2685 | if (fwd_progress >= nr_cpu_ids) { |
| 2686 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
| 2687 | fwd_progress = nr_cpu_ids; |
| 2688 | } else if (fwd_progress < 0) { |
| 2689 | fwd_progress = nr_cpu_ids; |
| 2690 | } |
Paul E. McKenney | a7eb937 | 2020-10-09 19:51:55 -0700 | [diff] [blame] | 2691 | if ((!cur_ops->sync && !cur_ops->call) || |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2692 | (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || |
| 2693 | cur_ops == &rcu_busted_ops) { |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2694 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2695 | fwd_progress = 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2696 | return 0; |
| 2697 | } |
| 2698 | if (stall_cpu > 0) { |
| 2699 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2700 | fwd_progress = 0; |
Zhouyi Zhou | 3ac8587 | 2021-07-26 05:43:33 +0800 | [diff] [blame] | 2701 | if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2702 | return -EINVAL; /* In module, can fail back to user. */ |
| 2703 | WARN_ON(1); /* Make sure rcutorture notices conflict. */ |
| 2704 | return 0; |
| 2705 | } |
| 2706 | if (fwd_progress_holdoff <= 0) |
| 2707 | fwd_progress_holdoff = 1; |
| 2708 | if (fwd_progress_div <= 0) |
| 2709 | fwd_progress_div = 4; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2710 | rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); |
| 2711 | fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); |
| 2712 | if (!rfp || !fwd_prog_tasks) { |
| 2713 | kfree(rfp); |
| 2714 | kfree(fwd_prog_tasks); |
| 2715 | fwd_prog_tasks = NULL; |
| 2716 | fwd_progress = 0; |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2717 | return -ENOMEM; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2718 | } |
| 2719 | for (i = 0; i < fwd_progress; i++) { |
| 2720 | spin_lock_init(&rfp[i].rcu_fwd_lock); |
| 2721 | rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; |
| 2722 | rfp[i].rcu_fwd_id = i; |
| 2723 | } |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2724 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2725 | rcu_fwds = rfp; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2726 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 299c7d9 | 2020-07-22 10:45:12 -0700 | [diff] [blame] | 2727 | register_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2728 | for (i = 0; i < fwd_progress; i++) { |
| 2729 | ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); |
| 2730 | if (ret) { |
| 2731 | fwd_progress = i; |
| 2732 | return ret; |
| 2733 | } |
| 2734 | } |
| 2735 | return 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2736 | } |
| 2737 | |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2738 | static void rcu_torture_fwd_prog_cleanup(void) |
| 2739 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2740 | int i; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2741 | struct rcu_fwd *rfp; |
| 2742 | |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2743 | if (!rcu_fwds || !fwd_prog_tasks) |
| 2744 | return; |
| 2745 | for (i = 0; i < fwd_progress; i++) |
| 2746 | torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); |
| 2747 | unregister_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2748 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2749 | rfp = rcu_fwds; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2750 | rcu_fwds = NULL; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2751 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2752 | kfree(rfp); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2753 | kfree(fwd_prog_tasks); |
| 2754 | fwd_prog_tasks = NULL; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2755 | } |
| 2756 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2757 | /* Callback function for RCU barrier testing. */ |
Rashika Kheria | b3b8a4d | 2014-02-27 17:16:57 +0530 | [diff] [blame] | 2758 | static void rcu_torture_barrier_cbf(struct rcu_head *rcu) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2759 | { |
| 2760 | atomic_inc(&barrier_cbs_invoked); |
| 2761 | } |
| 2762 | |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2763 | /* IPI handler to get callback posted on desired CPU, if online. */ |
| 2764 | static void rcu_torture_barrier1cb(void *rcu_void) |
| 2765 | { |
| 2766 | struct rcu_head *rhp = rcu_void; |
| 2767 | |
| 2768 | cur_ops->call(rhp, rcu_torture_barrier_cbf); |
| 2769 | } |
| 2770 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2771 | /* kthread function to register callbacks used to test RCU barriers. */ |
| 2772 | static int rcu_torture_barrier_cbs(void *arg) |
| 2773 | { |
| 2774 | long myid = (long)arg; |
Jules Irenge | 8f43d59 | 2020-06-01 19:45:48 +0100 | [diff] [blame] | 2775 | bool lastphase = false; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2776 | bool newphase; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2777 | struct rcu_head rcu; |
| 2778 | |
| 2779 | init_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2780 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 2781 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2782 | do { |
| 2783 | wait_event(barrier_cbs_wq[myid], |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2784 | (newphase = |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2785 | smp_load_acquire(&barrier_phase)) != lastphase || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2786 | torture_must_stop()); |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2787 | lastphase = newphase; |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2788 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2789 | break; |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2790 | /* |
| 2791 | * The above smp_load_acquire() ensures barrier_phase load |
Paul E. McKenney | aab0573 | 2016-05-02 12:20:51 -0700 | [diff] [blame] | 2792 | * is ordered before the following ->call(). |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2793 | */ |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2794 | if (smp_call_function_single(myid, rcu_torture_barrier1cb, |
| 2795 | &rcu, 1)) { |
| 2796 | // IPI failed, so use direct call from current CPU. |
| 2797 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
| 2798 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2799 | if (atomic_dec_and_test(&barrier_cbs_count)) |
| 2800 | wake_up(&barrier_wq); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2801 | } while (!torture_must_stop()); |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 2802 | if (cur_ops->cb_barrier != NULL) |
| 2803 | cur_ops->cb_barrier(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2804 | destroy_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2805 | torture_kthread_stopping("rcu_torture_barrier_cbs"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2806 | return 0; |
| 2807 | } |
| 2808 | |
| 2809 | /* kthread function to drive and coordinate RCU barrier testing. */ |
| 2810 | static int rcu_torture_barrier(void *arg) |
| 2811 | { |
| 2812 | int i; |
| 2813 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2814 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2815 | do { |
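| | // Each pass: reset the counters, flip barrier_phase to release the
| | // callback-posting kthreads, wait until they have all posted their
| | // callbacks, then invoke ->cb_barrier() and verify that every callback
| | // was in fact invoked.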
| 2816 | atomic_set(&barrier_cbs_invoked, 0); |
| 2817 | atomic_set(&barrier_cbs_count, n_barrier_cbs); |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2818 | /* Ensure barrier_phase ordered after prior assignments. */ |
| 2819 | smp_store_release(&barrier_phase, !barrier_phase); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2820 | for (i = 0; i < n_barrier_cbs; i++) |
| 2821 | wake_up(&barrier_cbs_wq[i]); |
| 2822 | wait_event(barrier_wq, |
| 2823 | atomic_read(&barrier_cbs_count) == 0 || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2824 | torture_must_stop()); |
| 2825 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2826 | break; |
| 2827 | n_barrier_attempts++; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2828 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2829 | if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { |
| 2830 | n_rcu_torture_barrier_error++; |
Paul E. McKenney | 7602de4a | 2014-12-17 18:39:54 -0800 | [diff] [blame] | 2831 | pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", |
| 2832 | atomic_read(&barrier_cbs_invoked), |
| 2833 | n_barrier_cbs); |
Paul E. McKenney | 9470a18 | 2020-02-05 12:54:34 -0800 | [diff] [blame] | 2834 | WARN_ON(1); |
| 2835 | // Wait manually for the remaining callbacks |
| 2836 | i = 0; |
| 2837 | do { |
| 2838 | if (WARN_ON(i++ > HZ)) |
| 2839 | i = INT_MIN; |
| 2840 | schedule_timeout_interruptible(1); |
| 2841 | cur_ops->cb_barrier(); |
| 2842 | } while (atomic_read(&barrier_cbs_invoked) != |
| 2843 | n_barrier_cbs && |
| 2844 | !torture_must_stop()); |
| 2845 | smp_mb(); // Can't trust ordering if broken. |
| 2846 | if (!torture_must_stop()) |
| 2847 | pr_err("Recovered: barrier_cbs_invoked = %d\n", |
| 2848 | atomic_read(&barrier_cbs_invoked)); |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2849 | } else { |
| 2850 | n_barrier_successes++; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2851 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2852 | schedule_timeout_interruptible(HZ / 10); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2853 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2854 | torture_kthread_stopping("rcu_torture_barrier"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2855 | return 0; |
| 2856 | } |
| 2857 | |
| 2858 | /* Initialize RCU barrier testing. */ |
| 2859 | static int rcu_torture_barrier_init(void) |
| 2860 | { |
| 2861 | int i; |
| 2862 | int ret; |
| 2863 | |
Paul E. McKenney | d9eba768 | 2015-05-14 15:35:43 -0700 | [diff] [blame] | 2864 | if (n_barrier_cbs <= 0) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2865 | return 0; |
| 2866 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2867 | pr_alert("%s" TORTURE_FLAG |
| 2868 | " Call or barrier ops missing for %s,\n", |
| 2869 | torture_type, cur_ops->name); |
| 2870 | pr_alert("%s" TORTURE_FLAG |
| 2871 | " RCU barrier testing omitted from run.\n", |
| 2872 | torture_type); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2873 | return 0; |
| 2874 | } |
| 2875 | atomic_set(&barrier_cbs_count, 0); |
| 2876 | atomic_set(&barrier_cbs_invoked, 0); |
| 2877 | barrier_cbs_tasks = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2878 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2879 | GFP_KERNEL); |
| 2880 | barrier_cbs_wq = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2881 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); |
Sasha Levin | de5e643 | 2012-12-20 14:11:28 -0500 | [diff] [blame] | 2882 | if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2883 | return -ENOMEM; |
| 2884 | for (i = 0; i < n_barrier_cbs; i++) { |
| 2885 | init_waitqueue_head(&barrier_cbs_wq[i]); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2886 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
| 2887 | (void *)(long)i, |
| 2888 | barrier_cbs_tasks[i]); |
| 2889 | if (ret) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2890 | return ret; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2891 | } |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2892 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2893 | } |
| 2894 | |
| 2895 | /* Clean up after RCU barrier testing. */ |
| 2896 | static void rcu_torture_barrier_cleanup(void) |
| 2897 | { |
| 2898 | int i; |
| 2899 | |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2900 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2901 | if (barrier_cbs_tasks != NULL) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2902 | for (i = 0; i < n_barrier_cbs; i++) |
| 2903 | torture_stop_kthread(rcu_torture_barrier_cbs, |
| 2904 | barrier_cbs_tasks[i]); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2905 | kfree(barrier_cbs_tasks); |
| 2906 | barrier_cbs_tasks = NULL; |
| 2907 | } |
| 2908 | if (barrier_cbs_wq != NULL) { |
| 2909 | kfree(barrier_cbs_wq); |
| 2910 | barrier_cbs_wq = NULL; |
| 2911 | } |
| 2912 | } |
| 2913 | |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2914 | static bool rcu_torture_can_boost(void) |
| 2915 | { |
| 2916 | static int boost_warn_once; |
| 2917 | int prio; |
| 2918 | |
| 2919 | if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) |
| 2920 | return false; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 2921 | if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 2922 | return false; |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2923 | |
| 2924 | prio = rcu_get_gp_kthreads_prio(); |
| 2925 | if (!prio) |
| 2926 | return false; |
| 2927 | |
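| | // rcutorture's boost kthreads run at RT priority 1 (see rcu_torture_boost()),
| | // so the RCU kthreads need priority of at least 2 for boosting to have a
| | // testable effect.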
| 2928 | if (prio < 2) { |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 2929 | if (boost_warn_once == 1) |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2930 | return false; |
| 2931 | |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2932 | pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2933 | boost_warn_once = 1; |
| 2934 | return false; |
| 2935 | } |
| 2936 | |
| 2937 | return true; |
| 2938 | } |
| 2939 | |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2940 | static bool read_exit_child_stop; |
| 2941 | static bool read_exit_child_stopped; |
| 2942 | static wait_queue_head_t read_exit_wq; |
| 2943 | |
| 2944 | // Child kthread which just does an rcutorture reader and exits. |
| 2945 | static int rcu_torture_read_exit_child(void *trsp_in) |
| 2946 | { |
| 2947 | struct torture_random_state *trsp = trsp_in; |
| 2948 | |
| 2949 | set_user_nice(current, MAX_NICE); |
| 2950 | // Minimize time between reading and exiting. |
| 2951 | while (!kthread_should_stop()) |
| 2952 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 2953 | (void)rcu_torture_one_read(trsp, -1); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2954 | return 0; |
| 2955 | } |
| 2956 | |
| 2957 | // Parent kthread which creates and destroys read-exit child kthreads. |
| 2958 | static int rcu_torture_read_exit(void *unused) |
| 2959 | { |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2960 | bool errexit = false; |
| 2961 | int i; |
| 2962 | struct task_struct *tsp; |
| 2963 | DEFINE_TORTURE_RANDOM(trs); |
| 2964 | |
| 2965 | // Allocate and initialize. |
| 2966 | set_user_nice(current, MAX_NICE); |
| 2967 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); |
| 2968 | |
| 2969 | // Each pass through this loop does one read-exit episode. |
| 2970 | do { |
Paul E. McKenney | d984114 | 2022-04-27 11:46:02 -0700 | [diff] [blame] | 2971 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); |
| 2972 | for (i = 0; i < read_exit_burst; i++) { |
| 2973 | if (READ_ONCE(read_exit_child_stop)) |
| 2974 | break; |
| 2975 | stutter_wait("rcu_torture_read_exit"); |
| 2976 | // Spawn child. |
| 2977 | tsp = kthread_run(rcu_torture_read_exit_child, |
| 2978 | &trs, "%s", "rcu_torture_read_exit_child"); |
| 2979 | if (IS_ERR(tsp)) { |
| 2980 | TOROUT_ERRSTRING("out of memory"); |
| 2981 | errexit = true; |
| 2982 | break; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2983 | } |
Paul E. McKenney | d984114 | 2022-04-27 11:46:02 -0700 | [diff] [blame] | 2984 | cond_resched(); |
| 2985 | kthread_stop(tsp); |
| 2986 | n_read_exits++; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2987 | } |
Paul E. McKenney | d984114 | 2022-04-27 11:46:02 -0700 | [diff] [blame] | 2988 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); |
| 2989 | rcu_barrier(); // Wait for task_struct free, avoid OOM. |
| 2990 | i = 0; |
| 2991 | for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++) |
| 2992 | schedule_timeout_uninterruptible(HZ); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2993 | } while (!errexit && !READ_ONCE(read_exit_child_stop)); |
| 2994 | |
| 2995 | // Clean up and exit. |
| 2996 | smp_store_release(&read_exit_child_stopped, true); // After reaping. |
| 2997 | smp_mb(); // Store before wakeup. |
| 2998 | wake_up(&read_exit_wq); |
| 2999 | while (!torture_must_stop()) |
| 3000 | schedule_timeout_uninterruptible(1); |
| 3001 | torture_kthread_stopping("rcu_torture_read_exit"); |
| 3002 | return 0; |
| 3003 | } |
| 3004 | |
| 3005 | static int rcu_torture_read_exit_init(void) |
| 3006 | { |
| 3007 | if (read_exit_burst <= 0) |
Paul E. McKenney | fda8486 | 2021-08-03 17:42:25 -0700 | [diff] [blame] | 3008 | return 0; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3009 | init_waitqueue_head(&read_exit_wq); |
| 3010 | read_exit_child_stop = false; |
| 3011 | read_exit_child_stopped = false; |
| 3012 | return torture_create_kthread(rcu_torture_read_exit, NULL, |
| 3013 | read_exit_task); |
| 3014 | } |
| 3015 | |
| 3016 | static void rcu_torture_read_exit_cleanup(void) |
| 3017 | { |
| 3018 | if (!read_exit_task) |
| 3019 | return; |
| 3020 | WRITE_ONCE(read_exit_child_stop, true); |
| 3021 | smp_mb(); // Above write before wait. |
| 3022 | wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); |
| 3023 | torture_stop_kthread(rcutorture_read_exit, read_exit_task); |
| 3024 | } |
| 3025 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3026 | static enum cpuhp_state rcutor_hp; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3027 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3028 | static void |
| 3029 | rcu_torture_cleanup(void) |
| 3030 | { |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 3031 | int firsttime; |
Paul E. McKenney | 034777d | 2018-04-19 08:43:11 -0700 | [diff] [blame] | 3032 | int flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 3033 | unsigned long gp_seq = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3034 | int i; |
| 3035 | |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 3036 | if (torture_cleanup_begin()) { |
Paul E. McKenney | 6f81bd6 | 2021-12-06 15:12:14 -0800 | [diff] [blame] | 3037 | if (cur_ops->cb_barrier != NULL) { |
| 3038 | pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 3039 | cur_ops->cb_barrier(); |
Paul E. McKenney | 6f81bd6 | 2021-12-06 15:12:14 -0800 | [diff] [blame] | 3040 | } |
Paul E. McKenney | 99d6a2a | 2022-02-04 12:45:18 -0800 | [diff] [blame] | 3041 | rcu_gp_slow_unregister(NULL); |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 3042 | return; |
| 3043 | } |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 3044 | if (!cur_ops) { |
| 3045 | torture_cleanup_end(); |
Paul E. McKenney | 99d6a2a | 2022-02-04 12:45:18 -0800 | [diff] [blame] | 3046 | rcu_gp_slow_unregister(NULL); |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 3047 | return; |
| 3048 | } |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 3049 | |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 3050 | if (cur_ops->gp_kthread_dbg) |
| 3051 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3052 | rcu_torture_read_exit_cleanup(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3053 | rcu_torture_barrier_cleanup(); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 3054 | rcu_torture_fwd_prog_cleanup(); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 3055 | torture_stop_kthread(rcu_torture_stall, stall_task); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 3056 | torture_stop_kthread(rcu_torture_writer, writer_task); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3057 | |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3058 | if (nocb_tasks) { |
| 3059 | for (i = 0; i < nrealnocbers; i++) |
| 3060 | torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); |
| 3061 | kfree(nocb_tasks); |
| 3062 | nocb_tasks = NULL; |
| 3063 | } |
| 3064 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 3065 | if (reader_tasks) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 3066 | for (i = 0; i < nrealreaders; i++) |
| 3067 | torture_stop_kthread(rcu_torture_reader, |
| 3068 | reader_tasks[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3069 | kfree(reader_tasks); |
Paul E. McKenney | 293b93d | 2020-09-23 16:46:36 -0700 | [diff] [blame] | 3070 | reader_tasks = NULL; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3071 | } |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3072 | kfree(rcu_torture_reader_mbchk); |
| 3073 | rcu_torture_reader_mbchk = NULL; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3074 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 3075 | if (fakewriter_tasks) { |
Paul E. McKenney | 293b93d | 2020-09-23 16:46:36 -0700 | [diff] [blame] | 3076 | for (i = 0; i < nfakewriters; i++) |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 3077 | torture_stop_kthread(rcu_torture_fakewriter, |
| 3078 | fakewriter_tasks[i]); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3079 | kfree(fakewriter_tasks); |
| 3080 | fakewriter_tasks = NULL; |
| 3081 | } |
| 3082 | |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 3083 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 3084 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 3085 | pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", |
| 3086 | cur_ops->name, (long)gp_seq, flags, |
| 3087 | rcutorture_seq_diff(gp_seq, start_gp_seq)); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 3088 | torture_stop_kthread(rcu_torture_stats, stats_task); |
| 3089 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
Paul E. McKenney | fd13fe1 | 2021-08-06 08:57:26 -0700 | [diff] [blame] | 3090 | if (rcu_torture_can_boost() && rcutor_hp >= 0) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3091 | cpuhp_remove_state(rcutor_hp); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3092 | |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 3093 | /* |
Paul E. McKenney | 62a1a94 | 2018-07-07 18:12:26 -0700 | [diff] [blame] | 3094 | * Wait for all RCU callbacks to fire, then do torture-type-specific |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 3095 | * cleanup operations. |
| 3096 | */ |
Paul E. McKenney | 6f81bd6 | 2021-12-06 15:12:14 -0800 | [diff] [blame] | 3097 | if (cur_ops->cb_barrier != NULL) { |
| 3098 | pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); |
Paul E. McKenney | 2326974 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 3099 | cur_ops->cb_barrier(); |
Paul E. McKenney | 6f81bd6 | 2021-12-06 15:12:14 -0800 | [diff] [blame] | 3100 | } |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 3101 | if (cur_ops->cleanup != NULL) |
| 3102 | cur_ops->cleanup(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3103 | |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 3104 | rcu_torture_mem_dump_obj(); |
| 3105 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3106 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3107 | |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 3108 | if (err_segs_recorded) { |
| 3109 | pr_alert("Failure/close-call rcutorture reader segments:\n"); |
| 3110 | if (rt_read_nsegs == 0) |
| 3111 | pr_alert("\t: No segments recorded!!!\n"); |
| 3112 | firsttime = 1; |
| 3113 | for (i = 0; i < rt_read_nsegs; i++) { |
| 3114 | pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); |
| 3115 | if (err_segs[i].rt_delay_jiffies != 0) { |
| 3116 | pr_cont("%s%ldjiffies", firsttime ? "" : "+", |
| 3117 | err_segs[i].rt_delay_jiffies); |
| 3118 | firsttime = 0; |
| 3119 | } |
| 3120 | if (err_segs[i].rt_delay_ms != 0) { |
| 3121 | pr_cont("%s%ldms", firsttime ? "" : "+", |
| 3122 | err_segs[i].rt_delay_ms); |
| 3123 | firsttime = 0; |
| 3124 | } |
| 3125 | if (err_segs[i].rt_delay_us != 0) { |
| 3126 | pr_cont("%s%ldus", firsttime ? "" : "+", |
| 3127 | err_segs[i].rt_delay_us); |
| 3128 | firsttime = 0; |
| 3129 | } |
| 3130 | pr_cont("%s\n", |
| 3131 | err_segs[i].rt_preempted ? "preempted" : ""); |
| 3132 | |
| 3133 | } |
| 3134 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3135 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3136 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
Paul E. McKenney | 2e9e808 | 2014-01-28 15:58:22 -0800 | [diff] [blame] | 3137 | else if (torture_onoff_failures()) |
Paul E. McKenney | 091541b | 2012-01-10 12:51:14 -0800 | [diff] [blame] | 3138 | rcu_torture_print_module_parms(cur_ops, |
| 3139 | "End of test: RCU_HOTPLUG"); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 3140 | else |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3141 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 3142 | torture_cleanup_end(); |
Paul E. McKenney | 99d6a2a | 2022-02-04 12:45:18 -0800 | [diff] [blame] | 3143 | rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3144 | } |
| 3145 | |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3146 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 3147 | static void rcu_torture_leak_cb(struct rcu_head *rhp) |
| 3148 | { |
| 3149 | } |
| 3150 | |
| 3151 | static void rcu_torture_err_cb(struct rcu_head *rhp) |
| 3152 | { |
| 3153 | /* |
| 3154 | * This -might- happen due to race conditions, but is unlikely. |
| 3155 | * The scenario that leads to this happening is that the |
| 3156 | * first of the pair of duplicate callbacks is queued, |
| 3157 | * someone else starts a grace period that includes that |
| 3158 | * callback, then the second of the pair must wait for the |
| 3159 | * next grace period. Unlikely, but can happen. If it |
| 3160 | * does happen, the debug-objects subsystem won't have splatted. |
| 3161 | */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3162 | pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3163 | } |
| 3164 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 3165 | |
| 3166 | /* |
| 3167 | * Verify that double-free causes debug-objects to complain, but only |
| 3168 | * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test |
| 3169 | * cannot be carried out. |
| 3170 | */ |
| 3171 | static void rcu_test_debug_objects(void) |
| 3172 | { |
| 3173 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 3174 | struct rcu_head rh1; |
| 3175 | struct rcu_head rh2; |
Paul E. McKenney | edf7b84 | 2020-12-02 17:52:07 -0800 | [diff] [blame] | 3176 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3177 | |
| 3178 | init_rcu_head_on_stack(&rh1); |
| 3179 | init_rcu_head_on_stack(&rh2); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3180 | pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3181 | |
| 3182 | /* Try to queue the rh2 pair of callbacks for the same grace period. */ |
| 3183 | preempt_disable(); /* Prevent preemption from interrupting test. */ |
| 3184 | rcu_read_lock(); /* Make it impossible to finish a grace period. */ |
| 3185 | call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ |
| 3186 | local_irq_disable(); /* Make it harder to start a new grace period. */ |
| 3187 | call_rcu(&rh2, rcu_torture_leak_cb); |
| 3188 | call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ |
Paul E. McKenney | edf7b84 | 2020-12-02 17:52:07 -0800 | [diff] [blame] | 3189 | if (rhp) { |
| 3190 | call_rcu(rhp, rcu_torture_leak_cb); |
| 3191 | call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ |
| 3192 | } |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3193 | local_irq_enable(); |
| 3194 | rcu_read_unlock(); |
| 3195 | preempt_enable(); |
| 3196 | |
| 3197 | /* Wait for them all to get done so we can safely return. */ |
| 3198 | rcu_barrier(); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3199 | pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3200 | destroy_rcu_head_on_stack(&rh1); |
| 3201 | destroy_rcu_head_on_stack(&rh2); |
Zqiang | 98ea203 | 2022-04-27 15:15:20 +0800 | [diff] [blame] | 3202 | kfree(rhp); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3203 | #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3204 | pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3205 | #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 3206 | } |
| 3207 | |
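| | /*
| |  * Force a full grace period on every 4096th invocation; used as an occasional
| |  * synchronization callback by torture infrastructure such as CPU-hotplug
| |  * (onoff) testing.
| |  */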
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 3208 | static void rcutorture_sync(void) |
| 3209 | { |
| 3210 | static unsigned long n; |
| 3211 | |
| 3212 | if (cur_ops->sync && !(++n & 0xfff)) |
| 3213 | cur_ops->sync(); |
| 3214 | } |
| 3215 | |
Josh Triplett | 6f8bc500 | 2007-05-08 00:25:24 -0700 | [diff] [blame] | 3216 | static int __init |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3217 | rcu_torture_init(void) |
| 3218 | { |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 3219 | long i; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3220 | int cpu; |
| 3221 | int firsterr = 0; |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 3222 | int flags = 0; |
| 3223 | unsigned long gp_seq = 0; |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 3224 | static struct rcu_torture_ops *torture_ops[] = { |
Paul E. McKenney | 40c1278 | 2022-03-17 13:29:59 -0700 | [diff] [blame] | 3225 | &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, |
Paul E. McKenney | 4c3f7b0 | 2022-03-17 16:16:45 -0700 | [diff] [blame] | 3226 | TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS |
Paul E. McKenney | 40c1278 | 2022-03-17 13:29:59 -0700 | [diff] [blame] | 3227 | &trivial_ops, |
Paul E. McKenney | 2ec1f2d9 | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 3228 | }; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3229 | |
Paul E. McKenney | a2f2577 | 2017-11-21 20:19:17 -0800 | [diff] [blame] | 3230 | if (!torture_init_begin(torture_type, verbose)) |
Paul E. McKenney | 5228084 | 2014-04-07 09:14:11 -0700 | [diff] [blame] | 3231 | return -EBUSY; |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 3232 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3233 | /* Process args and tell the world that the torturer is on the job. */ |
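 | |	/*
 | |	 * The flavor under test comes from the torture_type module parameter,
 | |	 * for example "modprobe rcutorture torture_type=srcu".
 | |	 */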
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3234 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3235 | cur_ops = torture_ops[i]; |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3236 | if (strcmp(torture_type, cur_ops->name) == 0) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3237 | break; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3238 | } |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3239 | if (i == ARRAY_SIZE(torture_ops)) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 3240 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n", |
| 3241 | torture_type); |
| 3242 | pr_alert("rcu-torture types:"); |
Paul E. McKenney | cf886c4 | 2009-10-25 19:03:54 -0700 | [diff] [blame] | 3243 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
Joe Perches | a753835 | 2018-05-14 13:27:33 -0700 | [diff] [blame] | 3244 | pr_cont(" %s", torture_ops[i]->name); |
| 3245 | pr_cont("\n"); |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3246 | firsterr = -EINVAL; |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 3247 | cur_ops = NULL; |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3248 | goto unwind; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3249 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3250 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 3251 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3252 | fqs_duration = 0; |
| 3253 | } |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 3254 | if (cur_ops->init) |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3255 | cur_ops->init(); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3256 | |
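 | |	/*
 | |	 * A negative nreaders (the default) requests an automatic reader
 | |	 * count based on the number of online CPUs, leaving headroom for
 | |	 * the writer and other torture kthreads.
 | |	 */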
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3257 | if (nreaders >= 0) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3258 | nrealreaders = nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3259 | } else { |
Paul E. McKenney | 3838cc1 | 2015-03-12 13:55:48 -0700 | [diff] [blame] | 3260 | nrealreaders = num_online_cpus() - 2 - nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3261 | if (nrealreaders <= 0) |
| 3262 | nrealreaders = 1; |
| 3263 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3264 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
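 | |	/*
 | |	 * Record the grace-period sequence number at the start of the test
 | |	 * so that the end-of-test statistics can report how many grace
 | |	 * periods elapsed during the run.
 | |	 */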
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 3265 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 3266 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
| 3267 | start_gp_seq = gp_seq; |
| 3268 | pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", |
| 3269 | cur_ops->name, (long)gp_seq, flags); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3270 | |
| 3271 | /* Set up the freelist. */ |
| 3272 | |
| 3273 | INIT_LIST_HEAD(&rcu_torture_freelist); |
Ahmed S. Darwish | 788e770 | 2007-05-08 00:33:14 -0700 | [diff] [blame] | 3274 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3275 | rcu_tortures[i].rtort_mbtest = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3276 | list_add_tail(&rcu_tortures[i].rtort_free, |
| 3277 | &rcu_torture_freelist); |
| 3278 | } |
| 3279 | |
| 3280 | /* Initialize the statistics so that each run gets its own numbers. */ |
| 3281 | |
| 3282 | rcu_torture_current = NULL; |
| 3283 | rcu_torture_current_version = 0; |
| 3284 | atomic_set(&n_rcu_torture_alloc, 0); |
| 3285 | atomic_set(&n_rcu_torture_alloc_fail, 0); |
| 3286 | atomic_set(&n_rcu_torture_free, 0); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3287 | atomic_set(&n_rcu_torture_mberror, 0); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3288 | atomic_set(&n_rcu_torture_mbchk_fail, 0); |
| 3289 | atomic_set(&n_rcu_torture_mbchk_tries, 0); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3290 | atomic_set(&n_rcu_torture_error, 0); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3291 | n_rcu_torture_barrier_error = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3292 | n_rcu_torture_boost_ktrerror = 0; |
| 3293 | n_rcu_torture_boost_rterror = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3294 | n_rcu_torture_boost_failure = 0; |
| 3295 | n_rcu_torture_boosts = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3296 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 3297 | atomic_set(&rcu_torture_wcount[i], 0); |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3298 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3299 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 3300 | per_cpu(rcu_torture_count, cpu)[i] = 0; |
| 3301 | per_cpu(rcu_torture_batch, cpu)[i] = 0; |
| 3302 | } |
| 3303 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 3304 | err_segs_recorded = 0; |
| 3305 | rt_read_nsegs = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3306 | |
| 3307 | /* Start up the kthreads. */ |
| 3308 | |
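 | |	/*
 | |	 * Select which grace-period primitives (normal, expedited, polled,
 | |	 * conditional, ...) the writer kthread will exercise, based on the
 | |	 * gp_* module parameters.
 | |	 */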
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 3309 | rcu_torture_write_types(); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3310 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
| 3311 | writer_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3312 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3313 | goto unwind; |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3314 | if (nfakewriters > 0) { |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 3315 | fakewriter_tasks = kcalloc(nfakewriters, |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3316 | sizeof(fakewriter_tasks[0]), |
| 3317 | GFP_KERNEL); |
| 3318 | if (fakewriter_tasks == NULL) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3319 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3320 | firsterr = -ENOMEM; |
| 3321 | goto unwind; |
| 3322 | } |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3323 | } |
| 3324 | for (i = 0; i < nfakewriters; i++) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3325 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
| 3326 | NULL, fakewriter_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3327 | if (torture_init_error(firsterr)) |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3328 | goto unwind; |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3329 | } |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 3330 | reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3331 | GFP_KERNEL); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3332 | rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), |
| 3333 | GFP_KERNEL); |
| 3334 | if (!reader_tasks || !rcu_torture_reader_mbchk) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3335 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3336 | firsterr = -ENOMEM; |
| 3337 | goto unwind; |
| 3338 | } |
| 3339 | for (i = 0; i < nrealreaders; i++) { |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3340 | rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 3341 | firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3342 | reader_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3343 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3344 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3345 | } |
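 | |	/*
 | |	 * Optionally create kthreads that repeatedly toggle CPUs into and
 | |	 * out of callback-offloaded (rcu_nocbs) mode while the test runs.
 | |	 */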
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3346 | nrealnocbers = nocbs_nthreads; |
| 3347 | if (WARN_ON(nrealnocbers < 0)) |
| 3348 | nrealnocbers = 1; |
| 3349 | if (WARN_ON(nocbs_toggle < 0)) |
| 3350 | nocbs_toggle = HZ; |
| 3351 | if (nrealnocbers > 0) { |
| 3352 | nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); |
| 3353 | if (nocb_tasks == NULL) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3354 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3355 | firsterr = -ENOMEM; |
| 3356 | goto unwind; |
| 3357 | } |
| 3358 | } else { |
| 3359 | nocb_tasks = NULL; |
| 3360 | } |
| 3361 | for (i = 0; i < nrealnocbers; i++) { |
| 3362 | firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3363 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3364 | goto unwind; |
| 3365 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3366 | if (stat_interval > 0) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3367 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
| 3368 | stats_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3369 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3370 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3371 | } |
Paul E. McKenney | e8e255f | 2015-05-14 16:55:45 -0700 | [diff] [blame] | 3372 | if (test_no_idle_hz && shuffle_interval > 0) { |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 3373 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3374 | if (torture_init_error(firsterr)) |
Rusty Russell | 73d0a4b | 2009-03-30 22:05:16 -0600 | [diff] [blame] | 3375 | goto unwind; |
Srivatsa Vaddagiri | d84f520 | 2006-01-08 01:03:42 -0800 | [diff] [blame] | 3376 | } |
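 | |	/*
 | |	 * Stutter periodically stops and then restarts all torture activity;
 | |	 * the stutter gap is based on the flavor's stall_dur() when one is
 | |	 * provided, and on the stutter interval otherwise.
 | |	 */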
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3377 | if (stutter < 0) |
| 3378 | stutter = 0; |
| 3379 | if (stutter) { |
Paul E. McKenney | ff3bf92 | 2019-04-09 14:44:49 -0700 | [diff] [blame] | 3380 | int t; |
| 3381 | |
| 3382 | t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; |
| 3383 | firsterr = torture_stutter_init(stutter * HZ, t); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3384 | if (torture_init_error(firsterr)) |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3385 | goto unwind; |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3386 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3387 | if (fqs_duration < 0) |
| 3388 | fqs_duration = 0; |
| 3389 | if (fqs_duration) { |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 3390 | /* Create the fqs thread */ |
Paul E. McKenney | d0d0606 | 2014-03-17 20:56:45 -0700 | [diff] [blame] | 3391 | firsterr = torture_create_kthread(rcu_torture_fqs, NULL, |
| 3392 | fqs_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3393 | if (torture_init_error(firsterr)) |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3394 | goto unwind; |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3395 | } |
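 | |	/*
 | |	 * RCU priority-boost testing registers a dynamic CPU-hotplug state,
 | |	 * so each CPU coming online gets its own boost kthread and each CPU
 | |	 * going offline has that kthread cleaned up.
 | |	 */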
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3396 | if (test_boost_interval < 1) |
| 3397 | test_boost_interval = 1; |
| 3398 | if (test_boost_duration < 2) |
| 3399 | test_boost_duration = 2; |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 3400 | 	if (rcu_torture_can_boost()) {
| 3402 | boost_starttime = jiffies + test_boost_interval * HZ; |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3403 | |
| 3404 | firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", |
| 3405 | rcutorture_booster_init, |
| 3406 | rcutorture_booster_cleanup); |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3407 | rcutor_hp = firsterr; |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3408 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3409 | goto unwind; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3410 | } |
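 | |	/*
 | |	 * The remaining facilities (timed shutdown, CPU-hotplug torturing,
 | |	 * stall testing, forward-progress testing, rcu_barrier() testing,
 | |	 * and reader-exit testing) are each controlled by their own module
 | |	 * parameters and are essentially no-ops when left disabled.
 | |	 */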
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 3411 | shutdown_jiffies = jiffies + shutdown_secs * HZ; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3412 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3413 | if (torture_init_error(firsterr)) |
Paul E. McKenney | e991dbc | 2014-01-31 14:52:13 -0800 | [diff] [blame] | 3414 | goto unwind; |
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 3415 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, |
| 3416 | rcutorture_sync); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3417 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 3418 | goto unwind; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3419 | firsterr = rcu_torture_stall_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3420 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 3421 | goto unwind; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 3422 | firsterr = rcu_torture_fwd_prog_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3423 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 3424 | goto unwind; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3425 | firsterr = rcu_torture_barrier_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3426 | if (torture_init_error(firsterr)) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3427 | goto unwind; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3428 | firsterr = rcu_torture_read_exit_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3429 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3430 | goto unwind; |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3431 | if (object_debug) |
| 3432 | rcu_test_debug_objects(); |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 3433 | torture_init_end(); |
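 | |	/*
 | |	 * Register rcu_fwd_cb_nodelay so that forward-progress testing can
 | |	 * suppress artificial grace-period slowdowns while it is running.
 | |	 */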
Paul E. McKenney | 99d6a2a | 2022-02-04 12:45:18 -0800 | [diff] [blame] | 3434 | rcu_gp_slow_register(&rcu_fwd_cb_nodelay); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3435 | return 0; |
| 3436 | |
| 3437 | unwind: |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 3438 | torture_init_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3439 | rcu_torture_cleanup(); |
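 | |	/*
 | |	 * For automated runs (shutdown_secs set, normally with rcutorture
 | |	 * built in), leave a WARN splat for the test scripting to find and
 | |	 * then power off, so the run terminates instead of hanging on an
 | |	 * rcutorture instance that never actually started.
 | |	 */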
Paul E. McKenney | 4994684 | 2020-09-18 13:30:33 -0700 | [diff] [blame] | 3440 | if (shutdown_secs) { |
| 3441 | WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); |
| 3442 | kernel_power_off(); |
| 3443 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3444 | return firsterr; |
| 3445 | } |
| 3446 | |
| 3447 | module_init(rcu_torture_init); |
| 3448 | module_exit(rcu_torture_cleanup); |