// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { \
		if (verbose) \
			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} while (0)

static atomic_t verbose_batch_ctr;

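// Print a verbose message, but only one out of every verbose_batched
// invocations when batching is enabled, sleeping briefly beforehand so
// as not to overrun the console.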
#define VERBOSE_SCALEOUT_BATCH(s, x...) \
do { \
	if (verbose && \
	    (verbose_batched <= 0 || \
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \
		schedule_timeout_uninterruptible(1); \
		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} \
} while (0)

#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock.)");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting the test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of typesafe_lookup structures, that is, the degree of concurrency.
torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
// Number of loops per experiment; all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
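
// As one hypothetical example (the module name is assumed to be
// refscale, matching this file), SRCU readers might be timed with:
//
//	modprobe refscale scale_type=srcu nreaders=4 nruns=10 readdelay=100
//
// which runs ten experiments, each timing four concurrent SRCU readers
// that insert a 100-nanosecond delay into every read-side critical
// section.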

#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	bool (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};
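
// As a sketch (the "foo" names are hypothetical), a new flavor is
// added by supplying read-side and delay-section functions following
// the existing pattern, plus an ops structure naming them:
//
//	static void ref_foo_section(const int nloops)
//	{
//		int i;
//
//		for (i = nloops; i >= 0; i--) {
//			foo_read_lock();	// Enter the read-side section.
//			foo_read_unlock();	// And immediately exit it.
//		}
//	}
//
//	static struct ref_scale_ops foo_ops = {
//		.readsection = ref_foo_section,
//		.delaysection = ref_foo_delay_section,	// Analogous, with un_delay().
//		.name = "foo"
//	};
//
// The new structure must then be listed in ref_scale_init()'s
// scale_ops[] array so that scale_type=foo selects it.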

static struct ref_scale_ops *cur_ops;

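// Apply first the microsecond-scale and then the nanosecond-scale
// portion of the specified read-side delay, skipping whichever
// portions are zero.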
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static bool rcu_sync_scale_init(void)
{
	return true;
}

static struct ref_scale_ops rcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_rcu_read_section,
	.delaysection = ref_rcu_delay_section,
	.name = "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = srcu_ref_scale_read_section,
	.delaysection = srcu_ref_scale_delay_section,
	.name = "srcu"
};

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_tasks_ref_scale_read_section,
	.delaysection = rcu_tasks_ref_scale_delay_section,
	.name = "rcu-tasks"
};

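// Allow scale_ops[] to list rcu_tasks_ops unconditionally: this macro
// expands to nothing when RCU Tasks is not configured.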
#define RCU_TASKS_OPS &rcu_tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define RCU_TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_trace_ref_scale_read_section,
	.delaysection = rcu_trace_ref_scale_delay_section,
	.name = "rcu-trace"
};

#define RCU_TRACE_OPS &rcu_trace_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define RCU_TRACE_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_refcnt_section,
	.delaysection = ref_refcnt_delay_section,
	.name = "refcnt"
};

// Definitions for rwlock
static rwlock_t test_rwlock;

static bool ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
	return true;
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init = ref_rwlock_init,
	.readsection = ref_rwlock_section,
	.delaysection = ref_rwlock_delay_section,
	.name = "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static bool ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
	return true;
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init = ref_rwsem_init,
	.readsection = ref_rwsem_section,
	.delaysection = ref_rwsem_delay_section,
	.name = "rwsem"
};

// Definitions for global spinlock
static DEFINE_RAW_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		un_delay(udl, ndl);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_ops = {
	.readsection = ref_lock_section,
	.delaysection = ref_lock_delay_section,
	.name = "lock"
};

// Definitions for global irq-save spinlock

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_irq_ops = {
	.readsection = ref_lock_irq_section,
	.delaysection = ref_lock_irq_delay_section,
	.name = "lock-irq"
};

// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static struct ref_scale_ops acqrel_ops = {
	.readsection = ref_acqrel_section,
	.delaysection = ref_acqrel_delay_section,
	.name = "acqrel"
};

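// Sink for otherwise-unused timestamp sums, which prevents the
// compiler from optimizing away the clock-reading loops below.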
static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += ktime_get_real_fast_ns();
	preempt_enable();
	stopopts = x;
}

static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += ktime_get_real_fast_ns();
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static struct ref_scale_ops clock_ops = {
	.readsection = ref_clock_section,
	.delaysection = ref_clock_delay_section,
	.name = "clock"
};

static void ref_jiffies_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += jiffies;
	preempt_enable();
	stopopts = x;
}

static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += jiffies;
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static struct ref_scale_ops jiffies_ops = {
	.readsection = ref_jiffies_section,
	.delaysection = ref_jiffies_delay_section,
	.name = "jiffies"
};

////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.
//

// Item to look up in a typesafe manner. Array of pointers to these.
struct refscale_typesafe {
	atomic_t rts_refctr;  // Used by all flavors
	spinlock_t rts_lock;
	seqlock_t rts_seqlock;
	unsigned int a;
	unsigned int b;
};

static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
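// Each refscale_typesafe instance embeds all three protection
// mechanisms; the flavor actually exercised is selected by pointing
// rts_acquire() and rts_release() at the corresponding pair of
// functions in typesafe_init().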
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);

// Conditionally acquire an explicit in-structure reference count.
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	return atomic_inc_not_zero(&rtsp->rts_refctr);
}

// Unconditionally release an explicit in-structure reference count.
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	if (!atomic_dec_return(&rtsp->rts_refctr)) {
		WRITE_ONCE(rtsp->a, rtsp->a + 1);
		kmem_cache_free(typesafe_kmem_cachep, rtsp);
	}
	return true;
}

// Unconditionally acquire an explicit in-structure spinlock.
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	spin_lock(&rtsp->rts_lock);
	return true;
}

// Unconditionally release an explicit in-structure spinlock.
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	spin_unlock(&rtsp->rts_lock);
	return true;
}

// Unconditionally acquire an explicit in-structure sequence lock.
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	*start = read_seqbegin(&rtsp->rts_seqlock);
	return true;
}

// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	return !read_seqretry(&rtsp->rts_seqlock, start);
}

// Do a read-side critical section with the specified delay in
// microseconds and nanoseconds inserted so as to increase the
// probability of failure.
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned int a;
	unsigned int b;
	int i;
	long idx;
	struct refscale_typesafe *rtsp;
	unsigned int start;

	for (i = nloops; i >= 0; i--) {
		preempt_disable();
		idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
		preempt_enable();
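		// In a SLAB_TYPESAFE_BY_RCU cache, an object may be
		// freed and reallocated out from under its readers, so
		// the snapshot of ->a below is used to detect such
		// reuse and retry the lookup.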
retry:
		rcu_read_lock();
		rtsp = rcu_dereference(rtsarray[idx]);
		a = READ_ONCE(rtsp->a);
		if (!rts_acquire(rtsp, &start)) {
			rcu_read_unlock();
			goto retry;
		}
		if (a != READ_ONCE(rtsp->a)) {
			(void)rts_release(rtsp, start);
			rcu_read_unlock();
			goto retry;
		}
		un_delay(udl, ndl);
		b = READ_ONCE(rtsp->a);
		// Remember, seqlock read-side release can fail.
		if (!rts_release(rtsp, start)) {
			rcu_read_unlock();
			goto retry;
		}
		WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
		b = rtsp->b;
		rcu_read_unlock();
		WARN_ON_ONCE(a * a != b);
	}
}

// Because the acquisition and release methods are expensive, there
// is no point in optimizing away the un_delay() function's two checks.
// Thus simply define typesafe_read_section() as a simple wrapper around
// typesafe_delay_section().
static void typesafe_read_section(const int nloops)
{
	typesafe_delay_section(nloops, 0, 0);
}

// Allocate and initialize one refscale_typesafe structure.
static struct refscale_typesafe *typesafe_alloc_one(void)
{
	struct refscale_typesafe *rtsp;

	rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
	if (!rtsp)
		return NULL;
	atomic_set(&rtsp->rts_refctr, 1);
	WRITE_ONCE(rtsp->a, rtsp->a + 1);
	WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
	return rtsp;
}

// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory.
static void refscale_typesafe_ctor(void *rtsp_in)
{
	struct refscale_typesafe *rtsp = rtsp_in;

	spin_lock_init(&rtsp->rts_lock);
	seqlock_init(&rtsp->rts_seqlock);
	preempt_disable();
	rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
	preempt_enable();
}

static struct ref_scale_ops typesafe_ref_ops;
static struct ref_scale_ops typesafe_lock_ops;
static struct ref_scale_ops typesafe_seqlock_ops;

// Initialize for a typesafe test.
static bool typesafe_init(void)
{
	long idx;
	long si = lookup_instances;

	typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
						 sizeof(struct refscale_typesafe), sizeof(void *),
						 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
	if (!typesafe_kmem_cachep)
		return false;
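	// Interpret lookup_instances: negative values specify a multiple
	// of the number of CPUs, zero specifies one instance per CPU, and
	// positive values are used as-is.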
	if (si < 0)
		si = -si * nr_cpu_ids;
	else if (si == 0)
		si = nr_cpu_ids;
	rtsarray_size = si;
	rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
	if (!rtsarray)
		return false;
	for (idx = 0; idx < rtsarray_size; idx++) {
		rtsarray[idx] = typesafe_alloc_one();
		if (!rtsarray[idx])
			return false;
	}
	if (cur_ops == &typesafe_ref_ops) {
		rts_acquire = typesafe_ref_acquire;
		rts_release = typesafe_ref_release;
	} else if (cur_ops == &typesafe_lock_ops) {
		rts_acquire = typesafe_lock_acquire;
		rts_release = typesafe_lock_release;
	} else if (cur_ops == &typesafe_seqlock_ops) {
		rts_acquire = typesafe_seqlock_acquire;
		rts_release = typesafe_seqlock_release;
	} else {
		WARN_ON_ONCE(1);
		return false;
	}
	return true;
}

// Clean up after a typesafe test.
static void typesafe_cleanup(void)
{
	long idx;

	if (rtsarray) {
		for (idx = 0; idx < rtsarray_size; idx++)
			kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
		kfree(rtsarray);
		rtsarray = NULL;
		rtsarray_size = 0;
	}
	kmem_cache_destroy(typesafe_kmem_cachep);
	typesafe_kmem_cachep = NULL;
	rts_acquire = NULL;
	rts_release = NULL;
}

// The typesafe_init() function distinguishes these structures by address.
static struct ref_scale_ops typesafe_ref_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_ref"
};

static struct ref_scale_ops typesafe_lock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_lock"
};

static struct ref_scale_ops typesafe_seqlock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_seqlock"
};

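// Do one unit of read-side work using the current flavor's operations,
// splitting any requested readdelay (specified in nanoseconds) into
// microsecond and nanosecond components.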
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Reader kthread.  Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled.  This also has the effect
	// of preventing entries into the slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(800 + 64, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	pr_alert("%s\n", buf);

	kfree(buf);
	return sum;
}

// The main_func kthread is the orchestrator: it performs a series of
// experiments.  For each experiment, it signals all the readers
// involved to start, waits for them to finish, then reads out their
// timestamps and starts the next experiment.  After the final
// experiment, all the per-experiment timestamps are printed.
static int main_func(void *arg)
{
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(800 + 64, GFP_KERNEL);
	if (!result_avg || !buf) {
		SCALEOUT_ERRSTRING("out of memory");
		goto oom_exit;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Run each experiment, starting up all the readers for it.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (torture_must_stop())
			goto end;

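		// Re-arm the per-experiment check-in counters so that the
		// readers can rendezvous at start, warmup, and cooldown.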
		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average duration measured by each experiment.
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	pr_alert("Runs\tTime(ns)\n");
	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
	}

	pr_alert("%s", buf);

oom_exit:
	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for the torture framework to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
| 1063 | static int |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1064 | ref_scale_shutdown(void *arg) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1065 | { |
Paul E. McKenney | 6bc6e6b | 2023-01-31 16:12:18 -0800 | [diff] [blame] | 1066 | wait_event_idle(shutdown_wq, shutdown_start); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1067 | |
| 1068 | smp_mb(); // Wake before output. |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1069 | ref_scale_cleanup(); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1070 | kernel_power_off(); |
| 1071 | |
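 | | // Not expected to be reached: kernel_power_off() should not return.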
| 1072 | return -EINVAL; |
| 1073 | } |
| 1074 | |
| 1075 | static int __init |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1076 | ref_scale_init(void) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1077 | { |
| 1078 | long i; |
| 1079 | int firsterr = 0; |
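 | | // Table of scale types selectable via the scale_type module parameter.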
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1080 | static struct ref_scale_ops *scale_ops[] = { |
Paul E. McKenney | dec8678 | 2022-03-25 15:21:07 -0700 | [diff] [blame] | 1081 | &rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops, |
Paul E. McKenney | b5a2801 | 2023-07-11 18:01:38 -0700 | [diff] [blame] | 1082 | &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops, |
Paul E. McKenney | a6889be | 2022-11-08 08:18:06 -0800 | [diff] [blame] | 1083 | &typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops, |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1084 | }; |
| 1085 | |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1086 | if (!torture_init_begin(scale_type, verbose)) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1087 | return -EBUSY; |
| 1088 | |
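 | | // Look up the requested scale type in the scale_ops[] table.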
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1089 | for (i = 0; i < ARRAY_SIZE(scale_ops); i++) { |
| 1090 | cur_ops = scale_ops[i]; |
| 1091 | if (strcmp(scale_type, cur_ops->name) == 0) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1092 | break; |
| 1093 | } |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1094 | if (i == ARRAY_SIZE(scale_ops)) { |
| 1095 | pr_alert("ref-scale: invalid scale type: \"%s\"\n", scale_type);
| 1096 | pr_alert("ref-scale types:");
| 1097 | for (i = 0; i < ARRAY_SIZE(scale_ops); i++) |
| 1098 | pr_cont(" %s", scale_ops[i]->name); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1099 | pr_cont("\n"); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1100 | firsterr = -EINVAL; |
| 1101 | cur_ops = NULL; |
| 1102 | goto unwind; |
| 1103 | } |
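 | | // Run any scale-type-specific initialization; a false return aborts the test.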
| 1104 | if (cur_ops->init) |
Paul E. McKenney | 3c6496c | 2022-11-06 20:58:15 -0800 | [diff] [blame] | 1105 | if (!cur_ops->init()) { |
| 1106 | firsterr = -EUCLEAN; |
| 1107 | goto unwind; |
| 1108 | } |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1109 | |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1110 | ref_scale_print_module_parms(cur_ops, "Start of test"); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1111 | |
| 1112 | // Shutdown task |
| 1113 | if (shutdown) { |
| 1114 | init_waitqueue_head(&shutdown_wq); |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1115 | firsterr = torture_create_kthread(ref_scale_shutdown, NULL, |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1116 | shutdown_task); |
Paul E. McKenney | ed60ad7 | 2021-08-05 15:57:12 -0700 | [diff] [blame] | 1117 | if (torture_init_error(firsterr)) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1118 | goto unwind; |
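 | | // Give the shutdown kthread a chance to get started.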
| 1119 | schedule_timeout_uninterruptible(1); |
| 1120 | } |
| 1121 | |
Paul E. McKenney | 8fc2878 | 2020-05-25 15:48:38 -0700 | [diff] [blame] | 1122 | // Reader tasks (default to ~75% of online CPUs). |
| 1123 | if (nreaders < 0) |
| 1124 | nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2); |
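 | | // For example, 8 online CPUs gives (8 >> 1) + (8 >> 2) = 4 + 2 = 6 readers.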
Paul E. McKenney | 0c6d18d | 2020-08-27 09:58:19 -0700 | [diff] [blame] | 1125 | if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops)) |
| 1126 | loops = 1; |
| 1127 | if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders)) |
| 1128 | nreaders = 1; |
| 1129 | if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns)) |
| 1130 | nruns = 1; |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1131 | reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]), |
| 1132 | GFP_KERNEL); |
| 1133 | if (!reader_tasks) { |
Li Zhijian | 4feeb9d | 2021-10-25 11:26:58 +0800 | [diff] [blame] | 1134 | SCALEOUT_ERRSTRING("out of memory"); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1135 | firsterr = -ENOMEM; |
| 1136 | goto unwind; |
| 1137 | } |
| 1138 | |
Li Zhijian | f71f22b | 2021-10-29 17:40:24 +0800 | [diff] [blame] | 1139 | VERBOSE_SCALEOUT("Starting %d reader threads", nreaders); |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1140 | |
| 1141 | for (i = 0; i < nreaders; i++) { |
Waiman Long | f5063e89 | 2023-07-07 13:53:55 -0400 | [diff] [blame] | 1142 | init_waitqueue_head(&reader_tasks[i].wq); |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1143 | firsterr = torture_create_kthread(ref_scale_reader, (void *)i, |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1144 | reader_tasks[i].task); |
Paul E. McKenney | ed60ad7 | 2021-08-05 15:57:12 -0700 | [diff] [blame] | 1145 | if (torture_init_error(firsterr)) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1146 | goto unwind; |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1147 | } |
| 1148 | |
| 1149 | // Main task.
| 1150 | init_waitqueue_head(&main_wq); |
| 1151 | firsterr = torture_create_kthread(main_func, NULL, main_task); |
Paul E. McKenney | ed60ad7 | 2021-08-05 15:57:12 -0700 | [diff] [blame] | 1152 | if (torture_init_error(firsterr)) |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1153 | goto unwind; |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1154 | |
| 1155 | torture_init_end(); |
| 1156 | return 0; |
| 1157 | |
| 1158 | unwind: |
| 1159 | torture_init_end(); |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1160 | ref_scale_cleanup(); |
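 | | // If a shutdown was requested, power off rather than leave an automated test harness hanging on the failed init.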
Paul E. McKenney | bc80d35 | 2020-09-17 10:37:10 -0700 | [diff] [blame] | 1161 | if (shutdown) { |
| 1162 | WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST)); |
| 1163 | kernel_power_off(); |
| 1164 | } |
Joel Fernandes (Google) | 653ed64 | 2020-05-25 00:36:48 -0400 | [diff] [blame] | 1165 | return firsterr; |
| 1166 | } |
| 1167 | |
Paul E. McKenney | 1fbeb3a | 2020-06-17 11:53:53 -0700 | [diff] [blame] | 1168 | module_init(ref_scale_init); |
| 1169 | module_exit(ref_scale_cleanup); |
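 | | 
 | | // Usage sketch (hypothetical parameter values): when built as a module,
 | | // the test can be started with, for example:
 | | //   modprobe refscale scale_type=srcu nreaders=4 loops=10000 shutdown=1
 | | // These correspond to the module parameters printed by
 | | // ref_scale_print_module_parms() above; shutdown=1 powers the system off
 | | // once the run completes, which suits scripted testing.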