/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	int cpu;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.name = n,									\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}
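
/*
 * For example, the RCU Tasks flavor later in this file instantiates its
 * per-CPU and global state with:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 */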

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	int lim;
	int shift;

	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;
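	/*
	 * Choose the smallest shift that maps all CPU IDs onto no more
	 * than @lim queues.  Worked example with illustrative values:
	 * nr_cpu_ids = 6 and lim = 4 gives ilog2(6 / 4) = 0, but
	 * (6 - 1) >> 0 = 5 >= 4, so the shift is bumped to 1, after
	 * which CPUs 0-5 map to queues 0-2, within the 4 enabled queues.
	 */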
	shift = ilog2(nr_cpu_ids / lim);
	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
	}
	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
		cblist_init_generic(rtp);
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
	}
	needwake = rcu_segcblist_empty(&rtpcp->cblist);
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}
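
/*
 * Each flavor's public API is a thin wrapper around the generic functions
 * above.  As a sketch (matching the RCU Tasks flavor defined later in this
 * file), call_rcu_tasks() reduces to
 * call_rcu_tasks_generic(rhp, func, &rcu_tasks) and synchronize_rcu_tasks()
 * to synchronize_rcu_tasks_generic(&rcu_tasks).
 */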

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
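	/*
	 * The count starts at 2 so that the completion cannot fire while
	 * barrier callbacks are still being entrained below; the final
	 * atomic_sub_and_test(2, ...) removes this bias once every in-use
	 * queue has been visited.
	 */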
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
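// The return value is a bit mask: 0x1 indicates that at least one queue
// holds callbacks (so callback invocation is needed), and 0x2 indicates
// that at least one queue holds callbacks still awaiting a grace period
// (so a new grace period is needed).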
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
			needgpcb |= 0x3;
		if (!rcu_segcblist_empty(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz &&
	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

// Advance callbacks and invoke any that are ready.
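// This handler also propagates itself: each invocation schedules the
// handlers for CPUs 2 * cpu + 1 and 2 * cpu + 2 (when those queues are in
// use), so callback invocation fans out across the in-use queues in
// binary-tree fashion starting from CPU 0.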
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpu;
	int cpunext;
	unsigned long flags;
	int len;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	cpu = rtpcp->cpu;
	cpunext = cpu * 2 + 1;
	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		cpunext++;
		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int needgpcb;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* If there were none, wait a bit and start over. */
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);

		if (needgpcb & 0x2) {
			// Wait for one grace period.
			set_tasks_gp_state(rtp, RTGS_WAIT_GP);
			rtp->gp_start = jiffies;
			rcu_seq_start(&rtp->tasks_gp_seq);
			rtp->gp_func(rtp);
			rcu_seq_end(&rtp->tasks_gp_seq);
		}

		/* Invoke callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));

		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
			havecbs = true;
			break;
		}
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code is
// ordered before the grace period via the synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
| 853 | * Because ->on_rq and ->nvcsw are not guaranteed to have full |
| 854 | * memory barriers prior to them in the schedule() path, memory |
| 855 | * reordering on other CPUs could cause their RCU-tasks read-side |
| 856 | * critical sections to extend past the end of the grace period. |
| 857 | * However, because these ->nvcsw updates are carried out with |
| 858 | * interrupts disabled, we can use synchronize_rcu() to force the |
| 859 | * needed ordering on all such CPUs. |
| 860 | * |
| 861 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout |
| 862 | * accesses to be within the grace period, avoiding the need for |
| 863 | * memory barriers for ->rcu_tasks_holdout accesses. |
| 864 | * |
| 865 | * In addition, this synchronize_rcu() waits for exiting tasks |
| 866 | * to complete their final preempt_disable() region of execution, |
| 867 | * cleaning up after the synchronize_srcu() above. |
| 868 | */ |
| 869 | synchronize_rcu(); |
Paul E. McKenney | eacd6f0 | 2020-03-02 11:59:20 -0800 | [diff] [blame] | 870 | } |
| 871 | |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 872 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
Paul E. McKenney | c97d12a | 2020-03-03 15:50:31 -0800 | [diff] [blame] | 873 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 874 | |
| 875 | /** |
| 876 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period |
| 877 | * @rhp: structure to be used for queueing the RCU updates. |
| 878 | * @func: actual callback function to be invoked after the grace period |
| 879 | * |
| 880 | * The callback function will be invoked some time after a full grace |
| 881 | * period elapses, in other words after all currently executing RCU |
| 882 | * read-side critical sections have completed. call_rcu_tasks() assumes |
| 883 | * that the read-side critical sections end at a voluntary context |
Paul E. McKenney | 8af9e2c | 2021-09-15 09:24:18 -0700 | [diff] [blame] | 884 | * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 885 | * or transition to usermode execution. As such, there are no read-side |
| 886 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because |
| 887 | * this primitive is intended to determine that all tasks have passed |
Ingo Molnar | a616aec | 2021-03-22 22:29:10 -0700 | [diff] [blame] | 888 | * through a safe state, not so much for data-structure synchronization. |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 889 | * |
| 890 | * See the description of call_rcu() for more detailed information on |
| 891 | * memory ordering guarantees. |
| 892 | */ |
| 893 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) |
| 894 | { |
| 895 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); |
| 896 | } |
| 897 | EXPORT_SYMBOL_GPL(call_rcu_tasks); |
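// Illustrative sketch, not part of this file: a minimal call_rcu_tasks()
// usage pattern.  The structure "struct my_tramp" and the functions below
// are hypothetical names invented for this example; only call_rcu_tasks(),
// container_of(), kfree(), and struct rcu_head are real kernel APIs, and
// the usual headers (e.g. <linux/slab.h>) are assumed to be available.

struct my_tramp {
	struct rcu_head rh;
	void *text;		// Dynamically generated trampoline code.
};

static void my_tramp_free_cb(struct rcu_head *rhp)
{
	struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);

	// No task can still be executing within tp->text at this point.
	kfree(tp->text);
	kfree(tp);
}

static void my_tramp_remove(struct my_tramp *tp)
{
	// Caller has already unlinked tp so that no new executions of
	// tp->text can begin.  Defer the free until every task has passed
	// through a voluntary context switch, idle, or usermode execution.
	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
}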
| 898 | |
| 899 | /** |
| 900 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. |
| 901 | * |
| 902 | * Control will return to the caller some time after a full rcu-tasks |
| 903 | * grace period has elapsed, in other words after all currently |
| 904 | * executing rcu-tasks read-side critical sections have elapsed. These |
| 905 | * read-side critical sections are delimited by calls to schedule(), |
| 906 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls |
| 907 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). |
| 908 | * |
| 909 | * This is a very specialized primitive, intended only for a few uses in |
| 910 | * tracing and other situations requiring manipulation of function |
| 911 | * preambles and profiling hooks. The synchronize_rcu_tasks() function |
| 912 | * is not (yet) intended for heavy use from multiple CPUs. |
| 913 | * |
| 914 | * See the description of synchronize_rcu() for more detailed information |
| 915 | * on memory ordering guarantees. |
| 916 | */ |
| 917 | void synchronize_rcu_tasks(void) |
| 918 | { |
| 919 | synchronize_rcu_tasks_generic(&rcu_tasks); |
| 920 | } |
| 921 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); |
| 922 | |
| 923 | /** |
| 924 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. |
| 925 | * |
| 926 | * Although the current implementation is guaranteed to wait, it is not |
| 927 | * obligated to, for example, if there are no pending callbacks. |
| 928 | */ |
| 929 | void rcu_barrier_tasks(void) |
| 930 | { |
Paul E. McKenney | ce9b1c6 | 2021-11-11 14:53:43 -0800 | [diff] [blame] | 931 | rcu_barrier_tasks_generic(&rcu_tasks); |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 932 | } |
| 933 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); |
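// Illustrative sketch, not part of this file: a module-teardown pattern.
// A module that queues callbacks with call_rcu_tasks() must wait for them
// to be invoked before its callback functions are unloaded; the function
// name my_module_exit() is hypothetical.

static void __exit my_module_exit(void)
{
	// ...first stop queueing new call_rcu_tasks() callbacks...
	rcu_barrier_tasks();	// Wait for already-queued callbacks to run.
	// ...now nothing can still invoke this module's callback functions.
}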
| 934 | |
Paul E. McKenney | eacd6f0 | 2020-03-02 11:59:20 -0800 | [diff] [blame] | 935 | static int __init rcu_spawn_tasks_kthread(void) |
| 936 | { |
Paul E. McKenney | cafafd6 | 2021-11-05 21:52:00 -0700 | [diff] [blame] | 937 | cblist_init_generic(&rcu_tasks); |
Paul E. McKenney | 4fe192d | 2020-09-09 22:05:41 -0700 | [diff] [blame] | 938 | rcu_tasks.gp_sleep = HZ / 10; |
Paul E. McKenney | 75dc2da | 2020-09-17 16:17:17 -0700 | [diff] [blame] | 939 | rcu_tasks.init_fract = HZ / 10; |
Paul E. McKenney | e4fe5dd | 2020-03-04 17:31:43 -0800 | [diff] [blame] | 940 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
| 941 | rcu_tasks.pertask_func = rcu_tasks_pertask; |
| 942 | rcu_tasks.postscan_func = rcu_tasks_postscan; |
| 943 | rcu_tasks.holdouts_func = check_all_holdout_tasks; |
| 944 | rcu_tasks.postgp_func = rcu_tasks_postgp; |
Paul E. McKenney | 5873b8a | 2020-03-03 11:49:21 -0800 | [diff] [blame] | 945 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
Paul E. McKenney | eacd6f0 | 2020-03-02 11:59:20 -0800 | [diff] [blame] | 946 | return 0; |
| 947 | } |
Paul E. McKenney | eacd6f0 | 2020-03-02 11:59:20 -0800 | [diff] [blame] | 948 | |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 949 | #if !defined(CONFIG_TINY_RCU) |
| 950 | void show_rcu_tasks_classic_gp_kthread(void) |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 951 | { |
| 952 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); |
| 953 | } |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 954 | EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); |
| 955 | #endif // !defined(CONFIG_TINY_RCU) |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 956 | |
Paul E. McKenney | 25246fc | 2020-04-05 20:49:13 -0700 | [diff] [blame] | 957 | /* Do the srcu_read_lock() for the above synchronize_srcu(). */ |
| 958 | void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) |
| 959 | { |
| 960 | preempt_disable(); |
| 961 | current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); |
| 962 | preempt_enable(); |
| 963 | } |
| 964 | |
| 965 | /* Do the srcu_read_unlock() for the above synchronize_srcu(). */ |
| 966 | void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) |
| 967 | { |
| 968 | struct task_struct *t = current; |
| 969 | |
| 970 | preempt_disable(); |
| 971 | __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); |
| 972 | preempt_enable(); |
| 973 | exit_tasks_rcu_finish_trace(t); |
| 974 | } |
| 975 | |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 976 | #else /* #ifdef CONFIG_TASKS_RCU */ |
Paul E. McKenney | 25246fc | 2020-04-05 20:49:13 -0700 | [diff] [blame] | 977 | void exit_tasks_rcu_start(void) { } |
| 978 | void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 979 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 980 | |
| 981 | #ifdef CONFIG_TASKS_RUDE_RCU |
| 982 | |
| 983 | //////////////////////////////////////////////////////////////////////// |
| 984 | // |
| 985 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of |
| 986 | // passing an empty function to schedule_on_each_cpu(). This approach |
Paul E. McKenney | e4be1f4 | 2021-06-22 11:57:15 -0700 | [diff] [blame] | 987 | // provides an asynchronous call_rcu_tasks_rude() API and batching of |
| 988 | // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. |
Paul E. McKenney | 9fc98e3 | 2021-03-04 14:46:59 -0800 | [diff] [blame] | 989 | // This invokes schedule_on_each_cpu() in order to send IPIs far and wide |
| 990 | // and induces otherwise unnecessary context switches on all online CPUs, |
| 991 | // whether idle or not. |
| 992 | // |
| 993 | // Callback handling is provided by the rcu_tasks_kthread() function. |
| 994 | // |
| 995 | // Ordering is provided by the scheduler's context-switch code. |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 996 | |
| 997 | // Empty function to allow workqueues to force a context switch. |
| 998 | static void rcu_tasks_be_rude(struct work_struct *work) |
| 999 | { |
| 1000 | } |
| 1001 | |
| 1002 | // Wait for one rude RCU-tasks grace period. |
| 1003 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) |
| 1004 | { |
Padmanabha Srinivasaiah | f75fd4b | 2022-02-17 16:25:19 +0100 | [diff] [blame] | 1005 | if (num_online_cpus() <= 1) |
| 1006 | return; // Fastpath for only one CPU. |
| 1007 | |
Paul E. McKenney | 238dbce | 2020-03-18 10:54:05 -0700 | [diff] [blame] | 1008 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1009 | schedule_on_each_cpu(rcu_tasks_be_rude); |
| 1010 | } |
| 1011 | |
| 1012 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); |
Paul E. McKenney | c97d12a | 2020-03-03 15:50:31 -0800 | [diff] [blame] | 1013 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
| 1014 | "RCU Tasks Rude"); |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1015 | |
| 1016 | /** |
| 1017 | * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period |
| 1018 | * @rhp: structure to be used for queueing the RCU updates. |
| 1019 | * @func: actual callback function to be invoked after the grace period |
| 1020 | * |
| 1021 | * The callback function will be invoked some time after a full grace |
| 1022 | * period elapses, in other words after all currently executing RCU |
| 1023 | * read-side critical sections have completed. call_rcu_tasks_rude() |
| 1024 | * assumes that the read-side critical sections end at context switch, |
Paul E. McKenney | 8af9e2c | 2021-09-15 09:24:18 -0700 | [diff] [blame] | 1025 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
Neeraj Upadhyay | a6517e9 | 2021-08-18 12:58:43 +0530 | [diff] [blame] | 1026 | * usermode execution is schedulable). As such, there are no read-side |
| 1027 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because |
| 1028 | * this primitive is intended to determine that all tasks have passed |
| 1029 | * through a safe state, not so much for data-structure synchronization. |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1030 | * |
| 1031 | * See the description of call_rcu() for more detailed information on |
| 1032 | * memory ordering guarantees. |
| 1033 | */ |
| 1034 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) |
| 1035 | { |
| 1036 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); |
| 1037 | } |
| 1038 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); |
| 1039 | |
| 1040 | /** |
| 1041 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period |
| 1042 | * |
| 1043 | * Control will return to the caller some time after a rude rcu-tasks |
| 1044 | * grace period has elapsed, in other words after all currently |
| 1045 | * executing rcu-tasks read-side critical sections have elapsed. These |
| 1046 | * read-side critical sections are delimited by calls to schedule(), |
Neeraj Upadhyay | a6517e9 | 2021-08-18 12:58:43 +0530 | [diff] [blame] | 1047 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
| 1048 | * context), and (in theory, anyway) cond_resched(). |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1049 | * |
| 1050 | * This is a very specialized primitive, intended only for a few uses in |
| 1051 | * tracing and other situations requiring manipulation of function preambles |
| 1052 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not |
| 1053 | * (yet) intended for heavy use from multiple CPUs. |
| 1054 | * |
| 1055 | * See the description of synchronize_rcu() for more detailed information |
| 1056 | * on memory ordering guarantees. |
| 1057 | */ |
| 1058 | void synchronize_rcu_tasks_rude(void) |
| 1059 | { |
| 1060 | synchronize_rcu_tasks_generic(&rcu_tasks_rude); |
| 1061 | } |
| 1062 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); |
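// Illustrative sketch, not part of this file: a synchronous updater that
// must also wait for code running with preemption disabled on any CPU,
// idle or not.  The helpers unhook_my_cpu_tramp() and free_my_cpu_tramp()
// are hypothetical; only synchronize_rcu_tasks_rude() is a real API.

static void my_cpu_tramp_teardown(void)
{
	unhook_my_cpu_tramp();		// Hypothetical: no new executions can begin.
	synchronize_rcu_tasks_rude();	// Waits for a context switch on every online CPU.
	free_my_cpu_tramp();		// Hypothetical: now safe to free the code.
}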
| 1063 | |
| 1064 | /** |
| 1065 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. |
| 1066 | * |
| 1067 | * Although the current implementation is guaranteed to wait, it is not |
| 1068 | * obligated to, for example, if there are no pending callbacks. |
| 1069 | */ |
| 1070 | void rcu_barrier_tasks_rude(void) |
| 1071 | { |
Paul E. McKenney | ce9b1c6 | 2021-11-11 14:53:43 -0800 | [diff] [blame] | 1072 | rcu_barrier_tasks_generic(&rcu_tasks_rude); |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1073 | } |
| 1074 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); |
| 1075 | |
| 1076 | static int __init rcu_spawn_tasks_rude_kthread(void) |
| 1077 | { |
Paul E. McKenney | cafafd6 | 2021-11-05 21:52:00 -0700 | [diff] [blame] | 1078 | cblist_init_generic(&rcu_tasks_rude); |
Paul E. McKenney | 4fe192d | 2020-09-09 22:05:41 -0700 | [diff] [blame] | 1079 | rcu_tasks_rude.gp_sleep = HZ / 10; |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1080 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); |
| 1081 | return 0; |
| 1082 | } |
Paul E. McKenney | c84aad7 | 2020-03-02 21:06:43 -0800 | [diff] [blame] | 1083 | |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 1084 | #if !defined(CONFIG_TINY_RCU) |
| 1085 | void show_rcu_tasks_rude_gp_kthread(void) |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1086 | { |
| 1087 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); |
| 1088 | } |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 1089 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
| 1090 | #endif // !defined(CONFIG_TINY_RCU) |
| 1091 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1092 | |
| 1093 | //////////////////////////////////////////////////////////////////////// |
| 1094 | // |
| 1095 | // Tracing variant of Tasks RCU. This variant is designed to be used |
| 1096 | // to protect tracing hooks, including those of BPF. This variant |
| 1097 | // therefore: |
| 1098 | // |
| 1099 | // 1. Has explicit read-side markers to allow finite grace periods |
| 1100 | // in the face of in-kernel loops for PREEMPT=n builds. |
| 1101 | // |
| 1102 | // 2. Protects code in the idle loop, exception entry/exit, and |
| 1103 | // CPU-hotplug code paths, similar to the capabilities of SRCU. |
| 1104 | // |
Paul E. McKenney | c4f113a | 2021-08-05 09:54:45 -0700 | [diff] [blame] | 1105 | // 3. Avoids expensive read-side instructions, having overhead similar |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1106 | // to that of Preemptible RCU. |
| 1107 | // |
| 1108 | // There are of course downsides. The grace-period code can send IPIs to |
| 1109 | // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace. |
| 1110 | // It is necessary to scan the full tasklist, much as for Tasks RCU. There |
| 1111 | // is a single callback queue guarded by a single lock, again, much as for |
| 1112 | // Tasks RCU. If needed, these downsides can be at least partially remedied. |
| 1113 | // |
| 1114 | // Perhaps most important, this variant of RCU does not affect the vanilla |
| 1115 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace |
| 1116 | // readers can operate from idle, offline, and exception entry/exit in no |
| 1117 | // way allows rcu_preempt and rcu_sched readers to also do so. |
Paul E. McKenney | a434dd1 | 2021-02-25 10:26:00 -0800 | [diff] [blame] | 1118 | // |
| 1119 | // The implementation uses rcu_tasks_wait_gp(), which relies on function |
| 1120 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() |
| 1121 | // function sets these function pointers up so that rcu_tasks_wait_gp() |
| 1122 | // invokes these functions in this order: |
| 1123 | // |
| 1124 | // rcu_tasks_trace_pregp_step(): |
| 1125 | // Initialize the count of readers and block CPU-hotplug operations. |
| 1126 | // rcu_tasks_trace_pertask(), invoked on every non-idle task: |
| 1127 | // Initialize per-task state and attempt to identify an immediate |
| 1128 | // quiescent state for that task, or, failing that, attempt to |
| 1129 | // set that task's .need_qs flag so that task's next outermost |
| 1130 | // rcu_read_unlock_trace() will report the quiescent state (in which |
| 1131 | // case the count of readers is incremented). If both attempts fail, |
Paul E. McKenney | 45f4b4a | 2021-05-24 11:26:53 -0700 | [diff] [blame] | 1132 | // the task is added to a "holdout" list. Note that IPIs are used |
| 1133 | // to invoke trc_read_check_handler() in the context of running tasks |
| 1134 | // in order to avoid ordering overhead on common-case shared-variable |
| 1135 | // accesses. |
Paul E. McKenney | a434dd1 | 2021-02-25 10:26:00 -0800 | [diff] [blame] | 1136 | // rcu_tasks_trace_postscan(): |
| 1137 | // Initialize state and attempt to identify an immediate quiescent |
| 1138 | // state as above (but only for idle tasks), unblock CPU-hotplug |
| 1139 | // operations, and wait for an RCU grace period to avoid races with |
| 1140 | // tasks that are in the process of exiting. |
| 1141 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: |
| 1142 | // Scans the holdout list, attempting to identify a quiescent state |
| 1143 | // for each task on the list. If there is a quiescent state, the |
| 1144 | // corresponding task is removed from the holdout list. |
| 1145 | // rcu_tasks_trace_postgp(): |
| 1146 | // Wait for the count of readers to drop to zero, reporting any stalls. |
| 1147 | // Also execute full memory barriers to maintain ordering with code |
| 1148 | // executing after the grace period. |
| 1149 | // |
| 1150 | // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. |
| 1151 | // |
| 1152 | // Pre-grace-period update-side code is ordered before the grace |
| 1153 | // period via the ->cbs_lock and barriers in rcu_tasks_kthread(). |
| 1154 | // Pre-grace-period read-side code is ordered before the grace period by |
| 1155 | // atomic_dec_and_test() of the count of readers (for IPIed readers) and by |
| 1156 | // scheduler context-switch ordering (for locked-down non-running readers). |
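// Illustrative sketch, not part of this file: the explicit read-side
// markers used by this variant, as a tracing hook might use them.  The
// structure "struct my_hook", its fields, and my_hook_ptr are hypothetical;
// rcu_read_lock_trace(), rcu_read_unlock_trace(), and
// rcu_read_lock_trace_held() come from <linux/rcupdate_trace.h>, and
// rcu_dereference_check() from <linux/rcupdate.h>.

struct my_hook {
	struct rcu_head rh;
	void (*func)(void *arg);
	void *arg;
};

static struct my_hook __rcu *my_hook_ptr;

static void my_invoke_hook(void)
{
	struct my_hook *hp;

	rcu_read_lock_trace();	// Reader may run in idle or exception entry/exit.
	hp = rcu_dereference_check(my_hook_ptr, rcu_read_lock_trace_held());
	if (hp)
		hp->func(hp->arg);
	rcu_read_unlock_trace();
}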
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1157 | |
| 1158 | // The lockdep state must be outside of #ifdef to be useful. |
| 1159 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 1160 | static struct lock_class_key rcu_lock_trace_key; |
| 1161 | struct lockdep_map rcu_trace_lock_map = |
| 1162 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); |
| 1163 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); |
| 1164 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
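// Illustrative sketch, not part of this file: the lockdep map above lets
// debugging builds assert reader context, either directly via
// lock_is_held(&rcu_trace_lock_map) as synchronize_rcu_tasks_trace() does
// below, or via the rcu_read_lock_trace_held() wrapper.  The function name
// here is hypothetical.

static void my_requires_trace_reader(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(),
			 "my_requires_trace_reader() used outside of an RCU Tasks Trace reader");
}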
| 1165 | |
| 1166 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 1167 | |
Paul E. McKenney | 30d8aa5 | 2020-06-09 09:24:51 -0700 | [diff] [blame] | 1168 | static atomic_t trc_n_readers_need_end; // Number of waited-for readers. |
| 1169 | static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // Wait queue for the grace-period kthread. |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1170 | |
| 1171 | // Record outstanding IPIs to each CPU. No point in sending two... |
| 1172 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); |
| 1173 | |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1174 | // The number of detections of task quiescent states relying on |
| 1175 | // heavyweight readers executing explicit memory barriers. |
Paul E. McKenney | 6731da9 | 2020-09-09 14:14:34 -0700 | [diff] [blame] | 1176 | static unsigned long n_heavy_reader_attempts; |
| 1177 | static unsigned long n_heavy_reader_updates; |
| 1178 | static unsigned long n_heavy_reader_ofl_updates; |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1179 | |
Paul E. McKenney | b0afa0f | 2020-03-17 11:39:26 -0700 | [diff] [blame] | 1180 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
| 1181 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, |
| 1182 | "RCU Tasks Trace"); |
| 1183 | |
Paul E. McKenney | b38f57c | 2020-03-20 14:29:08 -0700 | [diff] [blame] | 1184 | /* |
| 1185 | * This irq_work handler allows rcu_read_unlock_trace() to be invoked |
| 1186 | * while the scheduler locks are held. |
| 1187 | */ |
| 1188 | static void rcu_read_unlock_iw(struct irq_work *iwp) |
| 1189 | { |
| 1190 | wake_up(&trc_wait); |
| 1191 | } |
| 1192 | static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw); |
| 1193 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1194 | /* If we are the last reader, wake up the grace-period kthread. */ |
Paul E. McKenney | a5c071c | 2021-07-28 12:28:27 -0700 | [diff] [blame] | 1195 | void rcu_read_unlock_trace_special(struct task_struct *t) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1196 | { |
Paul E. McKenney | f8ab3fa | 2021-05-24 15:36:37 -0700 | [diff] [blame] | 1197 | int nq = READ_ONCE(t->trc_reader_special.b.need_qs); |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1198 | |
Paul E. McKenney | 9ae58d7 | 2020-03-18 17:16:37 -0700 | [diff] [blame] | 1199 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && |
| 1200 | t->trc_reader_special.b.need_mb) |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1201 | smp_mb(); // Pairs with update-side barriers. |
| 1202 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. |
| 1203 | if (nq) |
| 1204 | WRITE_ONCE(t->trc_reader_special.b.need_qs, false); |
Paul E. McKenney | a5c071c | 2021-07-28 12:28:27 -0700 | [diff] [blame] | 1205 | WRITE_ONCE(t->trc_reader_nesting, 0); |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1206 | if (nq && atomic_dec_and_test(&trc_n_readers_need_end)) |
Paul E. McKenney | b38f57c | 2020-03-20 14:29:08 -0700 | [diff] [blame] | 1207 | irq_work_queue(&rcu_tasks_trace_iw); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1208 | } |
| 1209 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); |
| 1210 | |
| 1211 | /* Add a task to the holdout list, if it is not already on the list. */ |
| 1212 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) |
| 1213 | { |
| 1214 | if (list_empty(&t->trc_holdout_list)) { |
| 1215 | get_task_struct(t); |
| 1216 | list_add(&t->trc_holdout_list, bhp); |
| 1217 | } |
| 1218 | } |
| 1219 | |
| 1220 | /* Remove a task from the holdout list, if it is in fact present. */ |
| 1221 | static void trc_del_holdout(struct task_struct *t) |
| 1222 | { |
| 1223 | if (!list_empty(&t->trc_holdout_list)) { |
| 1224 | list_del_init(&t->trc_holdout_list); |
| 1225 | put_task_struct(t); |
| 1226 | } |
| 1227 | } |
| 1228 | |
| 1229 | /* IPI handler to check task state. */ |
| 1230 | static void trc_read_check_handler(void *t_in) |
| 1231 | { |
| 1232 | struct task_struct *t = current; |
| 1233 | struct task_struct *texp = t_in; |
| 1234 | |
| 1235 | // If the task is no longer running on this CPU, leave. |
| 1236 | if (unlikely(texp != t)) { |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1237 | goto reset_ipi; // Already on holdout list, so will check later. |
| 1238 | } |
| 1239 | |
| 1240 | // If the task is not in a read-side critical section, and |
| 1241 | // if this is the last reader, awaken the grace-period kthread. |
Paul E. McKenney | bdb0cca | 2021-05-24 12:48:18 -0700 | [diff] [blame] | 1242 | if (likely(!READ_ONCE(t->trc_reader_nesting))) { |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1243 | WRITE_ONCE(t->trc_reader_checked, true); |
| 1244 | goto reset_ipi; |
| 1245 | } |
Paul E. McKenney | ba3a86e | 2020-09-14 15:44:37 -0700 | [diff] [blame] | 1246 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
Paul E. McKenney | 96017bf | 2021-07-28 10:53:41 -0700 | [diff] [blame] | 1247 | if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) |
Paul E. McKenney | ba3a86e | 2020-09-14 15:44:37 -0700 | [diff] [blame] | 1248 | goto reset_ipi; |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1249 | WRITE_ONCE(t->trc_reader_checked, true); |
| 1250 | |
| 1251 | // Get here if the task is in a read-side critical section. Set |
| 1252 | // its state so that it will awaken the grace-period kthread upon |
| 1253 | // exit from that critical section. |
Paul E. McKenney | 96017bf | 2021-07-28 10:53:41 -0700 | [diff] [blame] | 1254 | atomic_inc(&trc_n_readers_need_end); // One more to wait on. |
Paul E. McKenney | f8ab3fa | 2021-05-24 15:36:37 -0700 | [diff] [blame] | 1255 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1256 | WRITE_ONCE(t->trc_reader_special.b.need_qs, true); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1257 | |
| 1258 | reset_ipi: |
| 1259 | // Allow future IPIs to be sent on CPU and for task. |
| 1260 | // Also order this IPI handler against any later manipulations of |
| 1261 | // the intended task. |
Liu Song | 8211e92 | 2021-06-30 22:08:02 +0800 | [diff] [blame] | 1262 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1263 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
| 1264 | } |
| 1265 | |
| 1266 | /* Callback function for scheduler to check locked-down task. */ |
Peter Zijlstra | 9b3c4ab | 2021-09-21 21:54:32 +0200 | [diff] [blame] | 1267 | static int trc_inspect_reader(struct task_struct *t, void *arg) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1268 | { |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1269 | int cpu = task_cpu(t); |
Paul E. McKenney | 18f08e7 | 2021-07-28 11:32:28 -0700 | [diff] [blame] | 1270 | int nesting; |
Paul E. McKenney | 7e3b70e | 2020-03-22 11:24:58 -0700 | [diff] [blame] | 1271 | bool ofl = cpu_is_offline(cpu); |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1272 | |
| 1273 | if (task_curr(t)) { |
Paul E. McKenney | 30d8aa5 | 2020-06-09 09:24:51 -0700 | [diff] [blame] | 1274 | WARN_ON_ONCE(ofl && !is_idle_task(t)); |
Paul E. McKenney | 7e3b70e | 2020-03-22 11:24:58 -0700 | [diff] [blame] | 1275 | |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1276 | // If no chance of heavyweight readers, do it the hard way. |
Paul E. McKenney | 7e3b70e | 2020-03-22 11:24:58 -0700 | [diff] [blame] | 1277 | if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
Peter Zijlstra | 9b3c4ab | 2021-09-21 21:54:32 +0200 | [diff] [blame] | 1278 | return -EINVAL; |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1279 | |
| 1280 | // If heavyweight readers are enabled on the remote task, |
| 1281 | // we can inspect its state even though it is currently running. |
| 1282 | // However, we cannot safely change its state. |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1283 | n_heavy_reader_attempts++; |
Paul E. McKenney | 7e3b70e | 2020-03-22 11:24:58 -0700 | [diff] [blame] | 1284 | if (!ofl && // Check for "running" idle tasks on offline CPUs. |
| 1285 | !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) |
Peter Zijlstra | 9b3c4ab | 2021-09-21 21:54:32 +0200 | [diff] [blame] | 1286 | return -EINVAL; // No quiescent state, do it the hard way. |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1287 | n_heavy_reader_updates++; |
Paul E. McKenney | edf3775 | 2020-03-22 14:09:45 -0700 | [diff] [blame] | 1288 | if (ofl) |
| 1289 | n_heavy_reader_ofl_updates++; |
Paul E. McKenney | 18f08e7 | 2021-07-28 11:32:28 -0700 | [diff] [blame] | 1290 | nesting = 0; |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1291 | } else { |
Paul E. McKenney | bdb0cca | 2021-05-24 12:48:18 -0700 | [diff] [blame] | 1292 | // The task is not running, so C-language access is safe. |
Paul E. McKenney | 18f08e7 | 2021-07-28 11:32:28 -0700 | [diff] [blame] | 1293 | nesting = t->trc_reader_nesting; |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1294 | } |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1295 | |
Paul E. McKenney | 18f08e7 | 2021-07-28 11:32:28 -0700 | [diff] [blame] | 1296 | // If not exiting a read-side critical section, mark as checked |
| 1297 | // so that the grace-period kthread will remove it from the |
| 1298 | // holdout list. |
| 1299 | t->trc_reader_checked = nesting >= 0; |
| 1300 | if (nesting <= 0) |
Linus Torvalds | 6fedc28 | 2021-11-01 20:25:38 -0700 | [diff] [blame] | 1301 | return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later. |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1302 | |
| 1303 | // The task is in a read-side critical section, so set up its |
| 1304 | // state so that it will awaken the grace-period kthread upon exit |
| 1305 | // from that critical section. |
| 1306 | atomic_inc(&trc_n_readers_need_end); // One more to wait on. |
Paul E. McKenney | f8ab3fa | 2021-05-24 15:36:37 -0700 | [diff] [blame] | 1307 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); |
Paul E. McKenney | 7d0c9c5 | 2020-03-19 15:33:12 -0700 | [diff] [blame] | 1308 | WRITE_ONCE(t->trc_reader_special.b.need_qs, true); |
Peter Zijlstra | 9b3c4ab | 2021-09-21 21:54:32 +0200 | [diff] [blame] | 1309 | return 0; |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1310 | } |
| 1311 | |
| 1312 | /* Attempt to extract the state for the specified task. */ |
| 1313 | static void trc_wait_for_one_reader(struct task_struct *t, |
| 1314 | struct list_head *bhp) |
| 1315 | { |
| 1316 | int cpu; |
| 1317 | |
| 1318 | // If a previous IPI is still in flight, let it complete. |
| 1319 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI |
| 1320 | return; |
| 1321 | |
| 1322 | // The current task had better be in a quiescent state. |
| 1323 | if (t == current) { |
| 1324 | t->trc_reader_checked = true; |
Paul E. McKenney | bdb0cca | 2021-05-24 12:48:18 -0700 | [diff] [blame] | 1325 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1326 | return; |
| 1327 | } |
| 1328 | |
| 1329 | // Attempt to nail down the task for inspection. |
| 1330 | get_task_struct(t); |
Peter Zijlstra | 9b3c4ab | 2021-09-21 21:54:32 +0200 | [diff] [blame] | 1331 | if (!task_call_func(t, trc_inspect_reader, NULL)) { |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1332 | put_task_struct(t); |
| 1333 | return; |
| 1334 | } |
| 1335 | put_task_struct(t); |
| 1336 | |
Paul E. McKenney | 45f4b4a | 2021-05-24 11:26:53 -0700 | [diff] [blame] | 1337 | // If this task is not yet on the holdout list, then we are in |
| 1338 | // an RCU read-side critical section. Otherwise, the invocation of |
Neeraj Upadhyay | d0a8585 | 2021-08-18 12:58:39 +0530 | [diff] [blame] | 1339 | // trc_add_holdout() that added it to the list did the necessary |
Paul E. McKenney | 45f4b4a | 2021-05-24 11:26:53 -0700 | [diff] [blame] | 1340 | // get_task_struct(). Either way, the task cannot be freed out |
| 1341 | // from under this code. |
| 1342 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1343 | // If currently running, send an IPI; either way, add to the list. |
| 1344 | trc_add_holdout(t, bhp); |
Paul E. McKenney | 574de87 | 2020-09-09 21:51:09 -0700 | [diff] [blame] | 1345 | if (task_curr(t) && |
| 1346 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1347 | // The task is currently running, so try IPIing it. |
| 1348 | cpu = task_cpu(t); |
| 1349 | |
| 1350 | // If there is already an IPI outstanding, let it happen. |
| 1351 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) |
| 1352 | return; |
| 1353 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1354 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
| 1355 | t->trc_ipi_to_cpu = cpu; |
Paul E. McKenney | 238dbce | 2020-03-18 10:54:05 -0700 | [diff] [blame] | 1356 | rcu_tasks_trace.n_ipis++; |
Paul E. McKenney | 96017bf | 2021-07-28 10:53:41 -0700 | [diff] [blame] | 1357 | if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1358 | // Just in case there is some other reason for |
| 1359 | // failure than the target CPU being offline. |
Neeraj Upadhyay | 46aa886 | 2021-08-27 13:43:35 +0530 | [diff] [blame] | 1360 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", |
| 1361 | __func__, cpu); |
Paul E. McKenney | 7e0669c | 2020-03-25 14:36:05 -0700 | [diff] [blame] | 1362 | rcu_tasks_trace.n_ipis_fails++; |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1363 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
Neeraj Upadhyay | 46aa886 | 2021-08-27 13:43:35 +0530 | [diff] [blame] | 1364 | t->trc_ipi_to_cpu = -1; |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1365 | } |
| 1366 | } |
| 1367 | } |
| 1368 | |
| 1369 | /* Initialize for a new RCU-tasks-trace grace period. */ |
| 1370 | static void rcu_tasks_trace_pregp_step(void) |
| 1371 | { |
| 1372 | int cpu; |
| 1373 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1374 | // Allow for fast-acting IPIs. |
| 1375 | atomic_set(&trc_n_readers_need_end, 1); |
| 1376 | |
| 1377 | // There shouldn't be any old IPIs, but... |
| 1378 | for_each_possible_cpu(cpu) |
| 1379 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); |
Paul E. McKenney | 81b4a7b | 2020-03-22 10:10:07 -0700 | [diff] [blame] | 1380 | |
| 1381 | // Disable CPU hotplug across the tasklist scan. |
| 1382 | // This also waits for all readers in CPU-hotplug code paths. |
| 1383 | cpus_read_lock(); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1384 | } |
| 1385 | |
| 1386 | /* Do first-round processing for the specified task. */ |
| 1387 | static void rcu_tasks_trace_pertask(struct task_struct *t, |
| 1388 | struct list_head *hop) |
| 1389 | { |
Uladzislau Rezki (Sony) | 1b04fa9 | 2020-12-09 21:27:31 +0100 | [diff] [blame] | 1390 | // During early boot when there is only the one boot CPU, there |
| 1391 | // is no idle task for the other CPUs. Just return. |
| 1392 | if (unlikely(t == NULL)) |
| 1393 | return; |
| 1394 | |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1395 | WRITE_ONCE(t->trc_reader_special.b.need_qs, false); |
Paul E. McKenney | 43766c3 | 2020-03-16 20:38:29 -0700 | [diff] [blame] | 1396 | WRITE_ONCE(t->trc_reader_checked, false); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1397 | t->trc_ipi_to_cpu = -1; |
| 1398 | trc_wait_for_one_reader(t, hop); |
| 1399 | } |
| 1400 | |
Paul E. McKenney | 9796e1a | 2020-03-22 13:18:54 -0700 | [diff] [blame] | 1401 | /* |
| 1402 | * Do intermediate processing between task and holdout scans and |
| 1403 | * pick up the idle tasks. |
| 1404 | */ |
| 1405 | static void rcu_tasks_trace_postscan(struct list_head *hop) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1406 | { |
Paul E. McKenney | 9796e1a | 2020-03-22 13:18:54 -0700 | [diff] [blame] | 1407 | int cpu; |
| 1408 | |
| 1409 | for_each_possible_cpu(cpu) |
| 1410 | rcu_tasks_trace_pertask(idle_task(cpu), hop); |
| 1411 | |
Paul E. McKenney | 81b4a7b | 2020-03-22 10:10:07 -0700 | [diff] [blame] | 1412 | // Re-enable CPU hotplug now that the tasklist scan has completed. |
| 1413 | cpus_read_unlock(); |
| 1414 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1415 | // Wait for late-stage exiting tasks to finish exiting. |
| 1416 | // These might have passed the call to exit_tasks_rcu_finish(). |
| 1417 | synchronize_rcu(); |
| 1418 | // Any tasks that exit after this point will set ->trc_reader_checked. |
| 1419 | } |
| 1420 | |
Neeraj Upadhyay | 65b629e | 2021-11-09 16:52:14 +0530 | [diff] [blame] | 1421 | /* Communicate task state back to the RCU tasks trace stall-warning processing. */ |
| 1422 | struct trc_stall_chk_rdr { |
| 1423 | int nesting; |
| 1424 | int ipi_to_cpu; |
| 1425 | u8 needqs; |
| 1426 | }; |
| 1427 | |
| 1428 | static int trc_check_slow_task(struct task_struct *t, void *arg) |
| 1429 | { |
| 1430 | struct trc_stall_chk_rdr *trc_rdrp = arg; |
| 1431 | |
| 1432 | if (task_curr(t)) |
| 1433 | return false; // It is running, so decline to inspect it. |
| 1434 | trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); |
| 1435 | trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); |
| 1436 | trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs); |
| 1437 | return true; |
| 1438 | } |
| 1439 | |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1440 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
| 1441 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) |
| 1442 | { |
| 1443 | int cpu; |
Neeraj Upadhyay | 65b629e | 2021-11-09 16:52:14 +0530 | [diff] [blame] | 1444 | struct trc_stall_chk_rdr trc_rdr; |
| 1445 | bool is_idle_tsk = is_idle_task(t); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1446 | |
| 1447 | if (*firstreport) { |
| 1448 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); |
| 1449 | *firstreport = false; |
| 1450 | } |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1451 | cpu = task_cpu(t); |
Neeraj Upadhyay | 65b629e | 2021-11-09 16:52:14 +0530 | [diff] [blame] | 1452 | if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) |
| 1453 | pr_alert("P%d: %c\n", |
| 1454 | t->pid, |
| 1455 | ".i"[is_idle_tsk]); |
| 1456 | else |
| 1457 | pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", |
| 1458 | t->pid, |
| 1459 | ".I"[trc_rdr.ipi_to_cpu >= 0], |
| 1460 | ".i"[is_idle_tsk], |
| 1461 | ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], |
| 1462 | trc_rdr.nesting, |
| 1463 | " N"[!!trc_rdr.needqs], |
| 1464 | cpu); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1465 | sched_show_task(t); |
| 1466 | } |
| 1467 | |
| 1468 | /* List stalled IPIs for RCU tasks trace. */ |
| 1469 | static void show_stalled_ipi_trace(void) |
| 1470 | { |
| 1471 | int cpu; |
| 1472 | |
| 1473 | for_each_possible_cpu(cpu) |
| 1474 | if (per_cpu(trc_ipi_to_cpu, cpu)) |
| 1475 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); |
| 1476 | } |
| 1477 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1478 | /* Do one scan of the holdout list. */ |
| 1479 | static void check_all_holdout_tasks_trace(struct list_head *hop, |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1480 | bool needreport, bool *firstreport) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1481 | { |
| 1482 | struct task_struct *g, *t; |
| 1483 | |
Paul E. McKenney | 81b4a7b | 2020-03-22 10:10:07 -0700 | [diff] [blame] | 1484 | // Disable CPU hotplug across the holdout list scan. |
| 1485 | cpus_read_lock(); |
| 1486 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1487 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
| 1488 | // If safe and needed, try to check the current task. |
| 1489 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && |
| 1490 | !READ_ONCE(t->trc_reader_checked)) |
| 1491 | trc_wait_for_one_reader(t, hop); |
| 1492 | |
| 1493 | // If check succeeded, remove this task from the list. |
Paul E. McKenney | f5dbc59 | 2021-09-18 20:40:48 -0700 | [diff] [blame] | 1494 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
| 1495 | READ_ONCE(t->trc_reader_checked)) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1496 | trc_del_holdout(t); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1497 | else if (needreport) |
| 1498 | show_stalled_task_trace(t, firstreport); |
| 1499 | } |
Paul E. McKenney | 81b4a7b | 2020-03-22 10:10:07 -0700 | [diff] [blame] | 1500 | |
| 1501 | // Re-enable CPU hotplug now that the holdout list scan has completed. |
| 1502 | cpus_read_unlock(); |
| 1503 | |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1504 | if (needreport) { |
Neeraj Upadhyay | 8940117 | 2021-08-18 12:58:40 +0530 | [diff] [blame] | 1505 | if (*firstreport) |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1506 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); |
| 1507 | show_stalled_ipi_trace(); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1508 | } |
| 1509 | } |
| 1510 | |
Paul E. McKenney | cbe0d8d | 2021-07-30 12:17:59 -0700 | [diff] [blame] | 1511 | static void rcu_tasks_trace_empty_fn(void *unused) |
| 1512 | { |
| 1513 | } |
| 1514 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1515 | /* Wait for grace period to complete and provide ordering. */ |
Paul E. McKenney | af051ca | 2020-03-16 12:13:33 -0700 | [diff] [blame] | 1516 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1517 | { |
Paul E. McKenney | cbe0d8d | 2021-07-30 12:17:59 -0700 | [diff] [blame] | 1518 | int cpu; |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1519 | bool firstreport; |
| 1520 | struct task_struct *g, *t; |
| 1521 | LIST_HEAD(holdouts); |
| 1522 | long ret; |
| 1523 | |
Paul E. McKenney | cbe0d8d | 2021-07-30 12:17:59 -0700 | [diff] [blame] | 1524 | // Wait for any lingering IPI handlers to complete. Note that |
| 1525 | // if a CPU has gone offline or transitioned to userspace in the |
| 1526 | // meantime, all IPI handlers should have been drained beforehand. |
| 1527 | // Yes, this assumes that CPUs process IPIs in order. If that ever |
| 1528 | // changes, there will need to be a recheck and/or timed wait. |
| 1529 | for_each_online_cpu(cpu) |
Paul E. McKenney | f5dbc59 | 2021-09-18 20:40:48 -0700 | [diff] [blame] | 1530 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
Paul E. McKenney | cbe0d8d | 2021-07-30 12:17:59 -0700 | [diff] [blame] | 1531 | smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); |
| 1532 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1533 | // Remove the safety count. |
| 1534 | smp_mb__before_atomic(); // Order vs. earlier atomics |
| 1535 | atomic_dec(&trc_n_readers_need_end); |
| 1536 | smp_mb__after_atomic(); // Order vs. later atomics |
| 1537 | |
| 1538 | // Wait for readers. |
Paul E. McKenney | af051ca | 2020-03-16 12:13:33 -0700 | [diff] [blame] | 1539 | set_tasks_gp_state(rtp, RTGS_WAIT_READERS); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1540 | for (;;) { |
| 1541 | ret = wait_event_idle_exclusive_timeout( |
| 1542 | trc_wait, |
| 1543 | atomic_read(&trc_n_readers_need_end) == 0, |
| 1544 | READ_ONCE(rcu_task_stall_timeout)); |
| 1545 | if (ret) |
| 1546 | break; // Count reached zero. |
Paul E. McKenney | af051ca | 2020-03-16 12:13:33 -0700 | [diff] [blame] | 1547 | // Stall warning time, so make a list of the offenders. |
Paul E. McKenney | f747c7e | 2020-09-15 14:27:38 -0700 | [diff] [blame] | 1548 | rcu_read_lock(); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1549 | for_each_process_thread(g, t) |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1550 | if (READ_ONCE(t->trc_reader_special.b.need_qs)) |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1551 | trc_add_holdout(t, &holdouts); |
Paul E. McKenney | f747c7e | 2020-09-15 14:27:38 -0700 | [diff] [blame] | 1552 | rcu_read_unlock(); |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1553 | firstreport = true; |
Paul E. McKenney | 592031c | 2020-09-15 14:03:34 -0700 | [diff] [blame] | 1554 | list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) { |
| 1555 | if (READ_ONCE(t->trc_reader_special.b.need_qs)) |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1556 | show_stalled_task_trace(t, &firstreport); |
Paul E. McKenney | 592031c | 2020-09-15 14:03:34 -0700 | [diff] [blame] | 1557 | trc_del_holdout(t); // Release task_struct reference. |
| 1558 | } |
Paul E. McKenney | 4593e77 | 2020-03-10 12:13:53 -0700 | [diff] [blame] | 1559 | if (firstreport) |
| 1560 | pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n"); |
| 1561 | show_stalled_ipi_trace(); |
| 1562 | pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end)); |
| 1563 | } |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1564 | smp_mb(); // Caller's code must be ordered after wakeup. |
Paul E. McKenney | 43766c3 | 2020-03-16 20:38:29 -0700 | [diff] [blame] | 1565 | // Pairs with pretty much every ordering primitive. |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1566 | } |
| 1567 | |
| 1568 | /* Report any needed quiescent state for this exiting task. */ |
Paul E. McKenney | 25246fc | 2020-04-05 20:49:13 -0700 | [diff] [blame] | 1569 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1570 | { |
| 1571 | WRITE_ONCE(t->trc_reader_checked, true); |
Paul E. McKenney | bdb0cca | 2021-05-24 12:48:18 -0700 | [diff] [blame] | 1572 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1573 | WRITE_ONCE(t->trc_reader_nesting, 0); |
Paul E. McKenney | 276c410 | 2020-03-17 16:02:06 -0700 | [diff] [blame] | 1574 | if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs))) |
Paul E. McKenney | a5c071c | 2021-07-28 12:28:27 -0700 | [diff] [blame] | 1575 | rcu_read_unlock_trace_special(t); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1576 | } |
| 1577 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1578 | /** |
| 1579 | * call_rcu_tasks_trace() - Queue a callback trace task-based grace period |
| 1580 | * @rhp: structure to be used for queueing the RCU updates. |
| 1581 | * @func: actual callback function to be invoked after the grace period |
| 1582 | * |
Neeraj Upadhyay | ed42c38 | 2021-08-25 12:40:50 +0530 | [diff] [blame] | 1583 | * The callback function will be invoked some time after a trace rcu-tasks |
| 1584 | * grace period elapses, in other words after all currently executing |
| 1585 | * trace rcu-tasks read-side critical sections have completed. These |
| 1586 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() |
| 1587 | * and rcu_read_unlock_trace(). |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1588 | * |
| 1589 | * See the description of call_rcu() for more detailed information on |
| 1590 | * memory ordering guarantees. |
| 1591 | */ |
| 1592 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) |
| 1593 | { |
| 1594 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); |
| 1595 | } |
| 1596 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); |
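// Illustrative sketch, not part of this file: asynchronously retiring the
// hypothetical hook from the reader sketch earlier in this file.  It
// assumes "struct my_hook" embeds a struct rcu_head named rh and that
// my_hook_ptr is the only path by which readers reach the hook;
// rcu_replace_pointer(), container_of(), and kfree() are real kernel APIs.

static void my_hook_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_hook, rh));
}

static void my_hook_remove(void)
{
	struct my_hook *hp;

	// Unpublish the hook, then free it only after all pre-existing
	// rcu_read_lock_trace() readers have completed.
	hp = rcu_replace_pointer(my_hook_ptr, NULL, 1);
	if (hp)
		call_rcu_tasks_trace(&hp->rh, my_hook_free_cb);
}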
| 1597 | |
| 1598 | /** |
| 1599 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period |
| 1600 | * |
| 1601 | * Control will return to the caller some time after a trace rcu-tasks |
Paul E. McKenney | c7dcf81 | 2020-06-12 13:11:29 -0700 | [diff] [blame] | 1602 | * grace period has elapsed, in other words after all currently executing |
Neeraj Upadhyay | ed42c38 | 2021-08-25 12:40:50 +0530 | [diff] [blame] | 1603 | * trace rcu-tasks read-side critical sections have elapsed. These read-side |
Paul E. McKenney | c7dcf81 | 2020-06-12 13:11:29 -0700 | [diff] [blame] | 1604 | * critical sections are delimited by calls to rcu_read_lock_trace() |
| 1605 | * and rcu_read_unlock_trace(). |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1606 | * |
| 1607 | * This is a very specialized primitive, intended only for a few uses in |
| 1608 | * tracing and other situations requiring manipulation of function preambles |
| 1609 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not |
| 1610 | * (yet) intended for heavy use from multiple CPUs. |
| 1611 | * |
| 1612 | * See the description of synchronize_rcu() for more detailed information |
| 1613 | * on memory ordering guarantees. |
| 1614 | */ |
| 1615 | void synchronize_rcu_tasks_trace(void) |
| 1616 | { |
| 1617 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); |
| 1618 | synchronize_rcu_tasks_generic(&rcu_tasks_trace); |
| 1619 | } |
| 1620 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); |
| 1621 | |
| 1622 | /** |
| 1623 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. |
| 1624 | * |
| 1625 | * Although the current implementation is guaranteed to wait, it is not |
| 1626 | * obligated to, for example, if there are no pending callbacks. |
| 1627 | */ |
| 1628 | void rcu_barrier_tasks_trace(void) |
| 1629 | { |
Paul E. McKenney | ce9b1c6 | 2021-11-11 14:53:43 -0800 | [diff] [blame] | 1630 | rcu_barrier_tasks_generic(&rcu_tasks_trace); |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1631 | } |
| 1632 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); |
| 1633 | |
| 1634 | static int __init rcu_spawn_tasks_trace_kthread(void) |
| 1635 | { |
Paul E. McKenney | cafafd6 | 2021-11-05 21:52:00 -0700 | [diff] [blame] | 1636 | cblist_init_generic(&rcu_tasks_trace); |
Paul E. McKenney | 2393a61 | 2020-09-09 21:36:34 -0700 | [diff] [blame] | 1637 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
Paul E. McKenney | 4fe192d | 2020-09-09 22:05:41 -0700 | [diff] [blame] | 1638 | rcu_tasks_trace.gp_sleep = HZ / 10; |
Paul E. McKenney | 75dc2da | 2020-09-17 16:17:17 -0700 | [diff] [blame] | 1639 | rcu_tasks_trace.init_fract = HZ / 10; |
Paul E. McKenney | 2393a61 | 2020-09-09 21:36:34 -0700 | [diff] [blame] | 1640 | } else { |
Paul E. McKenney | 4fe192d | 2020-09-09 22:05:41 -0700 | [diff] [blame] | 1641 | rcu_tasks_trace.gp_sleep = HZ / 200; |
| 1642 | if (rcu_tasks_trace.gp_sleep <= 0) |
| 1643 | rcu_tasks_trace.gp_sleep = 1; |
Paul E. McKenney | 75dc2da | 2020-09-17 16:17:17 -0700 | [diff] [blame] | 1644 | rcu_tasks_trace.init_fract = HZ / 200; |
Paul E. McKenney | 2393a61 | 2020-09-09 21:36:34 -0700 | [diff] [blame] | 1645 | if (rcu_tasks_trace.init_fract <= 0) |
| 1646 | rcu_tasks_trace.init_fract = 1; |
| 1647 | } |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1648 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
| 1649 | rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask; |
| 1650 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; |
| 1651 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; |
| 1652 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; |
| 1653 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); |
| 1654 | return 0; |
| 1655 | } |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1656 | |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 1657 | #if !defined(CONFIG_TINY_RCU) |
| 1658 | void show_rcu_tasks_trace_gp_kthread(void) |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1659 | { |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1660 | char buf[64]; |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1661 | |
Paul E. McKenney | edf3775 | 2020-03-22 14:09:45 -0700 | [diff] [blame] | 1662 | sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end), |
| 1663 | data_race(n_heavy_reader_ofl_updates), |
Paul E. McKenney | 4047150 | 2020-03-22 13:34:34 -0700 | [diff] [blame] | 1664 | data_race(n_heavy_reader_updates), |
| 1665 | data_race(n_heavy_reader_attempts)); |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1666 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); |
| 1667 | } |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 1668 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
| 1669 | #endif // !defined(CONFIG_TINY_RCU) |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1670 | |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1671 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
Paul E. McKenney | 25246fc | 2020-04-05 20:49:13 -0700 | [diff] [blame] | 1672 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
Paul E. McKenney | d5f177d | 2020-03-09 19:56:53 -0700 | [diff] [blame] | 1673 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
Paul E. McKenney | 8fd8ca3 | 2020-03-15 14:51:20 -0700 | [diff] [blame] | 1674 | |
Paul E. McKenney | 8344496 | 2020-05-28 20:03:48 -0700 | [diff] [blame] | 1675 | #ifndef CONFIG_TINY_RCU |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1676 | void show_rcu_tasks_gp_kthreads(void) |
| 1677 | { |
| 1678 | show_rcu_tasks_classic_gp_kthread(); |
| 1679 | show_rcu_tasks_rude_gp_kthread(); |
| 1680 | show_rcu_tasks_trace_gp_kthread(); |
| 1681 | } |
Paul E. McKenney | 8344496 | 2020-05-28 20:03:48 -0700 | [diff] [blame] | 1682 | #endif /* #ifndef CONFIG_TINY_RCU */ |
Paul E. McKenney | e21408c | 2020-03-16 11:01:55 -0700 | [diff] [blame] | 1683 | |
Uladzislau Rezki (Sony) | bfba7ed | 2020-12-09 21:27:32 +0100 | [diff] [blame] | 1684 | #ifdef CONFIG_PROVE_RCU |
| 1685 | struct rcu_tasks_test_desc { |
| 1686 | struct rcu_head rh; |
| 1687 | const char *name; |
| 1688 | bool notrun; |
| 1689 | }; |
| 1690 | |
| 1691 | static struct rcu_tasks_test_desc tests[] = { |
| 1692 | { |
| 1693 | .name = "call_rcu_tasks()", |
| 1694 | /* If not defined, the test is skipped. */ |
| 1695 | .notrun = !IS_ENABLED(CONFIG_TASKS_RCU), |
| 1696 | }, |
| 1697 | { |
| 1698 | .name = "call_rcu_tasks_rude()", |
| 1699 | /* If not defined, the test is skipped. */ |
| 1700 | .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU), |
| 1701 | }, |
| 1702 | { |
| 1703 | .name = "call_rcu_tasks_trace()", |
| 1704 | /* If not defined, the test is skipped. */ |
| 1705 | .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU) |
| 1706 | } |
| 1707 | }; |
| 1708 | |
| 1709 | static void test_rcu_tasks_callback(struct rcu_head *rhp) |
| 1710 | { |
| 1711 | struct rcu_tasks_test_desc *rttd = |
| 1712 | container_of(rhp, struct rcu_tasks_test_desc, rh); |
| 1713 | |
| 1714 | pr_info("Callback from %s invoked.\n", rttd->name); |
| 1715 | |
| 1716 | rttd->notrun = true; |
| 1717 | } |
| 1718 | |
| 1719 | static void rcu_tasks_initiate_self_tests(void) |
| 1720 | { |
| 1721 | pr_info("Running RCU-tasks wait API self tests\n"); |
| 1722 | #ifdef CONFIG_TASKS_RCU |
| 1723 | synchronize_rcu_tasks(); |
| 1724 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); |
| 1725 | #endif |
| 1726 | |
| 1727 | #ifdef CONFIG_TASKS_RUDE_RCU |
| 1728 | synchronize_rcu_tasks_rude(); |
| 1729 | call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); |
| 1730 | #endif |
| 1731 | |
| 1732 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 1733 | synchronize_rcu_tasks_trace(); |
| 1734 | call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); |
| 1735 | #endif |
| 1736 | } |
| 1737 | |
| 1738 | static int rcu_tasks_verify_self_tests(void) |
| 1739 | { |
| 1740 | int ret = 0; |
| 1741 | int i; |
| 1742 | |
| 1743 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
| 1744 | if (!tests[i].notrun) { // still hanging. |
| 1745 | 			pr_err("%s has failed.\n", tests[i].name); |
| 1746 | ret = -1; |
| 1747 | } |
| 1748 | } |
| 1749 | |
| 1750 | if (ret) |
| 1751 | WARN_ON(1); |
| 1752 | |
| 1753 | return ret; |
| 1754 | } |
| 1755 | late_initcall(rcu_tasks_verify_self_tests); |
| 1756 | #else /* #ifdef CONFIG_PROVE_RCU */ |
| 1757 | static void rcu_tasks_initiate_self_tests(void) { } |
| 1758 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
| 1759 | |
Uladzislau Rezki (Sony) | 1b04fa9 | 2020-12-09 21:27:31 +0100 | [diff] [blame] | 1760 | void __init rcu_init_tasks_generic(void) |
| 1761 | { |
| 1762 | #ifdef CONFIG_TASKS_RCU |
| 1763 | rcu_spawn_tasks_kthread(); |
| 1764 | #endif |
| 1765 | |
| 1766 | #ifdef CONFIG_TASKS_RUDE_RCU |
| 1767 | rcu_spawn_tasks_rude_kthread(); |
| 1768 | #endif |
| 1769 | |
| 1770 | #ifdef CONFIG_TASKS_TRACE_RCU |
| 1771 | rcu_spawn_tasks_trace_kthread(); |
| 1772 | #endif |
Uladzislau Rezki (Sony) | bfba7ed | 2020-12-09 21:27:32 +0100 | [diff] [blame] | 1773 | |
| 1774 | // Run the self-tests. |
| 1775 | rcu_tasks_initiate_self_tests(); |
Uladzislau Rezki (Sony) | 1b04fa9 | 2020-12-09 21:27:31 +0100 | [diff] [blame] | 1776 | } |
| 1777 | |
Paul E. McKenney | 8fd8ca3 | 2020-03-15 14:51:20 -0700 | [diff] [blame] | 1778 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
| 1779 | static inline void rcu_tasks_bootup_oddness(void) {} |
| 1780 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |