// SPDX-License-Identifier: GPL-2.0-only
/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk)
                wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (pending & SOFTIRQ_NOW_MASK)
                return false;
        return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
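
/*
 * A minimal sketch of how the two offsets are typically observed through
 * the helpers in preempt.h (illustrative only, not used by this code):
 *
 *      local_bh_disable();     // count += SOFTIRQ_DISABLE_OFFSET
 *      ...                     // in_softirq() true, in_serving_softirq() false
 *      local_bh_enable();      // count -= SOFTIRQ_DISABLE_OFFSET
 *
 * Softirq processing itself adds and removes only SOFTIRQ_OFFSET, so
 * in_serving_softirq() is true exactly while the handlers run.
 */
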
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
        local_lock_t    lock;
        int             cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
        .lock   = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
        return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;
        int newcnt;

        WARN_ON_ONCE(in_hardirq());

        /* First entry of a task into a BH disabled section? */
        if (!current->softirq_disable_cnt) {
                if (preemptible()) {
                        local_lock(&softirq_ctrl.lock);
                        /* Required to meet the RCU bottomhalf requirements. */
                        rcu_read_lock();
                } else {
                        DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
                }
        }

        /*
         * Track the per CPU softirq disabled state. On RT this is per CPU
         * state to allow preemption of bottom half disabled sections.
         */
        newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
        /*
         * Reflect the result in the task state to prevent recursion on the
         * local lock and to make softirq_count() et al. work.
         */
        current->softirq_disable_cnt = newcnt;

        if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
                raw_local_irq_save(flags);
                lockdep_softirqs_off(ip);
                raw_local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
        unsigned long flags;
        int newcnt;

        DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
                            this_cpu_read(softirq_ctrl.cnt));

        if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
                raw_local_irq_save(flags);
                lockdep_softirqs_on(_RET_IP_);
                raw_local_irq_restore(flags);
        }

        newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
        current->softirq_disable_cnt = newcnt;

        if (!newcnt && unlock) {
                rcu_read_unlock();
                local_unlock(&softirq_ctrl.lock);
        }
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        bool preempt_on = preemptible();
        unsigned long flags;
        u32 pending;
        int curcnt;

        WARN_ON_ONCE(in_hardirq());
        lockdep_assert_irqs_enabled();

        local_irq_save(flags);
        curcnt = __this_cpu_read(softirq_ctrl.cnt);

        /*
         * If this is not reenabling soft interrupts, no point in trying to
         * run pending ones.
         */
        if (curcnt != cnt)
                goto out;

        pending = local_softirq_pending();
        if (!pending || ksoftirqd_running(pending))
                goto out;

        /*
         * If this was called from non preemptible context, wake up the
         * softirq daemon.
         */
        if (!preempt_on) {
                wakeup_softirqd();
                goto out;
        }

        /*
         * Adjust softirq count to SOFTIRQ_OFFSET which makes
         * in_serving_softirq() become true.
         */
        cnt = SOFTIRQ_OFFSET;
        __local_bh_enable(cnt, false);
        __do_softirq();

out:
        __local_bh_enable(cnt, preempt_on);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
        __local_bh_enable(SOFTIRQ_OFFSET, true);
        WARN_ON_ONCE(in_interrupt());
        local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
        return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
        if (should_wake_ksoftirqd())
                wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
        if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
                invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_hardirq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (preempt_count() == cnt)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_on(_RET_IP_);

        __preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_hardirq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_hardirq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                lockdep_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        __preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
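
/*
 * Usage sketch (illustrative only; my_counter is a hypothetical per-CPU
 * variable): protecting per-CPU data that is also touched from softirq
 * context on this CPU:
 *
 *      local_bh_disable();
 *      __this_cpu_inc(my_counter);     // no softirq can run here
 *      local_bh_enable();              // may run pending softirqs directly
 */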

static inline void softirq_handle_begin(void)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
        local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
        local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
        return true;
}

static inline void invoke_softirq(void)
{
        if (ksoftirqd_running(local_softirq_pending()))
                return;

        if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();

        local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not miss-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (lockdep_hardirq_context()) {
                in_hardirq = true;
                lockdep_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for the
         * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
         * again if the socket is related to swapping.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();

        softirq_handle_begin();
        in_hardirq = lockdep_softirq_start();
        account_softirq_enter(current);

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
            __this_cpu_read(ksoftirqd) == current)
                rcu_softirq_qs();

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        account_softirq_exit(current);
        lockdep_softirq_end(in_hardirq);
        softirq_handle_end();
        current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
        __irq_enter_raw();

        if (tick_nohz_full_cpu(smp_processor_id()) ||
            (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
                tick_irq_enter();

        account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
        ct_irq_enter();
        irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_hardirq())
                        tick_nohz_irq_exit();
        }
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_hardirq_exit(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
        __irq_exit_rcu();
        /* must be last! */
        lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
        __irq_exit_rcu();
        ct_irq_exit();
        /* must be last! */
        lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt() && should_wake_ksoftirqd())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        lockdep_assert_irqs_disabled();
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
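
/*
 * Usage sketch (illustrative only): a subsystem registers its handler once
 * at init time and raises the softirq from hot paths. MY_SOFTIRQ and
 * my_softirq_action() are hypothetical names; real vectors are the fixed
 * set enumerated in <linux/interrupt.h>.
 *
 *      static void my_softirq_action(struct softirq_action *h)
 *      {
 *              // drain per-CPU work queued for this vector; may not sleep
 *      }
 *
 *      open_softirq(MY_SOFTIRQ, my_softirq_action);
 *      ...
 *      raise_softirq(MY_SOFTIRQ);      // mark it pending on this CPU
 */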

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
                                      struct tasklet_head __percpu *headp,
                                      unsigned int softirq_nr)
{
        struct tasklet_head *head;
        unsigned long flags;

        local_irq_save(flags);
        head = this_cpu_ptr(headp);
        t->next = NULL;
        *head->tail = t;
        head->tail = &(t->next);
        raise_softirq_irqoff(softirq_nr);
        local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_vec,
                                  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_hi_vec,
                                  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
        if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
                wake_up_var(&t->state);
                return true;
        }

        WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
                  t->use_callback ? "callback" : "func",
                  t->use_callback ? (void *)t->callback : (void *)t->func);

        return false;
}

static void tasklet_action_common(struct softirq_action *a,
                                  struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = tl_head->head;
        tl_head->head = NULL;
        tl_head->tail = &tl_head->head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (tasklet_clear_sched(t)) {
                                        if (t->use_callback)
                                                t->callback(t);
                                        else
                                                t->func(t->data);
                                }
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *tl_head->tail = t;
                tl_head->tail = &t->next;
                __raise_softirq_irqoff(softirq_nr);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
                   void (*callback)(struct tasklet_struct *))
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->callback = callback;
        t->use_callback = true;
        t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
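
/*
 * Usage sketch of the callback-style API (illustrative only; my_tasklet and
 * my_tasklet_fn() are hypothetical):
 *
 *      static void my_tasklet_fn(struct tasklet_struct *t)
 *      {
 *              // runs in softirq context, must not sleep
 *      }
 *      static DEFINE_TASKLET(my_tasklet, my_tasklet_fn);
 *
 *      tasklet_schedule(&my_tasklet);  // queue it on the current CPU
 */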

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->use_callback = false;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
                if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        /*
                         * Prevent a live lock if the current task has
                         * preempted soft interrupt processing or is
                         * preventing ksoftirqd from running. If the tasklet
                         * runs on a different CPU then this has no effect
                         * other than doing the BH disable/enable dance for
                         * nothing.
                         */
                        local_bh_disable();
                        local_bh_enable();
                } else {
                        cpu_relax();
                }
        }
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

        tasklet_unlock_wait(t);
        tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
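
/*
 * Teardown sketch (illustrative; my_tasklet and my_data are hypothetical):
 * before freeing data a tasklet refers to, make sure it can no longer be
 * scheduled and has finished running:
 *
 *      tasklet_kill(&my_tasklet);      // wait until SCHED and RUN are clear
 *      kfree(my_data);
 */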

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_atomic();
        clear_bit(TASKLET_STATE_RUN, &t->state);
        smp_mb__after_atomic();
        wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
        wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        ksoftirqd_run_begin();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
                ksoftirqd_run_end();
                cond_resched();
                return;
        }
        ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets       NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}