/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Do not ever include this file from anything other than
 * kernel/irq/. Do not even think about using any information outside
 * of this file for your non-core code.
 */
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS	NR_IRQS
#endif
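
/*
 * Usage sketch (editor's illustration, not part of this header): with
 * CONFIG_SPARSE_IRQ the descriptor allocator can hand out interrupt
 * numbers beyond NR_IRQS, so the allocation bitmap in irqdesc.c is
 * padded, roughly:
 *
 *	static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 *
 *	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
 *					   from, cnt, 0);
 */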

#define istate core_internal_state__do_not_mess_with_it

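/*
 * What the shorthand buys (editor's illustration): within kernel/irq/,
 * desc->istate expands to the deliberately unwieldy field name, e.g.
 *
 *	desc->istate |= IRQS_PENDING;
 * is
 *	desc->core_internal_state__do_not_mess_with_it |= IRQS_PENDING;
 */
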
extern bool noirqdebug;

extern struct irqaction chained_action;

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD - irq action is force threaded
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
};
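
/*
 * Usage sketch (editor's illustration): the IRQTF_* values are bit
 * numbers into action->thread_flags, manipulated with the atomic
 * bitops. The irq thread checking whether it was woken to run the
 * handler looks roughly like:
 *
 *	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 *		...	// run action->thread_fn()
 */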

/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq is replayed
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq is pending and replayed later
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_TIMINGS			- irq occurrences are recorded for timings
 * IRQS_NMI			- irq line is used to deliver NMIs
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
	IRQS_NMI		= 0x00002000,
};
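
/*
 * Usage sketch (editor's illustration): these bits live in desc->istate
 * and are only manipulated with desc->lock held, e.g.:
 *
 *	raw_spin_lock(&desc->lock);
 *	desc->istate |= IRQS_PENDING;
 *	if (desc->istate & IRQS_ONESHOT)
 *		...	// line stays masked until the thread finishes
 *	raw_spin_unlock(&desc->lock);
 */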

#include "debug.h"
#include "settings.h"

extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false

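/*
 * Readability sketch (editor's illustration): the named booleans make
 * irq_startup() call sites self-documenting, e.g.:
 *
 *	irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 *
 * instead of the opaque irq_startup(desc, true, true).
 */
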
extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);

#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif

extern int __irq_get_irqchip_state(struct irq_data *data,
				   enum irqchip_irq_state which,
				   bool *state);

extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts: */
int check_irq_resend(struct irq_desc *desc, bool inject);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

extern bool irq_can_set_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif

/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}

#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

#define for_each_action_of_desc(desc, act)			\
	for (act = desc->action; act; act = act->next)
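
/*
 * Usage sketch (editor's illustration): walking all shared handlers of
 * a descriptor, with desc->lock held by the caller:
 *
 *	struct irqaction *action;
 *
 *	for_each_action_of_desc(desc, action)
 *		pr_info("handler: %s\n", action->name);
 */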

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
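
/*
 * Usage sketch (editor's illustration): the canonical lookup/lock/unlock
 * pattern built on the helpers above:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc;
 *
 *	desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 *	if (!desc)
 *		return -EINVAL;
 *	...
 *	irq_put_desc_busunlock(desc, flags);
 *
 * The "bus" variants additionally take chip_bus_lock() for chips that
 * sit behind slow busses.
 */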

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}

/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}
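
/*
 * Usage sketch (editor's illustration): since the state word is a plain
 * bitmask, tests can combine several IRQD_* bits in one call, e.g.:
 *
 *	if (irqd_has_set(&desc->irq_data, IRQD_IRQ_DISABLED | IRQD_IRQ_MASKED))
 *		...
 */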

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

#undef __irqd_to_state

static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}
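
/*
 * Accounting sketch (editor's illustration): the per-CPU counters feed
 * /proc/interrupts and /proc/stat, while tot_count caches the sum so
 * readers need not iterate over all CPUs. Flow handlers for per-CPU
 * interrupts are assumed to use the __ variant, since tot_count would
 * be racy without desc->lock:
 *
 *	kstat_incr_irqs_this_cpu(desc);		// ordinary handlers
 *	__kstat_incr_irqs_this_cpu(desc);	// percpu flow handlers
 */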

static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif

#ifdef CONFIG_IRQ_TIMINGS

#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)
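
/*
 * Sizing sketch (editor's illustration): the per-CPU ring buffer keeps
 * the last IRQ_TIMINGS_SIZE (32) events; a free-running count is masked
 * down to an index, so old entries are silently overwritten:
 *
 *	timings->values[timings->count & IRQ_TIMINGS_MASK] = ...;
 *	timings->count++;
 */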

/**
 * struct irq_timings - irq timings storing structure
 * @values: a circular buffer of u64 encoded <timestamp, irq> values
 * @count: the number of elements in the array
 */
struct irq_timings {
	u64	values[IRQ_TIMINGS_SIZE];
	int	count;
};

DECLARE_PER_CPU(struct irq_timings, irq_timings);

extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}

static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
{
	int irq = irq_desc_get_irq(desc);
	int ret;

	/*
	 * We don't need the measurement because the idle code already
	 * knows the next expiry event.
	 */
	if (act->flags & __IRQF_TIMER)
		return;

	/*
	 * In case the timing allocation fails, we just want to warn,
	 * not fail, so let the system boot anyway.
	 */
	ret = irq_timings_alloc(irq);
	if (ret) {
		pr_warn("Failed to allocate irq timing stats for irq%d (%d)\n",
			irq, ret);
		return;
	}

	desc->istate |= IRQS_TIMINGS;
}

extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);

/*
 * The interrupt number and the timestamp are encoded into a single
 * u64 variable to optimize the size.
 * A 48 bit timestamp and a 16 bit IRQ number are more than sufficient.
 * Who cares about an IRQ after 78 hours of idle time?
 */
static inline u64 irq_timing_encode(u64 timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}
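
/*
 * Round-trip sketch (editor's illustration): local_clock() returns
 * nanoseconds, so the 48 stored timestamp bits wrap only after 2^48 ns,
 * i.e. roughly 78 hours:
 *
 *	u64 ts, value = irq_timing_encode(local_clock(), irq);
 *	int decoded = irq_timing_decode(value, &ts);
 *	// decoded == irq; ts == timestamp modulo 2^48
 */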

static __always_inline void irq_timings_push(u64 ts, int irq)
{
	struct irq_timings *timings = this_cpu_ptr(&irq_timings);

	timings->values[timings->count & IRQ_TIMINGS_MASK] =
		irq_timing_encode(ts, irq);

	timings->count++;
}

/*
 * The function record_irq_time is only called in one place in the
 * interrupt handler. We want this function always inline so the code
 * inside is embedded in the function and the static key branching
 * code can act at the higher level. Without the explicit
 * __always_inline we can end up with a function call and a small
 * overhead in the hotpath for nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	if (desc->istate & IRQS_TIMINGS)
		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
}
#else
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */


#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return irqd_is_handle_enforce_irqctx(data);
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
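
/*
 * Usage sketch (editor's illustration): with GENERIC_PENDING_IRQ an
 * affinity change may have to be deferred until the next interrupt when
 * the chip cannot be reprogrammed from process context, roughly:
 *
 *	if (irq_can_move_pcntxt(data)) {
 *		ret = irq_do_set_affinity(data, mask, force);
 *	} else {
 *		irqd_set_move_pending(data);
 *		irq_copy_pending(desc, mask);
 *	}
 *
 * Without the option the stubs above let those paths compile away.
 */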

#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif

static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irqd->parent_data;
#else
	return NULL;
#endif
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */