/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>

unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
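/* Acknowledge an IRQ by writing GxICR_DETECT to the channel's ICR as a byte
 * write, leaving the enable and level fields alone; the read back ensures the
 * write has reached the interrupt controller before interrupts are restored.
 */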
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

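/* Read-modify-write a channel's ICR: keep only the bits in 'mask', OR in the
 * bits in 'set', and read the register back to flush the write.  Interrupts
 * are disabled locally so the update can't interleave with ICR manipulation
 * from an interrupt handler on this CPU.
 */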
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

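/* Disable an IRQ channel, preserving only its priority level setting */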
static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

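/* Mask an IRQ channel and clear its latched request.  On SMP, a pending
 * affinity change (flagged by mn10300_cpupic_setaffinity()) is applied here:
 * the channel is reprogrammed on a CPU taken from the new affinity mask.
 */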
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

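/* Re-enable an IRQ channel, preserving its priority level setting */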
static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}

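/* Re-enable an IRQ channel and clear its latched request.  On SMP, a pending
 * affinity change is applied here instead: the channel is enabled on a CPU
 * taken from the new affinity mask.
 */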
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SMP
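/*
 * Affinity changes are not applied immediately: the request is only flagged
 * here, and the channel is actually retargeted the next time it is acked or
 * unmasked (see mn10300_cpupic_mask_ack() and mn10300_cpupic_unmask_clear()).
 */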
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;

	flags = arch_local_cli_save();
	set_bit(d->irq, irq_affinity_request);
	arch_local_irq_restore(flags);
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

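		/* the IAGR group field is a multiple of 4; shift it down to
		 * get the Linux IRQ number */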
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
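/*
 * Migrate IRQs away from a CPU that is going offline: any channel currently
 * routed to this CPU is reprogrammed onto another online CPU picked from its
 * affinity mask.
 */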
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */