/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>

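/* Per-CPU EPSW image applied by local_irq_enable(): interrupts enabled
 * (EPSW_IE) with the interrupt priority mask opened up to level 7.
 * do_IRQ() below temporarily swaps this for a narrower mask so that
 * re-enabling interrupts inside a handler cannot lower the priority of
 * the interrupt currently being serviced.
 */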
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

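/* On SMP, changing an IRQ's affinity is deferred: irq_set_affinity()
 * just marks the IRQ in irq_affinity_request, and the ICR is actually
 * re-pointed at another CPU the next time the IRQ is masked/acked or
 * unmasked (see mn10300_cpupic_mask_ack() and
 * mn10300_cpupic_unmask_clear() below).  irq_affinity_online[] records
 * which CPU each IRQ is currently routed to.
 */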
#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);	/* read the register back to flush the write */
	arch_local_irq_restore(flags);
}

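/* Atomically read-modify-write an on-chip interrupt control register:
 * keep only the bits in @mask, OR in @set, with interrupts disabled so
 * that the update cannot race with interrupt delivery on this CPU.  The
 * final read flushes the write.
 */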
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		/* a pending affinity change: mask the IRQ here and move it
		 * to the newly selected online CPU */
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		/* a pending affinity change: enable the IRQ on the newly
		 * selected online CPU rather than here */
		tmp = GxICR(irq);

		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SMP
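/* Request that an IRQ be routed to a different CPU.  The ICR is not
 * reprogrammed here; a flag is set and the move is performed by
 * mn10300_cpupic_mask_ack() or mn10300_cpupic_unmask_clear() the next
 * time the interrupt is serviced.
 */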
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;

	flags = arch_local_cli_save();
	set_bit(d->irq, irq_affinity_request);
	arch_local_irq_restore(flags);
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}
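
/* A unit's interrupt setup code would typically call the above for an
 * interrupt whose source can only be cleared by its handler, e.g.
 * (the IRQ number here is purely illustrative):
 *
 *	mn10300_set_lateack_irq_type(XIRQ1);
 */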

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

		/* the group number read from IAGR is scaled by four, hence
		 * the shift to recover the IRQ number */
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
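/* Move all non-per-CPU IRQs currently routed to this (outgoing) CPU
 * over to a CPU that is still online, reprogramming the ICRs directly
 * and carrying over any interrupt request already latched here.
 */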
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		/* if this IRQ is routed to the dying CPU, mask it here,
		 * re-point it at a surviving CPU and carry over any latched
		 * request */
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */