/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

/*
 * Stub for platforms without IRQ domain support: interrupt mappings
 * here are static, so there is nothing to dispose of.
 */
static inline void irq_dispose_mapping(unsigned int virq)
{
}

#ifdef CONFIG_I8259
/*
 * On a cascaded pair of i8259 PICs, IRQ 2 is the cascade input from the
 * slave controller; requests for it are conventionally redirected to IRQ 9.
 */
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif
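
/*
 * Usage sketch, illustration only (not compiled): a driver handed a
 * legacy ISA IRQ number would canonicalize it before requesting the
 * line. my_handler and dev are hypothetical.
 */
#if 0
	int line = irq_canonicalize(isa_irq);	/* 2 becomes 9 behind an i8259 */
	err = request_irq(line, my_handler, IRQF_SHARED, "mydev", dev);
#endif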

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			  unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
	/*
	 * Re-enable the Status.IM bit(s) for this IRQ that were masked
	 * off when it was dispatched.
	 */
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */
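
/*
 * Usage sketch, illustration only (not compiled): an irq_chip's
 * ack/end path would typically restore the backstopped IM bits once
 * the source has been quiesced. mask_device_source() is a
 * hypothetical helper.
 */
#if 0
	mask_device_source(irq);
	smtc_im_ack_irq(irq);		/* let the source fire again */
#endif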

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(struct irq_data *d,
				 const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if the option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency. As there is a project afoot to optimize the
 * cpumask implementations, this version optimistically assumes
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
static inline int handle_on_other_cpu(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
		return 0;
	smtc_forward_irq(d);
	return 1;
}

#else /* Not doing SMTC affinity */

static inline int handle_on_other_cpu(unsigned int irq) { return 0; }

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

static inline void smtc_im_backstop(unsigned int irq)
{
	if (irq_hwmask[irq] & 0x0000ff00)
		write_c0_tccontext(read_c0_tccontext() &
				   ~(irq_hwmask[irq] & 0x0000ff00));
}

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates. This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	int ret = handle_on_other_cpu(irq);

	if (!ret)
		smtc_im_backstop(irq);
	return ret;
}

#else

static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	return handle_on_other_cpu(irq);
}

#endif

extern void do_IRQ(unsigned int irq);

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF

extern void do_IRQ_no_affinity(unsigned int irq);

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
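
/*
 * Usage sketch, illustration only (not compiled): a platform's
 * low-level dispatch would first give SMTC a chance to forward the
 * IRQ to a CPU in its affinity set and only handle it locally
 * otherwise. get_pending_irq() is a hypothetical platform helper.
 */
#if 0
	unsigned int irq = get_pending_irq();

	if (smtc_handle_on_other_cpu(irq))
		return;			/* forwarded to another CPU */
	do_IRQ(irq);
#endif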

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

/*
 * Before R2 the timer and performance counter interrupts were both
 * hard-wired to interrupt 7 (IP7/IE7). Since R2 their numbers have to
 * be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
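
/*
 * Setup sketch, illustration only (not compiled; cf. the per-CPU trap
 * init code): on an R2 core the IRQ numbers are read out of c0_intctl,
 * otherwise the legacy fixed value is used. INTCTLB_IPTI and
 * INTCTLB_IPPCI are the IPTI/IPPCI field shifts from mipsregs.h.
 */
#if 0
	if (cpu_has_mips_r2) {
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_perfcount_irq = -1;
	}
#endif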

#endif /* _ASM_IRQ_H */