/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KGDB
int kgdb_early_setup;
#endif

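/*
 * Bitmap of IRQ numbers handed out by allocate_irqno(); one bit per
 * possible IRQ, set while that number is in use.
 */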
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

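/*
 * Find and claim a free IRQ number in irq_map.  If another CPU grabs the
 * bit between the scan and test_and_set_bit(), retry; return -ENOSPC once
 * every one of the NR_IRQS numbers is taken.
 */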
int allocate_irqno(void)
{
	int irq;

again:
	irq = find_first_zero_bit(irq_map, NR_IRQS);

	if (irq >= NR_IRQS)
		return -ENOSPC;

	if (test_and_set_bit(irq, irq_map))
		goto again;

	return irq;
}

/*
 * Allocate the 16 legacy interrupts for i8259 devices. This happens early
 * in the kernel initialization so treating allocation failure as BUG() is
 * ok.
 */
void __init alloc_legacy_irqno(void)
{
	int i;

	for (i = 0; i <= 16; i++)
		BUG_ON(test_and_set_bit(i, irq_map));
}

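/*
 * Return an IRQ number previously obtained from allocate_irqno() to the
 * bitmap.  The barriers around clear_bit() keep the release ordered with
 * respect to earlier accesses on SMP.
 */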
void free_irqno(unsigned int irq)
{
	smp_mb__before_clear_bit();
	clear_bit(irq, irq_map);
	smp_mb__after_clear_bit();
}

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	smtc_im_ack_irq(irq);
	printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

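/*
 * /proc/interrupts hook: append the architecture-specific "ERR" line with
 * the number of erroneous (spurious/unexpected) interrupts seen so far.
 */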
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

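/*
 * Called from the low-level interrupt dispatch code when an interrupt
 * fires that no handler claims; just account it.
 */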
asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

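/*
 * Early interrupt setup: mark every IRQ as non-probeable, then let the
 * platform install its interrupt controllers via arch_init_irq().  If KGDB
 * already did this during its early setup, skip the duplicate work.
 */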
void __init init_IRQ(void)
{
	int i;

#ifdef CONFIG_KGDB
	if (kgdb_early_setup)
		return;
#endif

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	arch_init_irq();

#ifdef CONFIG_KGDB
	if (!kgdb_early_setup)
		kgdb_early_setup = 1;
#endif
}

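/*
 * Cheap runtime check for kernel stack overflow on the interrupt path:
 * reduce $sp to its offset within the current THREAD_SIZE stack and warn
 * once less than STACK_WARN bytes remain above the thread_info at the
 * bottom of the stack.
 */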
#ifdef DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	if (!smtc_handle_on_other_cpu(irq))
		generic_handle_irq(irq);
	irq_exit();
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */
void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
	irq_enter();
	smtc_im_backstop(irq);
	generic_handle_irq(irq);
	irq_exit();
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */