/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

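/*
 * Define CREATE_TRACE_POINTS before including the trace header so that
 * the irq_vectors tracepoints are instantiated in this translation unit.
 */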
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

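/* Error interrupt counter; shown as the ERR line in /proc/interrupts. */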
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

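/* Accessor for the per-CPU irq_cpustat_t statistics of CPU x. */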
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
#endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
                                        irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        seq_printf(p, "%*s: ", prec, "HYP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
        seq_puts(p, "  Hypervisor callback interrupts\n");
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
#endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /*
         * The interrupt entry stubs push ~vector into orig_ax; the high
         * bit is used in the ret_from_ code to tell this apart from a
         * syscall number.
         */
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        irq_enter();
        exit_idle();

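        /* Map the vector to a Linux irq number via this CPU's vector_irq[] table. */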
        irq = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(irq, regs)) {
                ack_APIC_irq();

                if (irq != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
                                             __func__, smp_processor_id(),
                                             vector, irq);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
                }
        }

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
        inc_irq_stat(x86_platform_ipis);

        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        __smp_x86_platform_ipi();
        exiting_irq();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();

        irq_enter();

        exit_idle();

        inc_irq_stat(kvm_posted_intr_ipis);

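        /*
         * Nothing else to do here: the IPI only serves to kick this CPU
         * so that pending posted interrupts get noticed by KVM (e.g. on
         * the next VM entry).
         */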
        irq_exit();

        set_irq_regs(old_regs);
}
#endif

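/*
 * Tracing variant of smp_x86_platform_ipi(): same body, wrapped in the
 * irq_vectors entry/exit tracepoints. Dispatched instead of the plain
 * handler when interrupt vector tracing is enabled.
 */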
__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        __smp_x86_platform_ipi();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        exiting_irq();
        set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
        int irq, cpu;
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;

        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, &online_new);

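        /*
         * Count the irqs on this CPU that would be left without an online
         * target once this CPU goes away; each of them will need a free
         * vector on one of the remaining CPUs.
         */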
        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq >= 0) {
                        desc = irq_to_desc(irq);
                        if (!desc)
                                continue;

                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpumask_clear_cpu(this_cpu, &affinity_new);

                        /* Do not count inactive or per-cpu irqs. */
                        if (!irq_has_action(irq) || irqd_is_per_cpu(data))
                                continue;

                        /*
                         * A single irq may be mapped to multiple
                         * cpu's vector_irq[] (for example IOAPIC cluster
                         * mode).  In this case we have two
                         * possibilities:
                         *
                         * 1) the resulting affinity mask is empty; that is,
                         * the down'd cpu is the last cpu in the irq's
                         * affinity mask, or
                         *
                         * 2) the resulting affinity mask is no longer
                         * a subset of the online cpus but the affinity
                         * mask is not zero; that is, the down'd cpu is the
                         * last online cpu in a user set affinity mask.
                         */
                        if (cpumask_empty(&affinity_new) ||
                            !cpumask_subset(&affinity_new, &online_new))
                                this_count++;
                }
        }

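        /* Count the vectors still available on the remaining online CPUs. */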
        count = 0;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We scan from FIRST_EXTERNAL_VECTOR to first system
                 * vector.  If the vector is marked in the used vectors
                 * bitmap or an irq is assigned to it, we don't count
                 * it as available.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
                        if (!test_bit(vector, used_vectors) &&
                            per_cpu(vector_irq, cpu)[vector] < 0)
                                count++;
                }
        }

        if (count < this_count) {
                pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
                        this_cpu, this_count, count);
                return -ERANGE;
        }
        return 0;
}

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;
        int ret;

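        /*
         * First pass: retarget every irq that could still be routed to the
         * CPU going offline onto the remaining online CPUs.
         */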
        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* Interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move.  This cpu is going down and in the
                 * non-intr-remapping case we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(irq);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_online_mask;
                }

                chip = irq_data_get_irq_chip(data);
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity) {
                        ret = chip->irq_set_affinity(data, affinity, true);
                        if (ret == -ENOSPC)
                                pr_crit("IRQ %d set affinity failed because there are no available vectors.  The device assigned to this IRQ is unstable.\n", irq);
                } else {
                        if (!(warned++))
                                set_affinity = 0;
                }

                /*
                 * We unmask if the irq was not marked masked by the
                 * core code.  That respects the lazy irq disable
                 * behaviour.
                 */
                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_masked(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        pr_notice("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        pr_notice("Cannot set affinity for irq %i\n", irq);
        }

        /*
         * We could remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu.  While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

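        /*
         * Second pass: any external vector still pending in the local APIC
         * IRR is retriggered so it fires again on its new target CPU, and
         * stale vector_irq[] entries on this CPU are then cleared.
         */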
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;

                if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
                        continue;

                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        irq = __this_cpu_read(vector_irq[vector]);

                        desc = irq_to_desc(irq);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        raw_spin_lock(&desc->lock);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
        }
}
#endif