// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity although the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce the fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

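/*
 * Migrate one interrupt away from the outgoing CPU.
 *
 * Returns true if the interrupt's affinity had to be broken, i.e. the
 * interrupt was forced onto cpu_online_mask because its configured
 * affinity no longer contains an online CPU. Chips which cannot change
 * affinity in process context are masked across the move.
 */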
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required if:
	 *  - Interrupt is per cpu
	 *  - Interrupt is not started
	 *  - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, then the vectors need to be cleaned
	 * up now; the cleanup cannot wait until this interrupt actually
	 * fires again, because the outgoing CPU is involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

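	/* Unmask the chip again if it was masked for the affinity change */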
	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

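	/*
	 * This runs on the outgoing CPU with interrupts disabled by the
	 * hotplug teardown path; the plain raw_spin_lock() below relies
	 * on that.
	 */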
	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

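/*
 * Re-establish the affinity of a managed interrupt when a CPU in its
 * affinity mask comes back online.
 */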
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

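	/*
	 * Nothing to do for unmanaged interrupts, for interrupts without
	 * an action or a chip, or when the upcoming CPU is not part of
	 * the affinity mask.
	 */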
	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

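	/*
	 * The interrupt was shut down on CPU unplug because no CPU in its
	 * affinity mask was online. Start it up again now that a target
	 * CPU is available.
	 */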
	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

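	/*
	 * Hold the sparse irq lock so that irq descriptors cannot be
	 * allocated or freed while they are walked here.
	 */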
	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}