// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

/* DOC: IPA Interrupts
 *
 * The IPA has an interrupt line distinct from the interrupt used by the GSI
 * code. Whereas GSI interrupts are generally related to channel events (like
 * transfer completions), IPA interrupts signal other events involving the
 * IPA itself. Some of the IPA interrupts come from a microcontroller
 * embedded in the IPA. Each IPA interrupt type can be both masked and
 * acknowledged independently of the others.
 *
 * Two of the IPA interrupts are initiated by the microcontroller. A third
 * can be generated to signal the need for a wakeup/resume when an IPA
 * endpoint has been suspended. There are other IPA events, but at this
 * time only these three are supported.
 */
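
/* Illustrative sketch (not part of the driver): what a handler for one of
 * these interrupt types might look like, assuming the ipa_irq_handler_t
 * signature declared in ipa_interrupt.h takes the IPA pointer and the
 * interrupt ID. Handlers run from the threaded ISR below, with an IPA
 * clock reference held.
 *
 *	static void example_uc_event_handler(struct ipa *ipa,
 *					     enum ipa_irq_id irq_id)
 *	{
 *		// irq_id identifies which microcontroller event fired,
 *		// e.g. IPA_IRQ_UC_0 or IPA_IRQ_UC_1
 *	}
 */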

#include <linux/types.h>
#include <linux/interrupt.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_reg.h"
#include "ipa_endpoint.h"
#include "ipa_interrupt.h"

/**
 * struct ipa_interrupt - IPA interrupt information
 * @ipa:	IPA pointer
 * @irq:	Linux IRQ number used for IPA interrupts
 * @enabled:	Mask indicating which interrupts are enabled
 * @handler:	Array of handlers indexed by IPA interrupt ID
 */
struct ipa_interrupt {
	struct ipa *ipa;
	u32 irq;
	u32 enabled;
	ipa_irq_handler_t handler[IPA_IRQ_COUNT];
};

/* Returns true if the interrupt type is associated with the microcontroller */
static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
{
	return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
}

/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
	bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
	struct ipa *ipa = interrupt->ipa;
	u32 mask = BIT(irq_id);
	u32 offset;

	/* For microcontroller interrupts, clear the interrupt right away,
	 * "to avoid clearing unhandled interrupts."
	 */
	offset = ipa_reg_irq_clr_offset(ipa->version);
	if (uc_irq)
		iowrite32(mask, ipa->reg_virt + offset);

	if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
		interrupt->handler[irq_id](interrupt->ipa, irq_id);

	/* Clearing the SUSPEND_TX interrupt also clears the register
	 * that tells us which suspended endpoint(s) caused the interrupt,
	 * so defer clearing until after the handler has been called.
	 */
	if (!uc_irq)
		iowrite32(mask, ipa->reg_virt + offset);
}

/* IPA IRQ handler is threaded */
static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
	struct ipa_interrupt *interrupt = dev_id;
	struct ipa *ipa = interrupt->ipa;
	u32 enabled = interrupt->enabled;
	u32 pending;
	u32 offset;
	u32 mask;

	ipa_clock_get(ipa);

	/* The status register indicates which conditions are present,
	 * including conditions whose interrupt is not enabled. Handle
	 * only the enabled ones.
	 */
	offset = ipa_reg_irq_stts_offset(ipa->version);
	pending = ioread32(ipa->reg_virt + offset);
	while ((mask = pending & enabled)) {
		do {
			u32 irq_id = __ffs(mask);

			mask ^= BIT(irq_id);

			ipa_interrupt_process(interrupt, irq_id);
		} while (mask);
		pending = ioread32(ipa->reg_virt + offset);
	}

	/* If any disabled interrupts are pending, clear them */
	if (pending) {
		struct device *dev = &ipa->pdev->dev;

		dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
			pending);
		offset = ipa_reg_irq_clr_offset(ipa->version);
		iowrite32(pending, ipa->reg_virt + offset);
	}

	ipa_clock_put(ipa);

	return IRQ_HANDLED;
}

/* Common function used to enable/disable TX_SUSPEND for an endpoint */
static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
					  u32 endpoint_id, bool enable)
{
	struct ipa *ipa = interrupt->ipa;
	u32 mask = BIT(endpoint_id);
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
	if (ipa->version == IPA_VERSION_3_0)
		return;

	offset = ipa_reg_irq_suspend_en_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);
	if (enable)
		val |= mask;
	else
		val &= ~mask;
	iowrite32(val, ipa->reg_virt + offset);
}

/* Enable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
	ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
}

/* Disable TX_SUSPEND for an endpoint */
void
ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
{
	ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
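
/* Illustrative sketch (hypothetical call site): an endpoint suspend path
 * might bracket suspending an endpoint with these wrappers, so that data
 * arriving for the suspended endpoint raises the TX_SUSPEND (wakeup)
 * interrupt:
 *
 *	ipa_interrupt_suspend_enable(interrupt, endpoint_id);
 *	// ... suspend the endpoint ...
 *
 *	// and when resuming it again:
 *	ipa_interrupt_suspend_disable(interrupt, endpoint_id);
 */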

/* Clear the suspend interrupt for all endpoints that signaled it */
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
	struct ipa *ipa = interrupt->ipa;
	u32 offset;
	u32 val;

	offset = ipa_reg_irq_suspend_info_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
	if (ipa->version == IPA_VERSION_3_0)
		return;

	offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
	iowrite32(val, ipa->reg_virt + offset);
}

/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
	ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
}

/* Add a handler for an IPA interrupt */
void ipa_interrupt_add(struct ipa_interrupt *interrupt,
		       enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
	struct ipa *ipa = interrupt->ipa;
	u32 offset;

	WARN_ON(ipa_irq >= IPA_IRQ_COUNT);

	interrupt->handler[ipa_irq] = handler;

	/* Update the IPA interrupt mask to enable it */
	interrupt->enabled |= BIT(ipa_irq);
	offset = ipa_reg_irq_en_offset(ipa->version);
	iowrite32(interrupt->enabled, ipa->reg_virt + offset);
}

/* Remove the handler for an IPA interrupt type */
void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
	struct ipa *ipa = interrupt->ipa;
	u32 offset;

	WARN_ON(ipa_irq >= IPA_IRQ_COUNT);

	/* Update the IPA interrupt mask to disable it */
	interrupt->enabled &= ~BIT(ipa_irq);
	offset = ipa_reg_irq_en_offset(ipa->version);
	iowrite32(interrupt->enabled, ipa->reg_virt + offset);

	interrupt->handler[ipa_irq] = NULL;
}

/* Configure the IPA interrupt framework */
struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct ipa_interrupt *interrupt;
	unsigned int irq;
	u32 offset;
	int ret;

	ret = platform_get_irq_byname(ipa->pdev, "ipa");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
			ret);
		return ERR_PTR(ret ? : -EINVAL);
	}
	irq = ret;

	interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
	if (!interrupt)
		return ERR_PTR(-ENOMEM);
	interrupt->ipa = ipa;
	interrupt->irq = irq;

	/* Start with all IPA interrupts disabled */
	offset = ipa_reg_irq_en_offset(ipa->version);
	iowrite32(0, ipa->reg_virt + offset);

	ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
				   "ipa", interrupt);
	if (ret) {
		dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
		goto err_kfree;
	}

	ret = enable_irq_wake(irq);
	if (ret) {
		dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
		goto err_free_irq;
	}

	return interrupt;

err_free_irq:
	free_irq(interrupt->irq, interrupt);
err_kfree:
	kfree(interrupt);

	return ERR_PTR(ret);
}
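
/* Illustrative sketch (not part of the driver): higher-level setup code
 * would pair ipa_interrupt_config() with ipa_interrupt_deconfig(), checking
 * the returned pointer with IS_ERR(), assuming struct ipa stores it in an
 * "interrupt" member:
 *
 *	ipa->interrupt = ipa_interrupt_config(ipa);
 *	if (IS_ERR(ipa->interrupt))
 *		return PTR_ERR(ipa->interrupt);
 *
 *	// ... and on teardown:
 *	ipa_interrupt_deconfig(ipa->interrupt);
 *	ipa->interrupt = NULL;
 */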

/* Inverse of ipa_interrupt_config() */
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
	struct device *dev = &interrupt->ipa->pdev->dev;
	int ret;

	ret = disable_irq_wake(interrupt->irq);
	if (ret)
		dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
	free_irq(interrupt->irq, interrupt);
	kfree(interrupt);
}