Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2012 Regents of the University of California |
| 4 | * Copyright (C) 2017-2018 SiFive |
| 5 | * Copyright (C) 2020 Western Digital Corporation or its affiliates. |
| 6 | */ |
| 7 | |
| 8 | #define pr_fmt(fmt) "riscv-intc: " fmt |
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 9 | #include <linux/acpi.h> |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 10 | #include <linux/atomic.h> |
| 11 | #include <linux/bits.h> |
| 12 | #include <linux/cpu.h> |
| 13 | #include <linux/irq.h> |
| 14 | #include <linux/irqchip.h> |
| 15 | #include <linux/irqdomain.h> |
| 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/module.h> |
| 18 | #include <linux/of.h> |
| 19 | #include <linux/smp.h> |
Yu Chien Peter Lin | f4cc33e | 2024-02-22 16:39:39 +0800 | [diff] [blame] | 20 | #include <linux/soc/andes/irq.h> |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 21 | |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 22 | #include <asm/hwcap.h> |
| 23 | |
/* The single per-system INTC irqdomain (created on the boot hart only) */
static struct irq_domain *intc_domain;
/* Number of standard local interrupt ids (raised to 64 when AIA is present) */
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
/* First hwirq of the vendor-specific local interrupt range */
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
/* Number of vendor-specific local interrupts (0 unless a vendor INTC probed) */
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 28 | |
| 29 | static asmlinkage void riscv_intc_irq(struct pt_regs *regs) |
| 30 | { |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 31 | unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG; |
| 32 | |
Yu Chien Peter Lin | 96303bc | 2024-02-22 16:39:38 +0800 | [diff] [blame] | 33 | if (generic_handle_domain_irq(intc_domain, cause)) |
| 34 | pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause); |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 35 | } |
| 36 | |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 37 | static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs) |
| 38 | { |
| 39 | unsigned long topi; |
| 40 | |
| 41 | while ((topi = csr_read(CSR_TOPI))) |
| 42 | generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT); |
| 43 | } |
| 44 | |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 45 | /* |
| 46 | * On RISC-V systems local interrupts are masked or unmasked by writing |
| 47 | * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written |
| 48 | * on the local hart, these functions can only be called on the hart that |
| 49 | * corresponds to the IRQ chip. |
| 50 | */ |
| 51 | |
| 52 | static void riscv_intc_irq_mask(struct irq_data *d) |
| 53 | { |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 54 | if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) |
| 55 | csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); |
| 56 | else |
| 57 | csr_clear(CSR_IE, BIT(d->hwirq)); |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 58 | } |
| 59 | |
| 60 | static void riscv_intc_irq_unmask(struct irq_data *d) |
| 61 | { |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 62 | if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) |
| 63 | csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); |
| 64 | else |
| 65 | csr_set(CSR_IE, BIT(d->hwirq)); |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 66 | } |
| 67 | |
Yu Chien Peter Lin | f4cc33e | 2024-02-22 16:39:39 +0800 | [diff] [blame] | 68 | static void andes_intc_irq_mask(struct irq_data *d) |
| 69 | { |
| 70 | /* |
| 71 | * Andes specific S-mode local interrupt causes (hwirq) |
| 72 | * are defined as (256 + n) and controlled by n-th bit |
| 73 | * of SLIE. |
| 74 | */ |
| 75 | unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); |
| 76 | |
| 77 | if (d->hwirq < ANDES_SLI_CAUSE_BASE) |
| 78 | csr_clear(CSR_IE, mask); |
| 79 | else |
| 80 | csr_clear(ANDES_CSR_SLIE, mask); |
| 81 | } |
| 82 | |
| 83 | static void andes_intc_irq_unmask(struct irq_data *d) |
| 84 | { |
| 85 | unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); |
| 86 | |
| 87 | if (d->hwirq < ANDES_SLI_CAUSE_BASE) |
| 88 | csr_set(CSR_IE, mask); |
| 89 | else |
| 90 | csr_set(ANDES_CSR_SLIE, mask); |
| 91 | } |
| 92 | |
/* Intentionally empty end-of-interrupt callback; see rationale below. */
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
	 * for the per-HART local interrupts and child irqchip drivers
	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
	 * chained handlers for the per-HART local interrupts.
	 *
	 * In the absence of irq_eoi(), the chained_irq_enter() and
	 * chained_irq_exit() functions (used by child irqchip drivers)
	 * will do unnecessary mask/unmask of per-HART local interrupts
	 * at the time of handling interrupts. To avoid this, we provide
	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
	 */
}
| 108 | |
/* irqchip callbacks for the standard RISC-V local interrupt enable CSRs */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
| 115 | |
/*
 * Andes variant: mask/unmask also know about the custom SLIE CSR.
 * NOTE(review): .name matches the standard chip ("RISC-V INTC"),
 * presumably to keep /proc/interrupts output uniform — confirm intent.
 */
static struct irq_chip andes_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = andes_intc_irq_mask,
	.irq_unmask = andes_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
| 122 | |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 123 | static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq, |
| 124 | irq_hw_number_t hwirq) |
| 125 | { |
Yu Chien Peter Lin | f4cc33e | 2024-02-22 16:39:39 +0800 | [diff] [blame] | 126 | struct irq_chip *chip = d->host_data; |
| 127 | |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 128 | irq_set_percpu_devid(irq); |
Yu Chien Peter Lin | f4cc33e | 2024-02-22 16:39:39 +0800 | [diff] [blame] | 129 | irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq, |
| 130 | NULL, NULL); |
Anup Patel | 6b7ce892 | 2020-06-01 14:45:40 +0530 | [diff] [blame] | 131 | |
| 132 | return 0; |
| 133 | } |
| 134 | |
Anup Patel | 832f15f4 | 2023-03-28 09:22:19 +0530 | [diff] [blame] | 135 | static int riscv_intc_domain_alloc(struct irq_domain *domain, |
| 136 | unsigned int virq, unsigned int nr_irqs, |
| 137 | void *arg) |
| 138 | { |
| 139 | int i, ret; |
| 140 | irq_hw_number_t hwirq; |
| 141 | unsigned int type = IRQ_TYPE_NONE; |
| 142 | struct irq_fwspec *fwspec = arg; |
| 143 | |
| 144 | ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); |
| 145 | if (ret) |
| 146 | return ret; |
| 147 | |
Yu Chien Peter Lin | 96303bc | 2024-02-22 16:39:38 +0800 | [diff] [blame] | 148 | /* |
| 149 | * Only allow hwirq for which we have corresponding standard or |
| 150 | * custom interrupt enable register. |
| 151 | */ |
Samuel Holland | ca5b0b7 | 2024-03-12 14:28:08 -0700 | [diff] [blame] | 152 | if (hwirq >= riscv_intc_nr_irqs && |
| 153 | (hwirq < riscv_intc_custom_base || |
| 154 | hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs)) |
Yu Chien Peter Lin | 96303bc | 2024-02-22 16:39:38 +0800 | [diff] [blame] | 155 | return -EINVAL; |
| 156 | |
Anup Patel | 832f15f4 | 2023-03-28 09:22:19 +0530 | [diff] [blame] | 157 | for (i = 0; i < nr_irqs; i++) { |
| 158 | ret = riscv_intc_domain_map(domain, virq + i, hwirq + i); |
| 159 | if (ret) |
| 160 | return ret; |
| 161 | } |
| 162 | |
| 163 | return 0; |
| 164 | } |
| 165 | |
/* .map serves legacy irq_create_mapping(); .alloc serves hierarchy allocs */
static const struct irq_domain_ops riscv_intc_domain_ops = {
	.map = riscv_intc_domain_map,
	.xlate = irq_domain_xlate_onecell,
	.alloc = riscv_intc_domain_alloc
};
| 171 | |
/*
 * Registered via riscv_set_intc_hwnode_fn() so that child irqchip
 * drivers (PLIC, IMSIC, ...) can find the INTC domain's fwnode.
 */
static struct fwnode_handle *riscv_intc_hwnode(void)
{
	return intc_domain->fwnode;
}
| 176 | |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 177 | static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip) |
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 178 | { |
| 179 | int rc; |
| 180 | |
Yu Chien Peter Lin | f4cc33e | 2024-02-22 16:39:39 +0800 | [diff] [blame] | 181 | intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip); |
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 182 | if (!intc_domain) { |
| 183 | pr_err("unable to add IRQ domain\n"); |
| 184 | return -ENXIO; |
| 185 | } |
| 186 | |
Samuel Holland | ca5b0b7 | 2024-03-12 14:28:08 -0700 | [diff] [blame] | 187 | if (riscv_isa_extension_available(NULL, SxAIA)) { |
| 188 | riscv_intc_nr_irqs = 64; |
Anup Patel | 678c607 | 2024-02-26 09:37:37 +0530 | [diff] [blame] | 189 | rc = set_handle_irq(&riscv_intc_aia_irq); |
Samuel Holland | ca5b0b7 | 2024-03-12 14:28:08 -0700 | [diff] [blame] | 190 | } else { |
Anup Patel | 678c607 | 2024-02-26 09:37:37 +0530 | [diff] [blame] | 191 | rc = set_handle_irq(&riscv_intc_irq); |
Samuel Holland | ca5b0b7 | 2024-03-12 14:28:08 -0700 | [diff] [blame] | 192 | } |
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 193 | if (rc) { |
| 194 | pr_err("failed to set irq handler\n"); |
| 195 | return rc; |
| 196 | } |
| 197 | |
| 198 | riscv_set_intc_hwnode_fn(riscv_intc_hwnode); |
| 199 | |
Anup Patel | 678c607 | 2024-02-26 09:37:37 +0530 | [diff] [blame] | 200 | pr_info("%d local interrupts mapped%s\n", |
Samuel Holland | ca5b0b7 | 2024-03-12 14:28:08 -0700 | [diff] [blame] | 201 | riscv_intc_nr_irqs, |
Anup Patel | 678c607 | 2024-02-26 09:37:37 +0530 | [diff] [blame] | 202 | riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : ""); |
Anup Patel | 3c46fc5 | 2024-02-22 15:09:56 +0530 | [diff] [blame] | 203 | if (riscv_intc_custom_nr_irqs) |
| 204 | pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs); |
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 205 | |
| 206 | return 0; |
| 207 | } |
| 208 | |
/*
 * DT probe entry for "riscv,cpu-intc" / "andestech,cpu-intc" nodes.
 * Only the INTC node belonging to the boot hart creates the irqdomain.
 */
static int __init riscv_intc_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_chip *chip = &riscv_intc_chip;
	unsigned long hartid;
	int rc;

	/* Non-fatal: skip nodes whose parent CPU has no usable hart id */
	rc = riscv_of_parent_hartid(node, &hartid);
	if (rc < 0) {
		pr_warn("unable to find hart id for %pOF\n", node);
		return 0;
	}

	/*
	 * The DT will have one INTC DT node under each CPU (or HART)
	 * DT node so riscv_intc_init() function will be called once
	 * for each INTC DT node. We only need to do INTC initialization
	 * for the INTC DT node belonging to boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
		/*
		 * The INTC nodes of each CPU are suppliers for downstream
		 * interrupt controllers (such as PLIC, IMSIC and APLIC
		 * direct-mode) so we should mark an INTC node as initialized
		 * if we are not creating IRQ domain for it.
		 */
		fwnode_dev_initialized(of_fwnode_handle(node), true);
		return 0;
	}

	/* Andes cores expose vendor interrupts at hwirq 256+ via SLIE */
	if (of_device_is_compatible(node, "andestech,cpu-intc")) {
		riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
		riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
		chip = &andes_intc_chip;
	}

	return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
| 247 | |
/* Generic and Andes compatibles share one init path (chip chosen inside) */
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
Sunil V L | 7023b9d | 2023-05-15 11:19:23 +0530 | [diff] [blame] | 250 | |
| 251 | #ifdef CONFIG_ACPI |
| 252 | |
/*
 * ACPI probe entry, invoked once per MADT RINTC entry; only the entry
 * for the boot CPU's hart creates the irqdomain.
 */
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
				       const unsigned long end)
{
	struct acpi_madt_rintc *rintc;
	struct fwnode_handle *fn;
	int rc;

	rintc = (struct acpi_madt_rintc *)header;

	/*
	 * The ACPI MADT will have one INTC for each CPU (or HART)
	 * so riscv_intc_acpi_init() function will be called once
	 * for each INTC. We only do INTC initialization
	 * for the INTC belonging to the boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
		return 0;

	/* No DT node to borrow under ACPI, so allocate a named fwnode */
	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
	if (!fn) {
		pr_err("unable to allocate INTC FW node\n");
		return -ENOMEM;
	}

	/* On failure we own the fwnode and must release it ourselves */
	rc = riscv_intc_init_common(fn, &riscv_intc_chip);
	if (rc)
		irq_domain_free_fwnode(fn);

	return rc;
}
| 283 | |
/* Match MADT RINTC (v1) subtables on ACPI systems */
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
| 286 | #endif |