blob: 4f3a12383a1e4111788baf798cacd3b9accb71eb [file] [log] [blame]
Anup Patel6b7ce8922020-06-01 14:45:40 +05301// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2017-2018 SiFive
5 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
6 */
7
8#define pr_fmt(fmt) "riscv-intc: " fmt
Sunil V L7023b9d2023-05-15 11:19:23 +05309#include <linux/acpi.h>
Anup Patel6b7ce8922020-06-01 14:45:40 +053010#include <linux/atomic.h>
11#include <linux/bits.h>
12#include <linux/cpu.h>
13#include <linux/irq.h>
14#include <linux/irqchip.h>
15#include <linux/irqdomain.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/smp.h>
Yu Chien Peter Linf4cc33e2024-02-22 16:39:39 +080020#include <linux/soc/andes/irq.h>
Anup Patel6b7ce8922020-06-01 14:45:40 +053021
Anup Patel3c46fc52024-02-22 15:09:56 +053022#include <asm/hwcap.h>
23
/* The single per-CPU local-interrupt IRQ domain (created on the boot hart). */
static struct irq_domain *intc_domain;
/* Number of standard local interrupts; raised to 64 when AIA is available. */
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
/* First hwirq of the vendor-specific local interrupt range (Andes: 256). */
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
/* Number of vendor-specific local interrupts; 0 when there are none. */
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
Anup Patel6b7ce8922020-06-01 14:45:40 +053028
/*
 * Top-level trap entry for local interrupts when AIA is not available:
 * decode the interrupt cause from scause and dispatch into the INTC domain.
 */
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
{
	/* Clear the interrupt flag bit to recover the raw cause number (hwirq). */
	unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;

	if (generic_handle_domain_irq(intc_domain, cause))
		pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
}
36
Anup Patel3c46fc52024-02-22 15:09:56 +053037static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
38{
39 unsigned long topi;
40
41 while ((topi = csr_read(CSR_TOPI)))
42 generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT);
43}
44
Anup Patel6b7ce8922020-06-01 14:45:40 +053045/*
46 * On RISC-V systems local interrupts are masked or unmasked by writing
47 * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
48 * on the local hart, these functions can only be called on the hart that
49 * corresponds to the IRQ chip.
50 */
51
52static void riscv_intc_irq_mask(struct irq_data *d)
53{
Anup Patel3c46fc52024-02-22 15:09:56 +053054 if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
55 csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
56 else
57 csr_clear(CSR_IE, BIT(d->hwirq));
Anup Patel6b7ce8922020-06-01 14:45:40 +053058}
59
60static void riscv_intc_irq_unmask(struct irq_data *d)
61{
Anup Patel3c46fc52024-02-22 15:09:56 +053062 if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
63 csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
64 else
65 csr_set(CSR_IE, BIT(d->hwirq));
Anup Patel6b7ce8922020-06-01 14:45:40 +053066}
67
Yu Chien Peter Linf4cc33e2024-02-22 16:39:39 +080068static void andes_intc_irq_mask(struct irq_data *d)
69{
70 /*
71 * Andes specific S-mode local interrupt causes (hwirq)
72 * are defined as (256 + n) and controlled by n-th bit
73 * of SLIE.
74 */
75 unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
76
77 if (d->hwirq < ANDES_SLI_CAUSE_BASE)
78 csr_clear(CSR_IE, mask);
79 else
80 csr_clear(ANDES_CSR_SLIE, mask);
81}
82
83static void andes_intc_irq_unmask(struct irq_data *d)
84{
85 unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
86
87 if (d->hwirq < ANDES_SLI_CAUSE_BASE)
88 csr_set(CSR_IE, mask);
89 else
90 csr_set(ANDES_CSR_SLIE, mask);
91}
92
/* Intentionally-empty irq_eoi callback; see the rationale below. */
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * The RISC-V INTC driver uses handle_percpu_devid_irq() flow
	 * for the per-HART local interrupts and child irqchip drivers
	 * (such as PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc) implement
	 * chained handlers for the per-HART local interrupts.
	 *
	 * In the absence of irq_eoi(), the chained_irq_enter() and
	 * chained_irq_exit() functions (used by child irqchip drivers)
	 * will do unnecessary mask/unmask of per-HART local interrupts
	 * at the time of handling interrupts. To avoid this, we provide
	 * an empty irq_eoi() callback for RISC-V INTC irqchip.
	 */
}
108
/* irq_chip for the standard RISC-V local interrupt controller. */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
115
/*
 * irq_chip variant for Andes cores: same name and eoi, but mask/unmask
 * also handle the vendor-specific SLIE-controlled causes (hwirq >= 256).
 */
static struct irq_chip andes_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = andes_intc_irq_mask,
	.irq_unmask = andes_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
122
/*
 * Bind one Linux irq to a local-interrupt hwirq: per-CPU devid handling
 * with the irq_chip stashed in the domain's host_data (riscv or andes).
 */
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct irq_chip *chip = d->host_data;

	irq_set_percpu_devid(irq);
	irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
			    NULL, NULL);

	return 0;
}
134
Anup Patel832f15f42023-03-28 09:22:19 +0530135static int riscv_intc_domain_alloc(struct irq_domain *domain,
136 unsigned int virq, unsigned int nr_irqs,
137 void *arg)
138{
139 int i, ret;
140 irq_hw_number_t hwirq;
141 unsigned int type = IRQ_TYPE_NONE;
142 struct irq_fwspec *fwspec = arg;
143
144 ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
145 if (ret)
146 return ret;
147
Yu Chien Peter Lin96303bc2024-02-22 16:39:38 +0800148 /*
149 * Only allow hwirq for which we have corresponding standard or
150 * custom interrupt enable register.
151 */
Samuel Hollandca5b0b72024-03-12 14:28:08 -0700152 if (hwirq >= riscv_intc_nr_irqs &&
153 (hwirq < riscv_intc_custom_base ||
154 hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
Yu Chien Peter Lin96303bc2024-02-22 16:39:38 +0800155 return -EINVAL;
156
Anup Patel832f15f42023-03-28 09:22:19 +0530157 for (i = 0; i < nr_irqs; i++) {
158 ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
159 if (ret)
160 return ret;
161 }
162
163 return 0;
164}
165
/* Domain ops: legacy .map/.xlate path plus hierarchical .alloc path. */
static const struct irq_domain_ops riscv_intc_domain_ops = {
	.map	= riscv_intc_domain_map,
	.xlate	= irq_domain_xlate_onecell,
	.alloc	= riscv_intc_domain_alloc
};
171
/* Accessor registered via riscv_set_intc_hwnode_fn() so child irqchips
 * (PLIC, IMSIC, ...) can find the INTC fwnode as their parent. */
static struct fwnode_handle *riscv_intc_hwnode(void)
{
	return intc_domain->fwnode;
}
176
Anup Patel3c46fc52024-02-22 15:09:56 +0530177static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip)
Sunil V L7023b9d2023-05-15 11:19:23 +0530178{
179 int rc;
180
Yu Chien Peter Linf4cc33e2024-02-22 16:39:39 +0800181 intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
Sunil V L7023b9d2023-05-15 11:19:23 +0530182 if (!intc_domain) {
183 pr_err("unable to add IRQ domain\n");
184 return -ENXIO;
185 }
186
Samuel Hollandca5b0b72024-03-12 14:28:08 -0700187 if (riscv_isa_extension_available(NULL, SxAIA)) {
188 riscv_intc_nr_irqs = 64;
Anup Patel678c6072024-02-26 09:37:37 +0530189 rc = set_handle_irq(&riscv_intc_aia_irq);
Samuel Hollandca5b0b72024-03-12 14:28:08 -0700190 } else {
Anup Patel678c6072024-02-26 09:37:37 +0530191 rc = set_handle_irq(&riscv_intc_irq);
Samuel Hollandca5b0b72024-03-12 14:28:08 -0700192 }
Sunil V L7023b9d2023-05-15 11:19:23 +0530193 if (rc) {
194 pr_err("failed to set irq handler\n");
195 return rc;
196 }
197
198 riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
199
Anup Patel678c6072024-02-26 09:37:37 +0530200 pr_info("%d local interrupts mapped%s\n",
Samuel Hollandca5b0b72024-03-12 14:28:08 -0700201 riscv_intc_nr_irqs,
Anup Patel678c6072024-02-26 09:37:37 +0530202 riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
Anup Patel3c46fc52024-02-22 15:09:56 +0530203 if (riscv_intc_custom_nr_irqs)
204 pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
Sunil V L7023b9d2023-05-15 11:19:23 +0530205
206 return 0;
207}
208
/*
 * DT probe entry for "riscv,cpu-intc" / "andestech,cpu-intc" nodes.
 * Only the boot hart's INTC node actually creates the IRQ domain; the
 * others are merely marked initialized so fw_devlink suppliers resolve.
 */
static int __init riscv_intc_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_chip *chip = &riscv_intc_chip;
	unsigned long hartid;
	int rc;

	rc = riscv_of_parent_hartid(node, &hartid);
	if (rc < 0) {
		/* Not fatal for boot: just skip this INTC node. */
		pr_warn("unable to find hart id for %pOF\n", node);
		return 0;
	}

	/*
	 * The DT will have one INTC DT node under each CPU (or HART)
	 * DT node so riscv_intc_init() function will be called once
	 * for each INTC DT node. We only need to do INTC initialization
	 * for the INTC DT node belonging to boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
		/*
		 * The INTC nodes of each CPU are suppliers for downstream
		 * interrupt controllers (such as PLIC, IMSIC and APLIC
		 * direct-mode) so we should mark an INTC node as initialized
		 * if we are not creating IRQ domain for it.
		 */
		fwnode_dev_initialized(of_fwnode_handle(node), true);
		return 0;
	}

	/* Andes cores get the vendor irq_chip and the custom SLIE range. */
	if (of_device_is_compatible(node, "andestech,cpu-intc")) {
		riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
		riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
		chip = &andes_intc_chip;
	}

	return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
247
/*
 * Both compatibles share the same init function; riscv_intc_init()
 * selects the irq_chip variant based on the matched compatible string.
 */
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
Sunil V L7023b9d2023-05-15 11:19:23 +0530250
251#ifdef CONFIG_ACPI
252
/*
 * ACPI probe entry, invoked once per RINTC entry in the MADT. Like the
 * DT path, only the boot hart's entry creates the IRQ domain; an fwnode
 * is allocated here (no DT node exists) and freed again on failure.
 */
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
				       const unsigned long end)
{
	struct acpi_madt_rintc *rintc;
	struct fwnode_handle *fn;
	int rc;

	rintc = (struct acpi_madt_rintc *)header;

	/*
	 * The ACPI MADT will have one INTC for each CPU (or HART)
	 * so riscv_intc_acpi_init() function will be called once
	 * for each INTC. We only do INTC initialization
	 * for the INTC belonging to the boot CPU (or boot HART).
	 */
	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
		return 0;

	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
	if (!fn) {
		pr_err("unable to allocate INTC FW node\n");
		return -ENOMEM;
	}

	rc = riscv_intc_init_common(fn, &riscv_intc_chip);
	if (rc)
		irq_domain_free_fwnode(fn);

	return rc;
}
283
/* Register the ACPI MADT RINTC (v1) probe for the RISC-V INTC. */
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
286#endif