blob: 4a3ffe856d6c30e18dc41a9d5e91314eca8a0207 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4 * Copyright (C) 2022 Ventana Micro Systems Inc.
5 */
6
7#include <linux/bitfield.h>
8#include <linux/bitops.h>
9#include <linux/cpu.h>
10#include <linux/interrupt.h>
11#include <linux/irqchip.h>
12#include <linux/irqchip/chained_irq.h>
13#include <linux/irqchip/riscv-aplic.h>
14#include <linux/module.h>
15#include <linux/of_address.h>
16#include <linux/printk.h>
17#include <linux/smp.h>
18
19#include "irq-riscv-aplic-main.h"
20
/* Per-IDC idelivery register values: 1 enables, 0 disables interrupt delivery */
#define APLIC_DISABLE_IDELIVERY		0
#define APLIC_ENABLE_IDELIVERY		1
/*
 * Per-IDC ithreshold register values: an interrupt is delivered only when
 * its priority is less than the threshold, so 0 opens the gate for all
 * priorities while 1 masks everything (priorities start at 1).
 */
#define APLIC_DISABLE_ITHRESHOLD	1
#define APLIC_ENABLE_ITHRESHOLD		0
25
/*
 * Per-APLIC state for direct (IDC) delivery mode.
 */
struct aplic_direct {
	struct aplic_priv priv;		/* common APLIC state; container_of() anchor */
	struct irq_domain *irqdomain;	/* linear domain for the wired interrupts */
	struct cpumask lmask;		/* CPUs that have an IDC on this APLIC */
};
31
/*
 * Per-CPU interrupt delivery control (IDC) state.
 */
struct aplic_idc {
	unsigned int hart_index;	/* index of this IDC within the APLIC */
	void __iomem *regs;		/* base of this IDC's register page */
	struct aplic_direct *direct;	/* owning APLIC instance */
};
37
/* Linux irq number of the chained per-CPU parent (RISC-V external) interrupt */
static unsigned int aplic_direct_parent_irq;
/* One IDC per CPU; filled in by aplic_direct_setup() */
static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
40
static void aplic_direct_irq_eoi(struct irq_data *d)
{
	/*
	 * The fasteoi_handler requires irq_eoi() callback hence
	 * provide a dummy handler. The interrupt is actually
	 * acknowledged by reading CLAIMI in aplic_direct_handle_irq(),
	 * so there is nothing left to do here.
	 */
}
48
#ifdef CONFIG_SMP
/*
 * Route a wired interrupt to a single CPU by programming the source's
 * TARGET register with the hart index of that CPU's IDC.
 */
static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				     bool force)
{
	struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	unsigned int target_cpu, tval;
	struct aplic_idc *idc;

	/* Only CPUs that actually have an IDC on this APLIC are eligible */
	if (force)
		target_cpu = cpumask_first_and(&direct->lmask, mask_val);
	else
		target_cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
	if (target_cpu >= nr_cpu_ids)
		return -EINVAL;

	idc = per_cpu_ptr(&aplic_idcs, target_cpu);
	tval = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index) |
	       FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
	/* TARGET registers are indexed from interrupt source 1 */
	writel(tval, priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32));

	irq_data_update_effective_affinity(d, cpumask_of(target_cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
78
/* irqchip callbacks for wired interrupts delivered in direct (IDC) mode */
static struct irq_chip aplic_direct_chip = {
	.name		= "APLIC-DIRECT",
	.irq_mask	= aplic_irq_mask,
	.irq_unmask	= aplic_irq_unmask,
	.irq_set_type	= aplic_irq_set_type,
	.irq_eoi	= aplic_direct_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = aplic_direct_set_affinity,
#endif
	.flags		= IRQCHIP_SET_TYPE_MASKED |
			  IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_MASK_ON_SUSPEND,
};
92
93static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
94 unsigned long *hwirq, unsigned int *type)
95{
96 struct aplic_priv *priv = d->host_data;
97
98 return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
99}
100
101static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
102 unsigned int nr_irqs, void *arg)
103{
104 struct aplic_priv *priv = domain->host_data;
105 struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
106 struct irq_fwspec *fwspec = arg;
107 irq_hw_number_t hwirq;
108 unsigned int type;
109 int i, ret;
110
111 ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
112 if (ret)
113 return ret;
114
115 for (i = 0; i < nr_irqs; i++) {
116 irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
117 priv, handle_fasteoi_irq, NULL, NULL);
118 irq_set_affinity(virq + i, &direct->lmask);
119 }
120
121 return 0;
122}
123
/* Domain operations for the direct-mode linear irqdomain */
static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
	.translate	= aplic_direct_irqdomain_translate,
	.alloc		= aplic_direct_irqdomain_alloc,
	.free		= irq_domain_free_irqs_top,
};
129
/*
 * To handle APLIC direct interrupts, we just read the CLAIMI register,
 * which returns the highest-priority pending interrupt and clears its
 * pending bit. This process is repeated until the CLAIMI register
 * returns zero.
 */
136static void aplic_direct_handle_irq(struct irq_desc *desc)
137{
138 struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
139 struct irq_domain *irqdomain = idc->direct->irqdomain;
140 struct irq_chip *chip = irq_desc_get_chip(desc);
141 irq_hw_number_t hw_irq;
142 int irq;
143
144 chained_irq_enter(chip, desc);
145
146 while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
147 hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
148 irq = irq_find_mapping(irqdomain, hw_irq);
149
150 if (unlikely(irq <= 0)) {
151 dev_warn_ratelimited(idc->direct->priv.dev,
152 "hw_irq %lu mapping not found\n", hw_irq);
153 } else {
154 generic_handle_irq(irq);
155 }
156 }
157
158 chained_irq_exit(chip, desc);
159}
160
161static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
162{
163 u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
164 u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;
165
166 /* Priority must be less than threshold for interrupt triggering */
167 writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);
168
169 /* Delivery must be set to 1 for interrupt triggering */
170 writel(de, idc->regs + APLIC_IDC_IDELIVERY);
171}
172
173static int aplic_direct_dying_cpu(unsigned int cpu)
174{
175 if (aplic_direct_parent_irq)
176 disable_percpu_irq(aplic_direct_parent_irq);
177
178 return 0;
179}
180
181static int aplic_direct_starting_cpu(unsigned int cpu)
182{
183 if (aplic_direct_parent_irq) {
184 enable_percpu_irq(aplic_direct_parent_irq,
185 irq_get_trigger_type(aplic_direct_parent_irq));
186 }
187
188 return 0;
189}
190
191static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
192 u32 *parent_hwirq, unsigned long *parent_hartid)
193{
194 struct of_phandle_args parent;
195 int rc;
196
197 /*
198 * Currently, only OF fwnode is supported so extend this
199 * function for ACPI support.
200 */
201 if (!is_of_node(dev->fwnode))
202 return -EINVAL;
203
204 rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
205 if (rc)
206 return rc;
207
208 rc = riscv_of_parent_hartid(parent.np, parent_hartid);
209 if (rc)
210 return rc;
211
212 *parent_hwirq = parent.args[0];
213 return 0;
214}
215
/*
 * Probe-time setup for an APLIC operating in direct (IDC) delivery mode:
 * initialize per-CPU IDCs, hook the chained parent interrupt, program
 * the global config, and create the irqdomain for the wired interrupts.
 * Returns 0 on success or a negative errno.
 */
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
	int i, j, rc, cpu, current_cpu, setup_count = 0;
	struct aplic_direct *direct;
	struct irq_domain *domain;
	struct aplic_priv *priv;
	struct aplic_idc *idc;
	unsigned long hartid;
	u32 v, hwirq;

	direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
	if (!direct)
		return -ENOMEM;
	priv = &direct->priv;

	rc = aplic_setup_priv(priv, dev, regs);
	if (rc) {
		dev_err(dev, "failed to create APLIC context\n");
		return rc;
	}

	/*
	 * Setup per-CPU IDC and target CPU mask. get_cpu() pins us so
	 * the current_cpu comparison below stays valid across the loop.
	 */
	current_cpu = get_cpu();
	for (i = 0; i < priv->nr_idcs; i++) {
		rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid);
		if (rc) {
			dev_warn(dev, "parent irq for IDC%d not found\n", i);
			continue;
		}

		/*
		 * Skip interrupts other than external interrupts for
		 * current privilege level.
		 */
		if (hwirq != RV_IRQ_EXT)
			continue;

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			dev_warn(dev, "invalid cpuid for IDC%d\n", i);
			continue;
		}

		cpumask_set_cpu(cpu, &direct->lmask);

		idc = per_cpu_ptr(&aplic_idcs, cpu);
		idc->hart_index = i;
		idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
		idc->direct = direct;

		aplic_idc_set_delivery(idc, true);

		/*
		 * Boot cpu might not have APLIC hart_index = 0 so check
		 * and update target registers of all interrupts.
		 */
		if (cpu == current_cpu && idc->hart_index) {
			v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
			v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
			for (j = 1; j <= priv->nr_irqs; j++)
				writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
		}

		setup_count++;
	}
	put_cpu();

	/*
	 * Find parent domain and register chained handler. The parent irq
	 * and CPUHP state are registered once globally (shared by all
	 * APLIC instances), hence the !aplic_direct_parent_irq check.
	 */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!aplic_direct_parent_irq && domain) {
		aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
		if (aplic_direct_parent_irq) {
			irq_set_chained_handler(aplic_direct_parent_irq,
						aplic_direct_handle_irq);

			/*
			 * Setup CPUHP notifier to enable parent
			 * interrupt on all CPUs
			 */
			cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "irqchip/riscv/aplic:starting",
					  aplic_direct_starting_cpu,
					  aplic_direct_dying_cpu);
		}
	}

	/* Fail if we were not able to setup IDC for any CPU */
	if (!setup_count)
		return -ENODEV;

	/* Setup global config and interrupt delivery */
	aplic_init_hw_global(priv, false);

	/* Create irq domain instance for the APLIC (hwirq 0 is unused) */
	direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
						     &aplic_direct_irqdomain_ops, priv);
	if (!direct->irqdomain) {
		dev_err(dev, "failed to create direct irq domain\n");
		return -ENOMEM;
	}

	/* Advertise the interrupt controller */
	dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
		 priv->nr_irqs, priv->nr_idcs);

	return 0;
}