/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;

	bool clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

74
Mark Brownf8beab22011-10-28 23:50:49 +020075static void regmap_irq_sync_unlock(struct irq_data *data)
76{
77 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
Stephen Warren56806552012-04-10 23:37:22 -060078 struct regmap *map = d->map;
Mark Brownf8beab22011-10-28 23:50:49 +020079 int i, ret;
Stephen Warren16032622012-07-27 13:01:54 -060080 u32 reg;
Guo Zeng7b7d1962015-09-17 05:23:20 +000081 u32 unmask_offset;
Bartosz Golaszewskic82ea332018-12-19 12:18:05 +010082 u32 val;
Mark Brownf8beab22011-10-28 23:50:49 +020083
Mark Brown0c00c502012-07-24 15:41:19 +010084 if (d->chip->runtime_pm) {
85 ret = pm_runtime_get_sync(map->dev);
86 if (ret < 0)
87 dev_err(map->dev, "IRQ sync failed to resume: %d\n",
88 ret);
89 }
90
Bartosz Golaszewskic82ea332018-12-19 12:18:05 +010091 if (d->clear_status) {
92 for (i = 0; i < d->chip->num_regs; i++) {
93 reg = d->chip->status_base +
94 (i * map->reg_stride * d->irq_reg_stride);
95
96 ret = regmap_read(map, reg, &val);
97 if (ret)
98 dev_err(d->map->dev,
99 "Failed to clear the interrupt status bits\n");
100 }
101
102 d->clear_status = false;
103 }
104
	/*
	 * If there's been a change in the mask, write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; otherwise
		 * a masked interrupt that was never acked will be ignored
		 * by the IRQ handler and may trigger an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count, propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}
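
/*
 * Illustrative sketch (not from this file): a chip using type_in_mask
 * has one mask bit per edge, so a single line with a configurable edge
 * might be described roughly as below.  All names (foo_irqs, FOO_*) are
 * hypothetical.
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		[FOO_IRQ_PLUG] = {
 *			.reg_offset = 0,
 *			.mask = BIT(0) | BIT(1),
 *			.type = {
 *				.type_rising_val = BIT(0),
 *				.type_falling_val = BIT(1),
 *				.types_supported = IRQ_TYPE_EDGE_BOTH,
 *			},
 *		},
 *	};
 */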

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
				     t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
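
/*
 * The trigger type arrives here through the normal IRQ core paths; for
 * example, a consumer request such as the (hypothetical) one below ends
 * up in regmap_irq_set_type() with type == IRQ_TYPE_EDGE_FALLING:
 *
 *	ret = request_threaded_irq(virq, NULL, foo_thread_fn,
 *				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 *				   "foo", foo);
 */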

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient, the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (!chip->mask_base)
			continue;

		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						0);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);

			ret = regmap_read(map, reg, &d->type_buf_def[i]);

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];

			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
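
/*
 * Typical (hypothetical) usage from a driver probe path, combining the
 * sketches above; none of the foo_* / FOO_* names exist in the kernel:
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_ACK,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(foo->regmap, foo->irq,
 *				  IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
 *				  &foo_irq_chip, &foo->irq_data);
 *	if (ret)
 *		return ret;
 */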

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs in the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirqs that are holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ for this hwirq on the chip and
		 * dispose of it if one has been mapped.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
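
/*
 * Sketch of the matching teardown in a non-devm driver's remove path
 * (hypothetical names):
 *
 *	regmap_del_irq_chip(foo->irq, foo->irq_data);
 */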

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
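
/*
 * The devm variant removes the need for an explicit cleanup call; a
 * (hypothetical) caller only needs:
 *
 *	ret = devm_regmap_add_irq_chip(dev, foo->regmap, foo->irq,
 *				       IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
 *				       &foo_irq_chip, &foo->irq_data);
 */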

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
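
/*
 * Only meaningful for the legacy fixed-base setup (irq_base > 0 passed
 * to regmap_add_irq_chip()); a (hypothetical) caller could then do:
 *
 *	irq = regmap_irq_chip_get_base(foo->irq_data) + FOO_IRQ_PLUG;
 */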

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
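
/*
 * Sketch of a child driver mapping one of the chip's interrupts and
 * requesting it (names hypothetical):
 *
 *	virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_PLUG);
 *	if (virq < 0)
 *		return virq;
 *
 *	ret = devm_request_threaded_irq(dev, virq, NULL, foo_plug_irq,
 *					IRQF_ONESHOT, "foo-plug", foo);
 */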

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration, NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
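
/*
 * Integration sketch: an MFD parent can hand the domain to its children
 * so they can map hwirqs themselves (hypothetical names):
 *
 *	domain = regmap_irq_get_domain(foo->irq_data);
 *	virq = irq_create_mapping(domain, FOO_IRQ_PLUG);
 */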