/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016, 2017 Cavium Inc.
 */

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#define GPIO_RX_DAT			0x0
#define GPIO_TX_SET			0x8
#define GPIO_TX_CLR			0x10
#define GPIO_CONST			0x90
#define GPIO_CONST_GPIOS_MASK		0xff
#define GPIO_BIT_CFG			0x400
#define GPIO_BIT_CFG_TX_OE		BIT(0)
#define GPIO_BIT_CFG_PIN_XOR		BIT(1)
#define GPIO_BIT_CFG_INT_EN		BIT(2)
#define GPIO_BIT_CFG_INT_TYPE		BIT(3)
#define GPIO_BIT_CFG_FIL_MASK		GENMASK(11, 4)
#define GPIO_BIT_CFG_FIL_CNT_SHIFT	4
#define GPIO_BIT_CFG_FIL_SEL_SHIFT	8
#define GPIO_BIT_CFG_TX_OD		BIT(12)
#define GPIO_BIT_CFG_PIN_SEL_MASK	GENMASK(25, 16)
#define GPIO_INTR			0x800
#define GPIO_INTR_INTR			BIT(0)
#define GPIO_INTR_INTR_W1S		BIT(1)
#define GPIO_INTR_ENA_W1C		BIT(2)
#define GPIO_INTR_ENA_W1S		BIT(3)
#define GPIO_2ND_BANK			0x1400

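/*
 * Default glitch filter: FIL_SEL = 4 (sample every 2^4 filter clocks of
 * 2.5 ns) and FIL_CNT = 9, i.e. roughly 9 * 16 * 2.5 ns ~= 400 ns of
 * deglitching.  See thunderx_gpio_set_config() for how these fields are
 * derived from a requested debounce time.
 */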
#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
			     (9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))

struct thunderx_gpio;

struct thunderx_line {
	struct thunderx_gpio	*txgpio;
	unsigned int		line;
	unsigned int		fil_bits;
};

struct thunderx_gpio {
	struct gpio_chip	chip;
	u8 __iomem		*register_base;
	struct msix_entry	*msix_entries;	/* per line MSI-X */
	struct thunderx_line	*line_entries;	/* per line irq info */
	raw_spinlock_t		lock;
	unsigned long		invert_mask[2];
	unsigned long		od_mask[2];
	int			base_msi;
};

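/* The per-line GPIO_BIT_CFG and GPIO_INTR registers have an 8-byte stride. */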
static unsigned int bit_cfg_reg(unsigned int line)
{
	return 8 * line + GPIO_BIT_CFG;
}

static unsigned int intr_reg(unsigned int line)
{
	return 8 * line + GPIO_INTR;
}

static bool thunderx_gpio_is_gpio_nowarn(struct thunderx_gpio *txgpio,
					 unsigned int line)
{
	u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));

	return (bit_cfg & GPIO_BIT_CFG_PIN_SEL_MASK) == 0;
}

/*
 * Check (and WARN) that the pin is available for GPIO. We will not
 * allow modification of the state of non-GPIO pins from this driver.
 */
static bool thunderx_gpio_is_gpio(struct thunderx_gpio *txgpio,
				  unsigned int line)
{
	bool rv = thunderx_gpio_is_gpio_nowarn(txgpio, line);

	WARN_RATELIMIT(!rv, "Pin %d not available for GPIO\n", line);

	return rv;
}

static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	return thunderx_gpio_is_gpio(txgpio, line) ? 0 : -EIO;
}

static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);
	clear_bit(line, txgpio->invert_mask);
	clear_bit(line, txgpio->od_mask);
	writeq(txgpio->line_entries[line].fil_bits,
	       txgpio->register_base + bit_cfg_reg(line));
	raw_spin_unlock(&txgpio->lock);
	return 0;
}

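/*
 * GPIO_TX_SET and GPIO_TX_CLR are write-one-to-set/clear registers, so a
 * single line can be updated without a read-modify-write or taking the lock.
 */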
static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
			      int value)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	int bank = line / 64;
	int bank_bit = line % 64;

	void __iomem *reg = txgpio->register_base +
		(bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);

	writeq(BIT_ULL(bank_bit), reg);
}

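/*
 * Set the requested level first, then enable the output driver, applying
 * any inversion/open-drain state recorded in invert_mask/od_mask.
 */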
static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
				 int value)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);

	thunderx_gpio_set(chip, line, value);

	if (test_bit(line, txgpio->invert_mask))
		bit_cfg |= GPIO_BIT_CFG_PIN_XOR;

	if (test_bit(line, txgpio->od_mask))
		bit_cfg |= GPIO_BIT_CFG_TX_OD;

	writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));

	raw_spin_unlock(&txgpio->lock);
	return 0;
}

static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	u64 bit_cfg;

	if (!thunderx_gpio_is_gpio_nowarn(txgpio, line))
		/*
		 * Say it is input for now to avoid WARNing on
		 * gpiochip_add_data(). We will WARN if someone
		 * requests it or tries to use it.
		 */
		return GPIO_LINE_DIRECTION_IN;

	bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));

	if (bit_cfg & GPIO_BIT_CFG_TX_OE)
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}

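/*
 * Only open-drain, push-pull and input debounce are handled here; any other
 * pinconf parameter falls through to the default case and returns -ENOTSUPP.
 */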
static int thunderx_gpio_set_config(struct gpio_chip *chip,
				    unsigned int line,
				    unsigned long cfg)
{
	bool orig_invert, orig_od, orig_dat, new_invert, new_od;
	u32 arg, sel;
	u64 bit_cfg;
	int bank = line / 64;
	int bank_bit = line % 64;
	int ret = -ENOTSUPP;
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);
	orig_invert = test_bit(line, txgpio->invert_mask);
	new_invert = orig_invert;
	orig_od = test_bit(line, txgpio->od_mask);
	new_od = orig_od;
	orig_dat = ((readq(reg) >> bank_bit) & 1) ^ orig_invert;
	bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
	switch (pinconf_to_config_param(cfg)) {
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		/*
		 * Weird, setting open-drain mode causes signal
		 * inversion. Note this so we can compensate in the
		 * dir_out function.
		 */
		set_bit(line, txgpio->invert_mask);
		new_invert = true;
		set_bit(line, txgpio->od_mask);
		new_od = true;
		ret = 0;
		break;
	case PIN_CONFIG_DRIVE_PUSH_PULL:
		clear_bit(line, txgpio->invert_mask);
		new_invert = false;
		clear_bit(line, txgpio->od_mask);
		new_od = false;
		ret = 0;
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		arg = pinconf_to_config_argument(cfg);
		if (arg > 1228) { /* 15 * 2^15 * 2.5 ns maximum */
			ret = -EINVAL;
			break;
		}
		arg *= 400; /* scale to 2.5 ns clocks. */
		sel = 0;
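		/*
		 * FIL_CNT is only 4 bits wide, so trade count for interval:
		 * halve the count (rounding up) and increment FIL_SEL until
		 * the count fits in 15.
		 */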
		while (arg > 15) {
			sel++;
			arg++; /* always round up */
			arg >>= 1;
		}
		txgpio->line_entries[line].fil_bits =
			(sel << GPIO_BIT_CFG_FIL_SEL_SHIFT) |
			(arg << GPIO_BIT_CFG_FIL_CNT_SHIFT);
		bit_cfg &= ~GPIO_BIT_CFG_FIL_MASK;
		bit_cfg |= txgpio->line_entries[line].fil_bits;
		writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
		ret = 0;
		break;
	default:
		break;
	}
	raw_spin_unlock(&txgpio->lock);

	/*
	 * If currently output and OPEN_DRAIN changed, install the new
	 * settings
	 */
	if ((new_invert != orig_invert || new_od != orig_od) &&
	    (bit_cfg & GPIO_BIT_CFG_TX_OE))
		ret = thunderx_gpio_dir_out(chip, line, orig_dat ^ new_invert);

	return ret;
}

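/*
 * Read the raw level from GPIO_RX_DAT in the line's bank and undo any
 * inversion this driver programmed (open-drain or active-low trigger setup
 * sets PIN_XOR and the corresponding invert_mask bit).
 */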
static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	int bank = line / 64;
	int bank_bit = line % 64;
	u64 read_bits = readq(txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_RX_DAT);
	u64 masked_bits = read_bits & BIT_ULL(bank_bit);

	if (test_bit(line, txgpio->invert_mask))
		return masked_bits == 0;
	else
		return masked_bits != 0;
}

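/* Update whole 64-line banks at once via the TX_SET/TX_CLR register pair. */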
static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
				       unsigned long *mask,
				       unsigned long *bits)
{
	int bank;
	u64 set_bits, clear_bits;
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	for (bank = 0; bank <= chip->ngpio / 64; bank++) {
		set_bits = bits[bank] & mask[bank];
		clear_bits = ~bits[bank] & mask[bank];
		writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
		writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
	}
}

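/*
 * Per-line GPIO_INTR register: writing GPIO_INTR_INTR acknowledges a latched
 * interrupt, while ENA_W1S/ENA_W1C set and clear the interrupt enable.
 */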
static void thunderx_gpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_INTR,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1C,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1S,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static int thunderx_gpio_irq_set_type(struct irq_data *d,
				      unsigned int flow_type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
	struct thunderx_line *txline =
		&txgpio->line_entries[irqd_to_hwirq(d)];
	u64 bit_cfg;

	irqd_set_trigger_type(d, flow_type);

	bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;

	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
		irq_set_handler_locked(d, handle_fasteoi_ack_irq);
		bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
	} else {
		irq_set_handler_locked(d, handle_fasteoi_mask_irq);
	}

	raw_spin_lock(&txgpio->lock);
	if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
		bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
		set_bit(txline->line, txgpio->invert_mask);
	} else {
		clear_bit(txline->line, txgpio->invert_mask);
	}
	clear_bit(txline->line, txgpio->od_mask);
	writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
	raw_spin_unlock(&txgpio->lock);

	return IRQ_SET_MASK_OK;
}

static void thunderx_gpio_irq_enable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	irq_chip_enable_parent(d);
	thunderx_gpio_irq_unmask(d);
}

static void thunderx_gpio_irq_disable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	thunderx_gpio_irq_mask(d);
	irq_chip_disable_parent(d);
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

/*
 * Interrupts are chained from underlying MSI-X vectors. We have
 * these irq_chip functions to be able to handle level triggering
 * semantics and other acknowledgment tasks associated with the GPIO
 * mechanism.
 */
static const struct irq_chip thunderx_gpio_irq_chip = {
	.name			= "GPIO",
	.irq_enable		= thunderx_gpio_irq_enable,
	.irq_disable		= thunderx_gpio_irq_disable,
	.irq_ack		= thunderx_gpio_irq_ack,
	.irq_mask		= thunderx_gpio_irq_mask,
	.irq_mask_ack		= thunderx_gpio_irq_mask_ack,
	.irq_unmask		= thunderx_gpio_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= thunderx_gpio_irq_set_type,
	.flags			= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

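/*
 * Each GPIO line has its own MSI-X vector; translate the GPIO (child) hwirq
 * into the hwirq of that vector in the parent MSI domain for the
 * hierarchical irqdomain setup.
 */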
static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
					       unsigned int child,
					       unsigned int child_type,
					       unsigned int *parent,
					       unsigned int *parent_type)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
	struct irq_data *irqd;
	unsigned int irq;

	irq = txgpio->msix_entries[child].vector;
	irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
	if (!irqd)
		return -EINVAL;
	*parent = irqd_to_hwirq(irqd);
	*parent_type = IRQ_TYPE_LEVEL_HIGH;
	return 0;
}

static int thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
						    union gpio_irq_fwspec *gfwspec,
						    unsigned int parent_hwirq,
						    unsigned int parent_type)
{
	msi_alloc_info_t *info = &gfwspec->msiinfo;

	info->hwirq = parent_hwirq;
	return 0;
}

static int thunderx_gpio_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	void __iomem * const *tbl;
	struct device *dev = &pdev->dev;
	struct thunderx_gpio *txgpio;
	struct gpio_chip *chip;
	struct gpio_irq_chip *girq;
	int ngpio, i;
	int err = 0;

	txgpio = devm_kzalloc(dev, sizeof(*txgpio), GFP_KERNEL);
	if (!txgpio)
		return -ENOMEM;

	raw_spin_lock_init(&txgpio->lock);
	chip = &txgpio->chip;

	pci_set_drvdata(pdev, txgpio);

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device: err %d\n", err);
		goto out;
	}

	err = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (err) {
		dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
		goto out;
	}

	tbl = pcim_iomap_table(pdev);
	txgpio->register_base = tbl[0];
	if (!txgpio->register_base) {
		dev_err(dev, "Cannot map PCI resource\n");
		err = -ENOMEM;
		goto out;
	}

	if (pdev->subsystem_device == 0xa10a) {
		/* CN88XX has no GPIO_CONST register */
		ngpio = 50;
		txgpio->base_msi = 48;
	} else {
		u64 c = readq(txgpio->register_base + GPIO_CONST);

		ngpio = c & GPIO_CONST_GPIOS_MASK;
		txgpio->base_msi = (c >> 8) & 0xff;
	}

	txgpio->msix_entries = devm_kcalloc(dev,
					    ngpio, sizeof(struct msix_entry),
					    GFP_KERNEL);
	if (!txgpio->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	txgpio->line_entries = devm_kcalloc(dev,
					    ngpio,
					    sizeof(struct thunderx_line),
					    GFP_KERNEL);
	if (!txgpio->line_entries) {
		err = -ENOMEM;
		goto out;
	}

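	/*
	 * The per-line MSI-X table entries are spaced two apart, starting at
	 * base_msi; record each line's entry index and default glitch filter
	 * setting here.
	 */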
	for (i = 0; i < ngpio; i++) {
		u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(i));

		txgpio->msix_entries[i].entry = txgpio->base_msi + (2 * i);
		txgpio->line_entries[i].line = i;
		txgpio->line_entries[i].txgpio = txgpio;
		/*
		 * If something has already programmed the pin, use
		 * the existing glitch filter settings, otherwise go
		 * to 400 ns.
		 */
		txgpio->line_entries[i].fil_bits = bit_cfg ?
			(bit_cfg & GPIO_BIT_CFG_FIL_MASK) : GLITCH_FILTER_400NS;

		if ((bit_cfg & GPIO_BIT_CFG_TX_OE) && (bit_cfg & GPIO_BIT_CFG_TX_OD))
			set_bit(i, txgpio->od_mask);
		if (bit_cfg & GPIO_BIT_CFG_PIN_XOR)
			set_bit(i, txgpio->invert_mask);
	}

	/* Enable all MSI-X for interrupts on all possible lines. */
	err = pci_enable_msix_range(pdev, txgpio->msix_entries, ngpio, ngpio);
	if (err < 0)
		goto out;

	chip->label = KBUILD_MODNAME;
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->request = thunderx_gpio_request;
	chip->base = -1; /* System allocated */
	chip->can_sleep = false;
	chip->ngpio = ngpio;
	chip->get_direction = thunderx_gpio_get_direction;
	chip->direction_input = thunderx_gpio_dir_in;
	chip->get = thunderx_gpio_get;
	chip->direction_output = thunderx_gpio_dir_out;
	chip->set = thunderx_gpio_set;
	chip->set_multiple = thunderx_gpio_set_multiple;
	chip->set_config = thunderx_gpio_set_config;
	girq = &chip->irq;
	gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
	girq->fwnode = of_node_to_fwnode(dev->of_node);
	girq->parent_domain =
		irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
	girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
	girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
	girq->handler = handle_bad_irq;
	girq->default_type = IRQ_TYPE_NONE;

	err = devm_gpiochip_add_data(dev, chip, txgpio);
	if (err)
		goto out;

	/* Push on irq_data and the domain for each line. */
	for (i = 0; i < ngpio; i++) {
		struct irq_fwspec fwspec;

		fwspec.fwnode = of_node_to_fwnode(dev->of_node);
		fwspec.param_count = 2;
		fwspec.param[0] = i;
		fwspec.param[1] = IRQ_TYPE_NONE;
		err = irq_domain_push_irq(girq->domain,
					  txgpio->msix_entries[i].vector,
					  &fwspec);
		if (err < 0)
			dev_err(dev, "irq_domain_push_irq: %d\n", err);
	}

	dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
		 ngpio, chip->base);
	return 0;
out:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void thunderx_gpio_remove(struct pci_dev *pdev)
{
	int i;
	struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);

	for (i = 0; i < txgpio->chip.ngpio; i++)
		irq_domain_pop_irq(txgpio->chip.irq.domain,
				   txgpio->msix_entries[i].vector);

	irq_domain_remove(txgpio->chip.irq.domain);

	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id thunderx_gpio_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA00A) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, thunderx_gpio_id_table);

static struct pci_driver thunderx_gpio_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunderx_gpio_id_table,
	.probe = thunderx_gpio_probe,
	.remove = thunderx_gpio_remove,
};

module_pci_driver(thunderx_gpio_driver);

MODULE_DESCRIPTION("Cavium Inc. ThunderX/OCTEON-TX GPIO Driver");
MODULE_LICENSE("GPL");