// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics 2017
 * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/syscore_ops.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define IRQS_PER_BANK 32

#define HWSPNLCK_TIMEOUT	1000 /* usec */
#define HWSPNLCK_RETRY_DELAY	100  /* usec */

struct stm32_exti_bank {
	u32 imr_ofst;
	u32 emr_ofst;
	u32 rtsr_ofst;
	u32 ftsr_ofst;
	u32 swier_ofst;
	u32 rpr_ofst;
	u32 fpr_ofst;
};

#define UNDEF_REG ~0

struct stm32_desc_irq {
	u32 exti;
	u32 irq_parent;
};

struct stm32_exti_drv_data {
	const struct stm32_exti_bank **exti_banks;
	const struct stm32_desc_irq *desc_irqs;
	u32 bank_nr;
	u32 irq_nr;
};

struct stm32_exti_chip_data {
	struct stm32_exti_host_data *host_data;
	const struct stm32_exti_bank *reg_bank;
	struct raw_spinlock rlock;
	u32 wake_active;
	u32 mask_cache;
	u32 rtsr_cache;
	u32 ftsr_cache;
};

struct stm32_exti_host_data {
	void __iomem *base;
	struct stm32_exti_chip_data *chips_data;
	const struct stm32_exti_drv_data *drv_data;
	struct hwspinlock *hwlock;
};

static struct stm32_exti_host_data *stm32_host_data;

static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
	.imr_ofst = 0x00,
	.emr_ofst = 0x04,
	.rtsr_ofst = 0x08,
	.ftsr_ofst = 0x0C,
	.swier_ofst = 0x10,
	.rpr_ofst = 0x14,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
	&stm32f4xx_exti_b1,
};

static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
	.exti_banks = stm32f4xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
};

static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
	.imr_ofst = 0x80,
	.emr_ofst = 0x84,
	.rtsr_ofst = 0x00,
	.ftsr_ofst = 0x04,
	.swier_ofst = 0x08,
	.rpr_ofst = 0x88,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
	.imr_ofst = 0x90,
	.emr_ofst = 0x94,
	.rtsr_ofst = 0x20,
	.ftsr_ofst = 0x24,
	.swier_ofst = 0x28,
	.rpr_ofst = 0x98,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
	.imr_ofst = 0xA0,
	.emr_ofst = 0xA4,
	.rtsr_ofst = 0x40,
	.ftsr_ofst = 0x44,
	.swier_ofst = 0x48,
	.rpr_ofst = 0xA8,
	.fpr_ofst = UNDEF_REG,
};

static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
	&stm32h7xx_exti_b1,
	&stm32h7xx_exti_b2,
	&stm32h7xx_exti_b3,
};

static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
	.exti_banks = stm32h7xx_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
};

static const struct stm32_exti_bank stm32mp1_exti_b1 = {
	.imr_ofst = 0x80,
	.emr_ofst = 0x84,
	.rtsr_ofst = 0x00,
	.ftsr_ofst = 0x04,
	.swier_ofst = 0x08,
	.rpr_ofst = 0x0C,
	.fpr_ofst = 0x10,
};

static const struct stm32_exti_bank stm32mp1_exti_b2 = {
	.imr_ofst = 0x90,
	.emr_ofst = 0x94,
	.rtsr_ofst = 0x20,
	.ftsr_ofst = 0x24,
	.swier_ofst = 0x28,
	.rpr_ofst = 0x2C,
	.fpr_ofst = 0x30,
};

static const struct stm32_exti_bank stm32mp1_exti_b3 = {
	.imr_ofst = 0xA0,
	.emr_ofst = 0xA4,
	.rtsr_ofst = 0x40,
	.ftsr_ofst = 0x44,
	.swier_ofst = 0x48,
	.rpr_ofst = 0x4C,
	.fpr_ofst = 0x50,
};

static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
	&stm32mp1_exti_b1,
	&stm32mp1_exti_b2,
	&stm32mp1_exti_b3,
};

static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
	{ .exti = 0, .irq_parent = 6 },
	{ .exti = 1, .irq_parent = 7 },
	{ .exti = 2, .irq_parent = 8 },
	{ .exti = 3, .irq_parent = 9 },
	{ .exti = 4, .irq_parent = 10 },
	{ .exti = 5, .irq_parent = 23 },
	{ .exti = 6, .irq_parent = 64 },
	{ .exti = 7, .irq_parent = 65 },
	{ .exti = 8, .irq_parent = 66 },
	{ .exti = 9, .irq_parent = 67 },
	{ .exti = 10, .irq_parent = 40 },
	{ .exti = 11, .irq_parent = 42 },
	{ .exti = 12, .irq_parent = 76 },
	{ .exti = 13, .irq_parent = 77 },
	{ .exti = 14, .irq_parent = 121 },
	{ .exti = 15, .irq_parent = 127 },
	{ .exti = 16, .irq_parent = 1 },
	{ .exti = 65, .irq_parent = 144 },
	{ .exti = 68, .irq_parent = 143 },
	{ .exti = 73, .irq_parent = 129 },
};

static const struct stm32_exti_drv_data stm32mp1_drv_data = {
	.exti_banks = stm32mp1_exti_banks,
	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
	.desc_irqs = stm32mp1_desc_irq,
	.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};

static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
			     irq_hw_number_t hwirq)
{
	const struct stm32_desc_irq *desc_irq;
	int i;

	if (!drv_data->desc_irqs)
		return -EINVAL;

	for (i = 0; i < drv_data->irq_nr; i++) {
		desc_irq = &drv_data->desc_irqs[i];
		if (desc_irq->exti == hwirq)
			return desc_irq->irq_parent;
	}

	return -EINVAL;
}

static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	unsigned long pending;

	pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);

	return pending;
}

static void stm32_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq, nbanks = domain->gc->num_chips;
	struct irq_chip_generic *gc;
	unsigned long pending;
	int n, i, irq_base = 0;

	chained_irq_enter(chip, desc);

	for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
		gc = irq_get_domain_generic_chip(domain, irq_base);

		while ((pending = stm32_exti_pending(gc))) {
			for_each_set_bit(n, &pending, IRQS_PER_BANK) {
				virq = irq_find_mapping(domain, irq_base + n);
				generic_handle_irq(virq);
			}
		}
	}

	chained_irq_exit(chip, desc);
}

static int stm32_exti_set_type(struct irq_data *d,
			       unsigned int type, u32 *rtsr, u32 *ftsr)
{
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		*rtsr |= mask;
		*ftsr &= ~mask;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		*rtsr &= ~mask;
		*ftsr |= mask;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		*rtsr |= mask;
		*ftsr |= mask;
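		/*
		 * Both trigger registers (rtsr/ftsr) are armed, so the line
		 * raises its pending bit on either a rising or a falling
		 * edge. Level trigger types cannot be expressed through these
		 * registers and fall through to -EINVAL below.
		 */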
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
{
	int ret, timeout = 0;

	if (!chip_data->host_data->hwlock)
		return 0;

	/*
	 * Use the x_raw API since we are under spin_lock protection.
	 * Do not use the x_timeout API because we are under irq_disable
	 * mode (see __setup_irq())
	 */
	do {
		ret = hwspin_trylock_raw(chip_data->host_data->hwlock);
		if (!ret)
			return 0;

		udelay(HWSPNLCK_RETRY_DELAY);
		timeout += HWSPNLCK_RETRY_DELAY;
	} while (timeout < HWSPNLCK_TIMEOUT);

	if (ret == -EBUSY)
		ret = -ETIMEDOUT;

	if (ret)
		pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);

	return ret;
}

static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
{
	if (chip_data->host_data->hwlock)
		hwspin_unlock_raw(chip_data->host_data->hwlock);
}

static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	u32 rtsr, ftsr;
	int err;

	irq_gc_lock(gc);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
	ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
	irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	irq_gc_unlock(gc);

	return err;
}

static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
			       u32 wake_active)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* save rtsr, ftsr registers */
	chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
	chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);

	writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
}

static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
			      u32 mask_cache)
{
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;

	/* restore rtsr, ftsr registers */
	writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
	writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);

	writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
}

static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_suspend(chip_data, gc->wake_active);
	irq_gc_unlock(gc);
}

static void stm32_irq_resume(struct irq_chip_generic *gc)
{
	struct stm32_exti_chip_data *chip_data = gc->private;

	irq_gc_lock(gc);
	stm32_chip_resume(chip_data, gc->mask_cache);
	irq_gc_unlock(gc);
}

static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;

	hwirq = fwspec->param[0];

	irq_map_generic_chip(d, virq, hwirq);

	return 0;
}
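
/*
 * stm32_exti_alloc()/stm32_exti_free() back the non-hierarchical domain used
 * by the F4/H7 init path (see irq_exti_domain_ops below): each EXTI line is
 * simply mapped onto its bank's generic chip, and freeing resets the irq_data.
 */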

static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);

	irq_domain_reset_irq_data(data);
}

static const struct irq_domain_ops irq_exti_domain_ops = {
	.map = irq_map_generic_chip,
	.alloc = stm32_exti_alloc,
	.free = stm32_exti_free,
};

static void stm32_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct stm32_exti_chip_data *chip_data = gc->private;
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	irq_gc_lock(gc);

	irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);

	irq_gc_unlock(gc);
}

static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val |= BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	void __iomem *base = chip_data->host_data->base;
	u32 val;

	val = readl_relaxed(base + reg);
	val &= ~BIT(d->hwirq % IRQS_PER_BANK);
	writel_relaxed(val, base + reg);

	return val;
}

static void stm32_exti_h_eoi(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);

	stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
	if (stm32_bank->fpr_ofst != UNDEF_REG)
		stm32_exti_set_bit(d, stm32_bank->fpr_ofst);

	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_eoi_parent(d);
}

static void stm32_exti_h_mask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_mask_parent(d);
}

static void stm32_exti_h_unmask(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;

	raw_spin_lock(&chip_data->rlock);
	chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
	raw_spin_unlock(&chip_data->rlock);

	if (d->parent_data->chip)
		irq_chip_unmask_parent(d);
}

static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;
	u32 rtsr, ftsr;
	int err;

	raw_spin_lock(&chip_data->rlock);

	err = stm32_exti_hwspin_lock(chip_data);
	if (err)
		goto unlock;

	rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
	ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);

	err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
	if (err)
		goto unspinlock;

	writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
	writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);

unspinlock:
	stm32_exti_hwspin_unlock(chip_data);
unlock:
	raw_spin_unlock(&chip_data->rlock);

	return err;
}

static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	raw_spin_lock(&chip_data->rlock);

	if (on)
		chip_data->wake_active |= mask;
	else
		chip_data->wake_active &= ~mask;

	raw_spin_unlock(&chip_data->rlock);

	return 0;
}

static int stm32_exti_h_set_affinity(struct irq_data *d,
				     const struct cpumask *dest, bool force)
{
	if (d->parent_data->chip)
		return irq_chip_set_affinity_parent(d, dest, force);

	return -EINVAL;
}

static int __maybe_unused stm32_exti_h_suspend(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_suspend(chip_data, chip_data->wake_active);
		raw_spin_unlock(&chip_data->rlock);
	}

	return 0;
}

static void __maybe_unused stm32_exti_h_resume(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_resume(chip_data, chip_data->mask_cache);
		raw_spin_unlock(&chip_data->rlock);
	}
}

static struct syscore_ops stm32_exti_h_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = stm32_exti_h_suspend,
	.resume = stm32_exti_h_resume,
#endif
};

static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
{
	stm32_host_data = host_data;
	register_syscore_ops(&stm32_exti_h_syscore_ops);
}

static void stm32_exti_h_syscore_deinit(void)
{
	unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}

static int stm32_exti_h_retrigger(struct irq_data *d)
{
	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
	void __iomem *base = chip_data->host_data->base;
	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);

	writel_relaxed(mask, base + stm32_bank->swier_ofst);

	return 0;
}

static struct irq_chip stm32_exti_h_chip = {
	.name = "stm32-exti-h",
	.irq_eoi = stm32_exti_h_eoi,
	.irq_mask = stm32_exti_h_mask,
	.irq_unmask = stm32_exti_h_unmask,
	.irq_retrigger = stm32_exti_h_retrigger,
	.irq_set_type = stm32_exti_h_set_type,
	.irq_set_wake = stm32_exti_h_set_wake,
	.flags = IRQCHIP_MASK_ON_SUSPEND,
	.irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
};

static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
				     unsigned int virq,
				     unsigned int nr_irqs, void *data)
{
	struct stm32_exti_host_data *host_data = dm->host_data;
	struct stm32_exti_chip_data *chip_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec p_fwspec;
	irq_hw_number_t hwirq;
	int p_irq, bank;

	hwirq = fwspec->param[0];
	bank = hwirq / IRQS_PER_BANK;
	chip_data = &host_data->chips_data[bank];

	irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
				      &stm32_exti_h_chip, chip_data);

	p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
	if (p_irq >= 0) {
		p_fwspec.fwnode = dm->parent->fwnode;
		p_fwspec.param_count = 3;
		p_fwspec.param[0] = GIC_SPI;
		p_fwspec.param[1] = p_irq;
		p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

		return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
	}

	return 0;
}

static struct
stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
					   struct device_node *node)
{
	struct stm32_exti_host_data *host_data;

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return NULL;

	host_data->drv_data = dd;
	host_data->chips_data = kcalloc(dd->bank_nr,
					sizeof(struct stm32_exti_chip_data),
					GFP_KERNEL);
	if (!host_data->chips_data)
		goto free_host_data;

	host_data->base = of_iomap(node, 0);
	if (!host_data->base) {
		pr_err("%pOF: Unable to map registers\n", node);
		goto free_chips_data;
	}

	stm32_host_data = host_data;

	return host_data;

free_chips_data:
	kfree(host_data->chips_data);
free_host_data:
	kfree(host_data);

	return NULL;
}

static struct
stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
					   u32 bank_idx,
					   struct device_node *node)
{
	const struct stm32_exti_bank *stm32_bank;
	struct stm32_exti_chip_data *chip_data;
	void __iomem *base = h_data->base;

	stm32_bank = h_data->drv_data->exti_banks[bank_idx];
	chip_data = &h_data->chips_data[bank_idx];
	chip_data->host_data = h_data;
	chip_data->reg_bank = stm32_bank;

	raw_spin_lock_init(&chip_data->rlock);

	/*
	 * This IP has no reset, so after hot reboot we should
	 * clear registers to avoid residue
	 */
	writel_relaxed(0, base + stm32_bank->imr_ofst);
	writel_relaxed(0, base + stm32_bank->emr_ofst);

	pr_info("%pOF: bank%d\n", node, bank_idx);

	return chip_data;
}

static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
				  struct device_node *node)
{
	struct stm32_exti_host_data *host_data;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int nr_irqs, ret, i;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;

	host_data = stm32_exti_host_init(drv_data, node);
	if (!host_data)
		return -ENOMEM;

	domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
				       &irq_exti_domain_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: Could not register interrupt domain.\n",
		       node);
		ret = -ENOMEM;
		goto out_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
					     handle_edge_irq, clr, 0, 0);
	if (ret) {
		pr_err("%pOF: Could not allocate generic interrupt chip.\n",
		       node);
		goto out_free_domain;
	}

	for (i = 0; i < drv_data->bank_nr; i++) {
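		/*
		 * One irq_chip_generic per 32-line bank: acks clear the
		 * pending register(s), mask/unmask toggle bits in the bank's
		 * imr register, and suspend/resume go through
		 * stm32_chip_suspend()/stm32_chip_resume() so that only
		 * wake-enabled lines stay unmasked across sleep.
		 */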
		const struct stm32_exti_bank *stm32_bank;
		struct stm32_exti_chip_data *chip_data;

		stm32_bank = drv_data->exti_banks[i];
		chip_data = stm32_exti_chip_init(host_data, i, node);

		gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);

		gc->reg_base = host_data->base;
		gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
		gc->chip_types->chip.irq_ack = stm32_irq_ack;
		gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
		gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
		gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
		gc->suspend = stm32_irq_suspend;
		gc->resume = stm32_irq_resume;
		gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);

		gc->chip_types->regs.mask = stm32_bank->imr_ofst;
		gc->private = (void *)chip_data;
	}

	nr_irqs = of_irq_count(node);
	for (i = 0; i < nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		irq_set_handler_data(irq, domain);
		irq_set_chained_handler(irq, stm32_irq_handler);
	}

	return 0;

out_free_domain:
	irq_domain_remove(domain);
out_unmap:
	iounmap(host_data->base);
	kfree(host_data->chips_data);
	kfree(host_data);
	return ret;
}

static const struct irq_domain_ops stm32_exti_h_domain_ops = {
	.alloc = stm32_exti_h_domain_alloc,
	.free = irq_domain_free_irqs_common,
	.xlate = irq_domain_xlate_twocell,
};

static void stm32_exti_remove_irq(void *data)
{
	struct irq_domain *domain = data;

	irq_domain_remove(domain);
}

static int stm32_exti_remove(struct platform_device *pdev)
{
	stm32_exti_h_syscore_deinit();
	return 0;
}

static int stm32_exti_probe(struct platform_device *pdev)
{
	int ret, i;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct irq_domain *parent_domain, *domain;
	struct stm32_exti_host_data *host_data;
	const struct stm32_exti_drv_data *drv_data;
	struct resource *res;

	host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;

	/* check for optional hwspinlock which may not be available yet */
	ret = of_hwspin_lock_get_id(np, 0);
	if (ret == -EPROBE_DEFER)
		/* hwspinlock framework not yet ready */
		return ret;

	if (ret >= 0) {
		host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
		if (!host_data->hwlock) {
			dev_err(dev, "Failed to request hwspinlock\n");
			return -EINVAL;
		}
	} else if (ret != -ENOENT) {
		/* note: ENOENT is a valid case (means 'no hwspinlock') */
		dev_err(dev, "Failed to get hwspinlock\n");
		return ret;
	}

	/* initialize host_data */
	drv_data = of_device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "no of match data\n");
		return -ENODEV;
	}
	host_data->drv_data = drv_data;

	host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
					     sizeof(*host_data->chips_data),
					     GFP_KERNEL);
	if (!host_data->chips_data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(host_data->base)) {
		dev_err(dev, "Unable to map registers\n");
		return PTR_ERR(host_data->base);
	}

	for (i = 0; i < drv_data->bank_nr; i++)
		stm32_exti_chip_init(host_data, i, np);

	parent_domain = irq_find_host(of_irq_find_parent(np));
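	/*
	 * The MP1 EXTI domain is stacked on the GIC domain found through the
	 * interrupt-parent: lines listed in stm32mp1_desc_irq[] get a parent
	 * SPI allocated in stm32_exti_h_domain_alloc().
	 */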
	if (!parent_domain) {
		dev_err(dev, "GIC interrupt-parent not found\n");
		return -EINVAL;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  drv_data->bank_nr * IRQS_PER_BANK,
					  np, &stm32_exti_h_domain_ops,
					  host_data);

	if (!domain) {
		dev_err(dev, "Could not register exti domain\n");
		return -ENOMEM;
	}

	ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
	if (ret)
		return ret;

	stm32_exti_h_syscore_init(host_data);

	return 0;
}

/* platform driver only for MP1 */
static const struct of_device_id stm32_exti_ids[] = {
	{ .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
	{},
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);

static struct platform_driver stm32_exti_driver = {
	.probe = stm32_exti_probe,
	.remove = stm32_exti_remove,
	.driver = {
		.name = "stm32_exti",
		.of_match_table = stm32_exti_ids,
	},
};

static int __init stm32_exti_arch_init(void)
{
	return platform_driver_register(&stm32_exti_driver);
}

static void __exit stm32_exti_arch_exit(void)
{
	return platform_driver_unregister(&stm32_exti_driver);
}

arch_initcall(stm32_exti_arch_init);
module_exit(stm32_exti_arch_exit);

/* no platform driver for F4 and H7 */
static int __init stm32f4_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32f4xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);

static int __init stm32h7_exti_of_init(struct device_node *np,
				       struct device_node *parent)
{
	return stm32_exti_init(&stm32h7xx_drv_data, np);
}

IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
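
/*
 * Illustrative consumer fragment (node name and line number are examples,
 * not taken from this driver): the hierarchical MP1 domain uses
 * irq_domain_xlate_twocell, so a devicetree consumer passes a two-cell
 * specifier of <exti-line trigger-type>, e.g.:
 *
 *	interrupt-parent = <&exti>;
 *	interrupts = <18 IRQ_TYPE_EDGE_RISING>;
 */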