// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance.
 * The *_buf arrays are indexed by status/mask register index
 * (0 .. chip->num_regs - 1) and act as a cache that is flushed to the
 * hardware in regmap_irq_sync_unlock().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* taken in irq_bus_lock, dropped in irq_bus_sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy so ->name can be customised */

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;			/* first Linux IRQ number for legacy domains, if used */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* signed delta of wake enables to push to the parent */

	/* Resolved mask/unmask register bases (possibly swapped at setup) */
	unsigned int mask_base;
	unsigned int unmask_base;

	void *status_reg_buf;		/* raw bulk-read scratch, elements of map->format.val_bytes */
	unsigned int *main_status_buf;	/* latest main (hierarchical) status register values */
	unsigned int *status_buf;
	unsigned int *mask_buf;		/* set bit == interrupt masked */
	unsigned int *mask_buf_def;	/* union of all irq masks per register */
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;
	unsigned int **config_buf;	/* [num_config_bases][num_config_regs] */

	unsigned int irq_reg_stride;

	/* Maps (base register, register index) to a register address */
	unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
				    unsigned int base, int index);

	unsigned int clear_status:1;	/* read status regs on next sync to clear them */
};

/* Return the static per-IRQ descriptor for hardware IRQ number @irq. */
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

/* True if the status registers are contiguous and bulk-readable. */
static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
	struct regmap *map = data->map;

	/*
	 * While possible that a user-defined ->get_irq_reg() callback might
	 * be linear enough to support bulk reads, most of the time it won't.
	 * Therefore only allow them if the default callback is being used.
	 */
	return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
	       data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
	       !map->use_single_read;
}

/* irq_bus_lock: serialise updates of the cached mask/wake/type buffers. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

/*
 * irq_bus_sync_unlock: write all cached mask/wake/ack/config state back to
 * the hardware, propagate wake-count changes to the parent interrupt, then
 * release the lock taken in regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, j, ret;
	u32 reg;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		/* clear_on_unmask chips clear status by reading it */
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->get_irq_reg(d, d->chip->status_base, i);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (d->mask_base) {
			if (d->chip->handle_mask_sync)
				d->chip->handle_mask_sync(i, d->mask_buf_def[i],
							  d->mask_buf[i],
							  d->chip->irq_drv_data);
			else {
				reg = d->get_irq_reg(d, d->mask_base, i);
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						d->mask_buf[i]);
				if (ret)
					dev_err(d->map->dev, "Failed to sync masks in %x\n",
						reg);
			}
		}

		if (d->unmask_base) {
			/* Inverted register: write 1 to enable an interrupt */
			reg = d->get_irq_reg(d, d->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n",
					reg);
		}

		reg = d->get_irq_reg(d, d->chip->wake_base, i);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);

			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				/* ack register must be written back afterwards */
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Flush any cached IRQ type configuration registers */
	for (i = 0; i < d->chip->num_config_bases; i++) {
		for (j = 0; j < d->chip->num_config_regs; j++) {
			reg = d->get_irq_reg(d, d->chip->config_base[i], j);
			ret = regmap_write(map, reg, d->config_buf[i][j]);
			if (ret)
				dev_err(d->map->dev,
					"Failed to write config %x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/* irq_enable: clear the relevant mask bit(s) in the cache; synced on unlock. */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int reg = irq_data->reg_offset / map->reg_stride;
	unsigned int mask;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for each interrupt trigger type, but we want
	 * to have a single logical interrupt with a configurable type.
	 *
	 * If the interrupt we're enabling defines any supported types
	 * then instead of using the regular mask bits for this interrupt,
	 * use the value previously written to the type buffer at the
	 * corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && irq_data->type.types_supported)
		mask = d->type_buf[reg] & irq_data->mask;
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[reg] &= ~mask;
}

/* irq_disable: set the mask bit(s) in the cache; synced on unlock. */
static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

/*
 * irq_set_type: record the requested trigger type in the cached type and/or
 * config buffers.  Returns 0 when @type is not supported by this IRQ (the
 * request is silently ignored) or a negative errno from the config callbacks.
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg, ret;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	if (d->chip->type_in_mask) {
		ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
							irq_data, reg, d->chip->irq_drv_data);
		if (ret)
			return ret;
	}

	if (d->chip->set_type_config) {
		ret = d->chip->set_type_config(d->config_buf, type, irq_data,
					       reg, d->chip->irq_drv_data);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * irq_set_wake: track per-IRQ wake enables in the cache and count the net
 * change so it can be pushed to the parent IRQ at sync_unlock time.
 */
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

/* Template irq_chip; copied per instance so ->name can be customised. */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

/*
 * Read the sub status register(s) behind bit @b of the main status
 * register into data->status_buf.  Returns 0 or a regmap_read() errno.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
				    unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	unsigned int reg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		/* No explicit mapping: main bit b indexes status register b */
		reg = data->get_irq_reg(data, chip->status_base, b);
		ret = regmap_read(map, reg, &data->status_buf[b]);
	} else {
		/*
		 * Note we can't use ->get_irq_reg() here because the offsets
		 * in 'subreg' are *not* interchangeable with indices.
		 */
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];
			unsigned int index = offset / map->reg_stride;

			ret = regmap_read(map, chip->status_base + offset,
					  &data->status_buf[index]);
			if (ret)
				break;
		}
	}
	return ret;
}

/*
 * Threaded handler for the primary interrupt: gather the status registers,
 * mask/ack as configured, then dispatch nested handlers for every pending
 * bit.  Returns IRQ_HANDLED if at least one nested IRQ was dispatched.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			goto exit;
		}
	}

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */

	if (chip->no_status) {
		/* no status register so default to all active */
		memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
	} else if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			reg = data->get_irq_reg(data, chip->main_status, i);
			ret = regmap_read(map, reg, &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				/* bits beyond the advertised range are ignored */
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (regmap_irq_can_bulk_read_status(data)) {

		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		/* Widen the raw values into the unsigned int status buffer */
		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Fallback: one read per status register */
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int reg = data->get_irq_reg(data,
					data->chip->status_base, i);
			ret = regmap_read(map, reg, &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	if (chip->status_invert)
		for (i = 0; i < data->chip->num_regs; i++)
			data->status_buf[i] = ~data->status_buf[i];

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = data->get_irq_reg(data, data->chip->ack_base, i);

			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch a nested handler for every pending, unmasked hwirq */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* irq_domain map callback: wire a new virq up to this chip instance. */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the &struct regmap_irq_chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and @index
 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
					   unsigned int base, int index)
{
	struct regmap *map = data->map;

	return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);

/**
 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
 * @buf: Buffer containing configuration register values, this is a 2D array of
 *	 `num_config_bases` rows, each of `num_config_regs` elements.
 * @type: The requested IRQ type.
 * @irq_data: The IRQ being configured.
 * @idx: Index of the irq's config registers within each array `buf[i]`
 * @irq_drv_data: Driver specific IRQ data
 *
 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
 * chips with one config register. Register values are updated according to
 * the &struct regmap_irq_type data associated with an IRQ.
 */
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
				      const struct regmap_irq *irq_data,
				      int idx, void *irq_drv_data)
{
	const struct regmap_irq_type *t = &irq_data->type;

	/* Clear the old type bits before setting the new ones */
	if (t->type_reg_mask)
		buf[0][idx] &= ~t->type_reg_mask;
	else
		buf[0][idx] &= ~(t->type_falling_val |
				 t->type_rising_val |
				 t->type_level_low_val |
				 t->type_level_high_val);

	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		buf[0][idx] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		buf[0][idx] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		buf[0][idx] |= (t->type_falling_val |
				t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		buf[0][idx] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		buf[0][idx] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);

616 /** 617 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling 618 * 619 * @fwnode: The firmware node where the IRQ domain should be added to. 620 * @map: The regmap for the device. 621 * @irq: The IRQ the device uses to signal interrupts. 622 * @irq_flags: The IRQF_ flags to use for the primary interrupt. 623 * @irq_base: Allocate at specific IRQ number if irq_base > 0. 624 * @chip: Configuration for the interrupt controller. 625 * @data: Runtime data structure for the controller, allocated on success. 626 * 627 * Returns 0 on success or an errno on failure. 628 * 629 * In order for this to be efficient the chip really should use a 630 * register cache. The chip driver is responsible for restoring the 631 * register values used by the IRQ controller over suspend and resume. 632 */ 633 int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, 634 struct regmap *map, int irq, 635 int irq_flags, int irq_base, 636 const struct regmap_irq_chip *chip, 637 struct regmap_irq_chip_data **data) 638 { 639 struct regmap_irq_chip_data *d; 640 int i; 641 int ret = -ENOMEM; 642 u32 reg; 643 644 if (chip->num_regs <= 0) 645 return -EINVAL; 646 647 if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) 648 return -EINVAL; 649 650 for (i = 0; i < chip->num_irqs; i++) { 651 if (chip->irqs[i].reg_offset % map->reg_stride) 652 return -EINVAL; 653 if (chip->irqs[i].reg_offset / map->reg_stride >= 654 chip->num_regs) 655 return -EINVAL; 656 } 657 658 if (irq_base) { 659 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); 660 if (irq_base < 0) { 661 dev_warn(map->dev, "Failed to allocate IRQs: %d\n", 662 irq_base); 663 return irq_base; 664 } 665 } 666 667 d = kzalloc(sizeof(*d), GFP_KERNEL); 668 if (!d) 669 return -ENOMEM; 670 671 if (chip->num_main_regs) { 672 d->main_status_buf = kcalloc(chip->num_main_regs, 673 sizeof(*d->main_status_buf), 674 GFP_KERNEL); 675 676 if (!d->main_status_buf) 677 goto err_alloc; 678 } 679 680 d->status_buf = 
kcalloc(chip->num_regs, sizeof(*d->status_buf), 681 GFP_KERNEL); 682 if (!d->status_buf) 683 goto err_alloc; 684 685 d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf), 686 GFP_KERNEL); 687 if (!d->mask_buf) 688 goto err_alloc; 689 690 d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def), 691 GFP_KERNEL); 692 if (!d->mask_buf_def) 693 goto err_alloc; 694 695 if (chip->wake_base) { 696 d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf), 697 GFP_KERNEL); 698 if (!d->wake_buf) 699 goto err_alloc; 700 } 701 702 if (chip->type_in_mask) { 703 d->type_buf_def = kcalloc(chip->num_regs, 704 sizeof(*d->type_buf_def), GFP_KERNEL); 705 if (!d->type_buf_def) 706 goto err_alloc; 707 708 d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL); 709 if (!d->type_buf) 710 goto err_alloc; 711 } 712 713 if (chip->num_config_bases && chip->num_config_regs) { 714 /* 715 * Create config_buf[num_config_bases][num_config_regs] 716 */ 717 d->config_buf = kcalloc(chip->num_config_bases, 718 sizeof(*d->config_buf), GFP_KERNEL); 719 if (!d->config_buf) 720 goto err_alloc; 721 722 for (i = 0; i < chip->num_config_regs; i++) { 723 d->config_buf[i] = kcalloc(chip->num_config_regs, 724 sizeof(**d->config_buf), 725 GFP_KERNEL); 726 if (!d->config_buf[i]) 727 goto err_alloc; 728 } 729 } 730 731 d->irq_chip = regmap_irq_chip; 732 d->irq_chip.name = chip->name; 733 d->irq = irq; 734 d->map = map; 735 d->chip = chip; 736 d->irq_base = irq_base; 737 738 if (chip->mask_base && chip->unmask_base && 739 !chip->mask_unmask_non_inverted) { 740 /* 741 * Chips that specify both mask_base and unmask_base used to 742 * get inverted mask behavior by default, with no way to ask 743 * for the normal, non-inverted behavior. This "inverted by 744 * default" behavior is deprecated, but we have to support it 745 * until existing drivers have been fixed. 
746 * 747 * Existing drivers should be updated by swapping mask_base 748 * and unmask_base and setting mask_unmask_non_inverted=true. 749 * New drivers should always set the flag. 750 */ 751 dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it"); 752 753 d->mask_base = chip->unmask_base; 754 d->unmask_base = chip->mask_base; 755 } else { 756 d->mask_base = chip->mask_base; 757 d->unmask_base = chip->unmask_base; 758 } 759 760 if (chip->irq_reg_stride) 761 d->irq_reg_stride = chip->irq_reg_stride; 762 else 763 d->irq_reg_stride = 1; 764 765 if (chip->get_irq_reg) 766 d->get_irq_reg = chip->get_irq_reg; 767 else 768 d->get_irq_reg = regmap_irq_get_irq_reg_linear; 769 770 if (regmap_irq_can_bulk_read_status(d)) { 771 d->status_reg_buf = kmalloc_array(chip->num_regs, 772 map->format.val_bytes, 773 GFP_KERNEL); 774 if (!d->status_reg_buf) 775 goto err_alloc; 776 } 777 778 mutex_init(&d->lock); 779 780 for (i = 0; i < chip->num_irqs; i++) 781 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] 782 |= chip->irqs[i].mask; 783 784 /* Mask all the interrupts by default */ 785 for (i = 0; i < chip->num_regs; i++) { 786 d->mask_buf[i] = d->mask_buf_def[i]; 787 788 if (d->mask_base) { 789 if (chip->handle_mask_sync) { 790 ret = chip->handle_mask_sync(i, 791 d->mask_buf_def[i], 792 d->mask_buf[i], 793 chip->irq_drv_data); 794 if (ret) 795 goto err_alloc; 796 } else { 797 reg = d->get_irq_reg(d, d->mask_base, i); 798 ret = regmap_update_bits(d->map, reg, 799 d->mask_buf_def[i], 800 d->mask_buf[i]); 801 if (ret) { 802 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 803 reg, ret); 804 goto err_alloc; 805 } 806 } 807 } 808 809 if (d->unmask_base) { 810 reg = d->get_irq_reg(d, d->unmask_base, i); 811 ret = regmap_update_bits(d->map, reg, 812 d->mask_buf_def[i], ~d->mask_buf[i]); 813 if (ret) { 814 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 815 reg, ret); 816 goto err_alloc; 817 } 818 } 819 820 if (!chip->init_ack_masked) 821 continue; 
822 823 /* Ack masked but set interrupts */ 824 if (d->chip->no_status) { 825 /* no status register so default to all active */ 826 d->status_buf[i] = GENMASK(31, 0); 827 } else { 828 reg = d->get_irq_reg(d, d->chip->status_base, i); 829 ret = regmap_read(map, reg, &d->status_buf[i]); 830 if (ret != 0) { 831 dev_err(map->dev, "Failed to read IRQ status: %d\n", 832 ret); 833 goto err_alloc; 834 } 835 } 836 837 if (chip->status_invert) 838 d->status_buf[i] = ~d->status_buf[i]; 839 840 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { 841 reg = d->get_irq_reg(d, d->chip->ack_base, i); 842 if (chip->ack_invert) 843 ret = regmap_write(map, reg, 844 ~(d->status_buf[i] & d->mask_buf[i])); 845 else 846 ret = regmap_write(map, reg, 847 d->status_buf[i] & d->mask_buf[i]); 848 if (chip->clear_ack) { 849 if (chip->ack_invert && !ret) 850 ret = regmap_write(map, reg, UINT_MAX); 851 else if (!ret) 852 ret = regmap_write(map, reg, 0); 853 } 854 if (ret != 0) { 855 dev_err(map->dev, "Failed to ack 0x%x: %d\n", 856 reg, ret); 857 goto err_alloc; 858 } 859 } 860 } 861 862 /* Wake is disabled by default */ 863 if (d->wake_buf) { 864 for (i = 0; i < chip->num_regs; i++) { 865 d->wake_buf[i] = d->mask_buf_def[i]; 866 reg = d->get_irq_reg(d, d->chip->wake_base, i); 867 868 if (chip->wake_invert) 869 ret = regmap_update_bits(d->map, reg, 870 d->mask_buf_def[i], 871 0); 872 else 873 ret = regmap_update_bits(d->map, reg, 874 d->mask_buf_def[i], 875 d->wake_buf[i]); 876 if (ret != 0) { 877 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 878 reg, ret); 879 goto err_alloc; 880 } 881 } 882 } 883 884 if (irq_base) 885 d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs, 886 irq_base, 0, 887 ®map_domain_ops, d); 888 else 889 d->domain = irq_domain_create_linear(fwnode, chip->num_irqs, 890 ®map_domain_ops, d); 891 if (!d->domain) { 892 dev_err(map->dev, "Failed to create IRQ domain\n"); 893 ret = -ENOMEM; 894 goto err_alloc; 895 } 896 897 ret = request_threaded_irq(irq, 
NULL, regmap_irq_thread, 898 irq_flags | IRQF_ONESHOT, 899 chip->name, d); 900 if (ret != 0) { 901 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", 902 irq, chip->name, ret); 903 goto err_domain; 904 } 905 906 *data = d; 907 908 return 0; 909 910 err_domain: 911 /* Should really dispose of the domain but... */ 912 err_alloc: 913 kfree(d->type_buf); 914 kfree(d->type_buf_def); 915 kfree(d->wake_buf); 916 kfree(d->mask_buf_def); 917 kfree(d->mask_buf); 918 kfree(d->status_buf); 919 kfree(d->status_reg_buf); 920 if (d->config_buf) { 921 for (i = 0; i < chip->num_config_bases; i++) 922 kfree(d->config_buf[i]); 923 kfree(d->config_buf); 924 } 925 kfree(d); 926 return ret; 927 } 928 EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode); 929 930 /** 931 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling 932 * 933 * @map: The regmap for the device. 934 * @irq: The IRQ the device uses to signal interrupts. 935 * @irq_flags: The IRQF_ flags to use for the primary interrupt. 936 * @irq_base: Allocate at specific IRQ number if irq_base > 0. 937 * @chip: Configuration for the interrupt controller. 938 * @data: Runtime data structure for the controller, allocated on success. 939 * 940 * Returns 0 on success or an errno on failure. 941 * 942 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware 943 * node of the regmap is used. 944 */ 945 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, 946 int irq_base, const struct regmap_irq_chip *chip, 947 struct regmap_irq_chip_data **data) 948 { 949 return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq, 950 irq_flags, irq_base, chip, data); 951 } 952 EXPORT_SYMBOL_GPL(regmap_add_irq_chip); 953 954 /** 955 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip 956 * 957 * @irq: Primary IRQ for the device 958 * @d: ®map_irq_chip_data allocated by regmap_add_irq_chip() 959 * 960 * This function also disposes of all mapped IRQs on the chip. 
961 */ 962 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) 963 { 964 unsigned int virq; 965 int i, hwirq; 966 967 if (!d) 968 return; 969 970 free_irq(irq, d); 971 972 /* Dispose all virtual irq from irq domain before removing it */ 973 for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) { 974 /* Ignore hwirq if holes in the IRQ list */ 975 if (!d->chip->irqs[hwirq].mask) 976 continue; 977 978 /* 979 * Find the virtual irq of hwirq on chip and if it is 980 * there then dispose it 981 */ 982 virq = irq_find_mapping(d->domain, hwirq); 983 if (virq) 984 irq_dispose_mapping(virq); 985 } 986 987 irq_domain_remove(d->domain); 988 kfree(d->type_buf); 989 kfree(d->type_buf_def); 990 kfree(d->wake_buf); 991 kfree(d->mask_buf_def); 992 kfree(d->mask_buf); 993 kfree(d->status_reg_buf); 994 kfree(d->status_buf); 995 if (d->config_buf) { 996 for (i = 0; i < d->chip->num_config_bases; i++) 997 kfree(d->config_buf[i]); 998 kfree(d->config_buf); 999 } 1000 kfree(d); 1001 } 1002 EXPORT_SYMBOL_GPL(regmap_del_irq_chip); 1003 1004 static void devm_regmap_irq_chip_release(struct device *dev, void *res) 1005 { 1006 struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res; 1007 1008 regmap_del_irq_chip(d->irq, d); 1009 } 1010 1011 static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) 1012 1013 { 1014 struct regmap_irq_chip_data **r = res; 1015 1016 if (!r || !*r) { 1017 WARN_ON(!r || !*r); 1018 return 0; 1019 } 1020 return *r == data; 1021 } 1022 1023 /** 1024 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode() 1025 * 1026 * @dev: The device pointer on which irq_chip belongs to. 1027 * @fwnode: The firmware node where the IRQ domain should be added to. 1028 * @map: The regmap for the device. 1029 * @irq: The IRQ the device uses to signal interrupts 1030 * @irq_flags: The IRQF_ flags to use for the primary interrupt. 1031 * @irq_base: Allocate at specific IRQ number if irq_base > 0. 
1032 * @chip: Configuration for the interrupt controller. 1033 * @data: Runtime data structure for the controller, allocated on success 1034 * 1035 * Returns 0 on success or an errno on failure. 1036 * 1037 * The ®map_irq_chip_data will be automatically released when the device is 1038 * unbound. 1039 */ 1040 int devm_regmap_add_irq_chip_fwnode(struct device *dev, 1041 struct fwnode_handle *fwnode, 1042 struct regmap *map, int irq, 1043 int irq_flags, int irq_base, 1044 const struct regmap_irq_chip *chip, 1045 struct regmap_irq_chip_data **data) 1046 { 1047 struct regmap_irq_chip_data **ptr, *d; 1048 int ret; 1049 1050 ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr), 1051 GFP_KERNEL); 1052 if (!ptr) 1053 return -ENOMEM; 1054 1055 ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base, 1056 chip, &d); 1057 if (ret < 0) { 1058 devres_free(ptr); 1059 return ret; 1060 } 1061 1062 *ptr = d; 1063 devres_add(dev, ptr); 1064 *data = d; 1065 return 0; 1066 } 1067 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode); 1068 1069 /** 1070 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip() 1071 * 1072 * @dev: The device pointer on which irq_chip belongs to. 1073 * @map: The regmap for the device. 1074 * @irq: The IRQ the device uses to signal interrupts 1075 * @irq_flags: The IRQF_ flags to use for the primary interrupt. 1076 * @irq_base: Allocate at specific IRQ number if irq_base > 0. 1077 * @chip: Configuration for the interrupt controller. 1078 * @data: Runtime data structure for the controller, allocated on success 1079 * 1080 * Returns 0 on success or an errno on failure. 1081 * 1082 * The ®map_irq_chip_data will be automatically released when the device is 1083 * unbound. 
1084 */ 1085 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, 1086 int irq_flags, int irq_base, 1087 const struct regmap_irq_chip *chip, 1088 struct regmap_irq_chip_data **data) 1089 { 1090 return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map, 1091 irq, irq_flags, irq_base, chip, 1092 data); 1093 } 1094 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip); 1095 1096 /** 1097 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip() 1098 * 1099 * @dev: Device for which the resource was allocated. 1100 * @irq: Primary IRQ for the device. 1101 * @data: ®map_irq_chip_data allocated by regmap_add_irq_chip(). 1102 * 1103 * A resource managed version of regmap_del_irq_chip(). 1104 */ 1105 void devm_regmap_del_irq_chip(struct device *dev, int irq, 1106 struct regmap_irq_chip_data *data) 1107 { 1108 int rc; 1109 1110 WARN_ON(irq != data->irq); 1111 rc = devres_release(dev, devm_regmap_irq_chip_release, 1112 devm_regmap_irq_chip_match, data); 1113 1114 if (rc != 0) 1115 WARN_ON(rc); 1116 } 1117 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip); 1118 1119 /** 1120 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip 1121 * 1122 * @data: regmap irq controller to operate on. 1123 * 1124 * Useful for drivers to request their own IRQs. 1125 */ 1126 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data) 1127 { 1128 WARN_ON(!data->irq_base); 1129 return data->irq_base; 1130 } 1131 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base); 1132 1133 /** 1134 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ 1135 * 1136 * @data: regmap irq controller to operate on. 1137 * @irq: index of the interrupt requested in the chip IRQs. 1138 * 1139 * Useful for drivers to request their own IRQs. 
1140 */ 1141 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq) 1142 { 1143 /* Handle holes in the IRQ list */ 1144 if (!data->chip->irqs[irq].mask) 1145 return -EINVAL; 1146 1147 return irq_create_mapping(data->domain, irq); 1148 } 1149 EXPORT_SYMBOL_GPL(regmap_irq_get_virq); 1150 1151 /** 1152 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip 1153 * 1154 * @data: regmap_irq controller to operate on. 1155 * 1156 * Useful for drivers to request their own IRQs and for integration 1157 * with subsystems. For ease of integration NULL is accepted as a 1158 * domain, allowing devices to just call this even if no domain is 1159 * allocated. 1160 */ 1161 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data) 1162 { 1163 if (data) 1164 return data->domain; 1165 else 1166 return NULL; 1167 } 1168 EXPORT_SYMBOL_GPL(regmap_irq_get_domain); 1169