// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        unsigned int mask_base;
        unsigned int unmask_base;

        void *status_reg_buf;
        unsigned int *main_status_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;
        unsigned int *type_buf;
        unsigned int *type_buf_def;
        unsigned int **config_buf;

        unsigned int irq_reg_stride;

        unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
                                    unsigned int base, int index);

        unsigned int clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     int irq)
{
        return &data->chip->irqs[irq];
}

static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
        struct regmap *map = data->map;

        /*
         * While possible that a user-defined ->get_irq_reg() callback might
         * be linear enough to support bulk reads, most of the time it won't.
         * Therefore only allow them if the default callback is being used.
         */
        return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
               data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
               !map->use_single_read;
}

static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, j, ret;
        u32 reg;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = d->get_irq_reg(d, d->chip->status_base, i);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware. We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (d->mask_base) {
                        if (d->chip->handle_mask_sync) {
                                d->chip->handle_mask_sync(i, d->mask_buf_def[i],
                                                          d->mask_buf[i],
                                                          d->chip->irq_drv_data);
                        } else {
                                reg = d->get_irq_reg(d, d->mask_base, i);
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->mask_buf[i]);
                                if (ret)
                                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                                reg);
                        }
                }

                if (d->unmask_base) {
                        reg = d->get_irq_reg(d, d->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                                 d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret)
                                dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                        reg);
                }

                reg = d->get_irq_reg(d, d->chip->wake_base, i);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;
                /*
                 * Ack all masked interrupts unconditionally; a masked
                 * interrupt that has not been acked would be ignored in
                 * the interrupt handler and could cause an IRQ storm.
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);

                        /* some chips ack by write 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = d->get_irq_reg(d, d->chip->type_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                                 d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        for (i = 0; i < d->chip->num_config_bases; i++) {
                for (j = 0; j < d->chip->num_config_regs; j++) {
                        reg = d->get_irq_reg(d, d->chip->config_base[i], j);
                        ret = regmap_write(map, reg, d->config_buf[i][j]);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to write config %x: %d\n",
                                        reg, ret);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}
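/*
 * Illustrative sketch (not part of the driver): a chip-specific
 * ->handle_mask_sync() callback matching the call sites above, for
 * hardware whose mask bits cannot be written through a plain register
 * update. All "foo" names are hypothetical.
 *
 *      static int foo_handle_mask_sync(int index, unsigned int mask_buf_def,
 *                                      unsigned int mask_buf, void *irq_drv_data)
 *      {
 *              struct foo_chip *foo = irq_drv_data;
 *
 *              // Only the bits covered by mask_buf_def are meaningful.
 *              return foo_write_irq_mask(foo, index,
 *                                        mask_buf & mask_buf_def);
 *      }
 */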
static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        unsigned int reg = irq_data->reg_offset / map->reg_stride;
        unsigned int mask;

        /*
         * The type_in_mask flag means that the underlying hardware uses
         * separate mask bits for each interrupt trigger type, but we want
         * to have a single logical interrupt with a configurable type.
         *
         * If the interrupt we're enabling defines any supported types
         * then instead of using the regular mask bits for this interrupt,
         * use the value previously written to the type buffer at the
         * corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && irq_data->type.types_supported)
                mask = d->type_buf[reg] & irq_data->mask;
        else
                mask = irq_data->mask;

        if (d->chip->clear_on_unmask)
                d->clear_status = true;

        d->mask_buf[reg] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        int reg, ret;
        const struct regmap_irq_type *t = &irq_data->type;

        if ((t->types_supported & type) != type)
                return 0;

        reg = t->type_reg_offset / map->reg_stride;

        if (t->type_reg_mask)
                d->type_buf[reg] &= ~t->type_reg_mask;
        else
                d->type_buf[reg] &= ~(t->type_falling_val |
                                      t->type_rising_val |
                                      t->type_level_low_val |
                                      t->type_level_high_val);
        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                d->type_buf[reg] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                d->type_buf[reg] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                d->type_buf[reg] |= (t->type_falling_val |
                                     t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                d->type_buf[reg] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                d->type_buf[reg] |= t->type_level_low_val;
                break;
        default:
                return -EINVAL;
        }

        if (d->chip->set_type_config) {
                ret = d->chip->set_type_config(d->config_buf, type, irq_data,
                                               reg, d->chip->irq_drv_data);
                if (ret)
                        return ret;
        }

        return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};
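/*
 * Illustrative sketch (not part of the driver): the per-IRQ type
 * description consumed by regmap_irq_set_type() above. The register
 * offset and values are hypothetical; an IRQ declaring this type would
 * support rising-, falling- and both-edge triggers.
 *
 *      static const struct regmap_irq_type foo_irq_type = {
 *              .type_reg_offset        = 0x4,
 *              .type_reg_mask          = 0x3,
 *              .type_rising_val        = 0x1,
 *              .type_falling_val       = 0x2,
 *              .types_supported        = IRQ_TYPE_EDGE_BOTH,
 *      };
 */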
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                    unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        struct regmap_irq_sub_irq_map *subreg;
        unsigned int reg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                reg = data->get_irq_reg(data, chip->status_base, b);
                ret = regmap_read(map, reg, &data->status_buf[b]);
        } else {
                /*
                 * Note we can't use ->get_irq_reg() here because the offsets
                 * in 'subreg' are *not* interchangeable with indices.
                 */
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];
                        unsigned int index = offset / map->reg_stride;

                        if (chip->not_fixed_stride)
                                ret = regmap_read(map,
                                                  chip->status_base + offset,
                                                  &data->status_buf[b]);
                        else
                                ret = regmap_read(map,
                                                  chip->status_base + offset,
                                                  &data->status_buf[index]);

                        if (ret)
                                break;
                }
        }
        return ret;
}

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * Read only the registers with active IRQs if the chip has a main
         * status register; otherwise read in all the statuses, using a
         * single bulk read if possible in order to reduce the I/O overhead.
         */

        if (chip->no_status) {
                /* no status register so default to all active */
                memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
        } else if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);

                /*
                 * We could support bulk reads for the main status registers,
                 * but devices are unlikely to have very many of them, so only
                 * single reads are supported for the sake of simplicity;
                 * bulk reads can be added if needed.
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        /*
                         * For not_fixed_stride, don't use ->get_irq_reg().
                         * It would produce an incorrect result.
                         */
                        if (data->chip->not_fixed_stride)
                                reg = chip->main_status +
                                        i * map->reg_stride * data->irq_reg_stride;
                        else
                                reg = data->get_irq_reg(data,
                                                        chip->main_status, i);

                        ret = regmap_read(map, reg, &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }

                }
        } else if (regmap_irq_can_bulk_read_status(data)) {

                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }

        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        unsigned int reg = data->get_irq_reg(data,
                                        data->chip->status_base, i);
                        ret = regmap_read(map, reg, &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        if (chip->status_invert)
                for (i = 0; i < data->chip->num_regs; i++)
                        data->status_buf[i] = ~data->status_buf[i];

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt. We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = data->get_irq_reg(data, data->chip->ack_base, i);

                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                   ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                   data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the &struct regmap_irq_chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and @index
 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
                                           unsigned int base, int index)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;

        /*
         * FIXME: This is for backward compatibility and should be removed
         * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
         */
        if (chip->not_fixed_stride && chip->sub_reg_offsets) {
                struct regmap_irq_sub_irq_map *subreg;

                subreg = &chip->sub_reg_offsets[0];
                return base + subreg->offset[0];
        }

        return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
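/*
 * Worked example with hypothetical values: for base = 0x10,
 * map->reg_stride = 1 and irq_reg_stride = 2, register index 3
 * resolves to 0x10 + 3 * 1 * 2 = 0x16.
 */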
/**
 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
 * @buf: Buffer containing configuration register values, this is a 2D array of
 *       `num_config_bases` rows, each of `num_config_regs` elements.
 * @type: The requested IRQ type.
 * @irq_data: The IRQ being configured.
 * @idx: Index of the irq's config registers within each array `buf[i]`
 * @irq_drv_data: Driver specific IRQ data
 *
 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
 * chips with one config register. Register values are updated according to
 * the &struct regmap_irq_type data associated with an IRQ.
 */
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
                                      const struct regmap_irq *irq_data,
                                      int idx, void *irq_drv_data)
{
        const struct regmap_irq_type *t = &irq_data->type;

        if (t->type_reg_mask)
                buf[0][idx] &= ~t->type_reg_mask;
        else
                buf[0][idx] &= ~(t->type_falling_val |
                                 t->type_rising_val |
                                 t->type_level_low_val |
                                 t->type_level_high_val);

        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                buf[0][idx] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                buf[0][idx] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                buf[0][idx] |= (t->type_falling_val |
                                t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                buf[0][idx] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                buf[0][idx] |= t->type_level_low_val;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
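/*
 * Illustrative sketch (not part of the driver): a chip description
 * using a single config base with this helper; names, addresses and
 * the elided fields are hypothetical.
 *
 *      static const unsigned int foo_config_base[] = { 0x30 };
 *
 *      static const struct regmap_irq_chip foo_irq_chip = {
 *              ...
 *              .config_base            = foo_config_base,
 *              .num_config_bases       = 1,
 *              .num_config_regs        = 4,
 *              .set_type_config        = regmap_irq_set_type_config_simple,
 *      };
 */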
/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                               struct regmap *map, int irq,
                               int irq_flags, int irq_base,
                               const struct regmap_irq_chip *chip,
                               struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        int num_type_reg;
        int num_regs;
        u32 reg;

        if (chip->num_regs <= 0)
                return -EINVAL;

        if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
                return -EINVAL;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (chip->not_fixed_stride) {
                dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");

                for (i = 0; i < chip->num_regs; i++)
                        if (chip->sub_reg_offsets[i].num_regs != 1)
                                return -EINVAL;
        }

        if (chip->num_type_reg)
                dev_warn(map->dev, "type registers are deprecated; use config registers instead");

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        if (chip->num_main_regs) {
                d->main_status_buf = kcalloc(chip->num_main_regs,
                                             sizeof(*d->main_status_buf),
                                             GFP_KERNEL);

                if (!d->main_status_buf)
                        goto err_alloc;
        }

        d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        /*
         * Use num_config_regs if defined, otherwise fall back to num_type_reg
         * to maintain backward compatibility.
         */
        num_type_reg = chip->num_config_regs ? chip->num_config_regs
                        : chip->num_type_reg;
        num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
        if (num_regs) {
                d->type_buf_def = kcalloc(num_regs,
                                          sizeof(*d->type_buf_def), GFP_KERNEL);
                if (!d->type_buf_def)
                        goto err_alloc;

                d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf),
                                      GFP_KERNEL);
                if (!d->type_buf)
                        goto err_alloc;
        }

        if (chip->num_config_bases && chip->num_config_regs) {
                /*
                 * Create config_buf[num_config_bases][num_config_regs]
                 */
                d->config_buf = kcalloc(chip->num_config_bases,
                                        sizeof(*d->config_buf), GFP_KERNEL);
                if (!d->config_buf)
                        goto err_alloc;

                for (i = 0; i < chip->num_config_bases; i++) {
                        d->config_buf[i] = kcalloc(chip->num_config_regs,
                                                   sizeof(**d->config_buf),
                                                   GFP_KERNEL);
                        if (!d->config_buf[i])
                                goto err_alloc;
                }
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->mask_base && chip->unmask_base &&
            !chip->mask_unmask_non_inverted) {
                /*
                 * Chips that specify both mask_base and unmask_base used to
                 * get inverted mask behavior by default, with no way to ask
                 * for the normal, non-inverted behavior. This "inverted by
                 * default" behavior is deprecated, but we have to support it
                 * until existing drivers have been fixed.
                 *
                 * Existing drivers should be updated by swapping mask_base
                 * and unmask_base and setting mask_unmask_non_inverted=true.
                 * New drivers should always set the flag.
                 */
                dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");

                d->mask_base = chip->unmask_base;
                d->unmask_base = chip->mask_base;
        } else {
                d->mask_base = chip->mask_base;
                d->unmask_base = chip->unmask_base;
        }

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (chip->get_irq_reg)
                d->get_irq_reg = chip->get_irq_reg;
        else
                d->get_irq_reg = regmap_irq_get_irq_reg_linear;

        if (regmap_irq_can_bulk_read_status(d)) {
                d->status_reg_buf = kmalloc_array(chip->num_regs,
                                                  map->format.val_bytes,
                                                  GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];

                if (d->mask_base) {
                        if (chip->handle_mask_sync) {
                                ret = chip->handle_mask_sync(i,
                                                             d->mask_buf_def[i],
                                                             d->mask_buf[i],
                                                             chip->irq_drv_data);
                                if (ret)
                                        goto err_alloc;
                        } else {
                                reg = d->get_irq_reg(d, d->mask_base, i);
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->mask_buf[i]);
                                if (ret) {
                                        dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                                reg, ret);
                                        goto err_alloc;
                                }
                        }
                }

                if (d->unmask_base) {
                        reg = d->get_irq_reg(d, d->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                                 d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }

                if (!chip->init_ack_masked)
                        continue;

                /* Ack masked but set interrupts */
                if (d->chip->no_status) {
                        /* no status register so default to all active */
                        d->status_buf[i] = GENMASK(31, 0);
                } else {
                        reg = d->get_irq_reg(d, d->chip->status_base, i);
                        ret = regmap_read(map, reg, &d->status_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                        ret);
                                goto err_alloc;
                        }
                }

                if (chip->status_invert)
                        d->status_buf[i] = ~d->status_buf[i];

                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                   ~(d->status_buf[i] & d->mask_buf[i]));
                        else
                                ret = regmap_write(map, reg,
                                                   d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = d->get_irq_reg(d, d->chip->wake_base, i);

                        if (chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         0);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (chip->num_type_reg && !chip->type_in_mask) {
                for (i = 0; i < chip->num_type_reg; ++i) {
                        reg = d->get_irq_reg(d, d->chip->type_base, i);

                        ret = regmap_read(map, reg, &d->type_buf_def[i]);

                        if (ret) {
                                dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (irq_base)
                d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
                                                     irq_base, 0,
                                                     &regmap_domain_ops, d);
        else
                d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
                                                     &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
                                   irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
                        irq, chip->name, ret);
                goto err_domain;
        }

        *data = d;

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        if (d->config_buf) {
                for (i = 0; i < chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
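/*
 * Illustrative sketch (not part of the driver): typical registration
 * from an MFD or codec driver. All "FOO"/"foo" names are hypothetical;
 * REGMAP_IRQ_REG() expands to a struct regmap_irq entry with the given
 * register index and mask.
 *
 *      static const struct regmap_irq foo_irqs[] = {
 *              REGMAP_IRQ_REG(FOO_IRQ_ALARM, 0, BIT(0)),
 *              REGMAP_IRQ_REG(FOO_IRQ_GPIO, 0, BIT(1)),
 *      };
 *
 *      static const struct regmap_irq_chip foo_irq_chip = {
 *              .name           = "foo",
 *              .status_base    = FOO_REG_INT_STATUS,
 *              .mask_base      = FOO_REG_INT_MASK,
 *              .ack_base       = FOO_REG_INT_STATUS,
 *              .num_regs       = 1,
 *              .irqs           = foo_irqs,
 *              .num_irqs       = ARRAY_SIZE(foo_irqs),
 *      };
 *
 *      ret = devm_regmap_add_irq_chip(dev, map, i2c->irq,
 *                                     IRQF_TRIGGER_LOW, 0,
 *                                     &foo_irq_chip, &irq_data);
 */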
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
                                          irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        unsigned int virq;
        int i, hwirq;

        if (!d)
                return;

        free_irq(irq, d);

        /* Dispose of all virtual IRQs from the domain before removing it */
        for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
                /* Skip hwirqs that are holes in the IRQ list */
                if (!d->chip->irqs[hwirq].mask)
                        continue;

                /*
                 * Find the virtual irq of hwirq on chip and if it is
                 * there then dispose it
                 */
                virq = irq_find_mapping(d->domain, hwirq);
                if (virq)
                        irq_dispose_mapping(virq);
        }

        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        if (d->config_buf) {
                for (i = 0; i < d->chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
        struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

        regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }
        return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device to which the irq_chip belongs.
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
                                    struct fwnode_handle *fwnode,
                                    struct regmap *map, int irq,
                                    int irq_flags, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data **ptr, *d;
        int ret;

        ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
                                         chip, &d);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = d;
        devres_add(dev, ptr);
        *data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
                             int irq_flags, int irq_base,
                             const struct regmap_irq_chip *chip,
                             struct regmap_irq_chip_data **data)
{
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
                                               irq, irq_flags, irq_base, chip,
                                               data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
{
        int rc;

        WARN_ON(irq != data->irq);
        rc = devres_release(dev, devm_regmap_irq_chip_release,
                            devm_regmap_irq_chip_match, data);

        if (rc != 0)
                WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems. For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
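/*
 * Illustrative sketch (not part of the driver): a child driver mapping
 * one of the chip's interrupts to a virtual IRQ before requesting it;
 * the "FOO"/"foo" names are hypothetical. Note irq_create_mapping()
 * returns 0 on failure, hence the <= 0 check.
 *
 *      virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);
 *      if (virq <= 0)
 *              return virq ? virq : -ENXIO;
 *
 *      ret = devm_request_threaded_irq(dev, virq, NULL, foo_alarm_irq,
 *                                      IRQF_ONESHOT, "foo-alarm", foo);
 */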