/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        void *status_reg_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;

        unsigned int irq_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     int irq)
{
        return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}
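/*
 * genirq calls irq_bus_lock() before and irq_bus_sync_unlock() after
 * any mask, unmask or wake reconfiguration, so the callbacks below
 * only update the buffered state under d->lock; the actual writes to
 * the (potentially slow, sleeping) bus are all done here in one pass.
 */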
static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, ret;
        u32 reg;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        /*
         * If there's been a change in the mask, write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->chip->mask_invert)
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                else
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x: %d\n",
                                reg, ret);

                if (d->wake_buf) {
                        reg = d->chip->wake_base +
                                (i * map->reg_stride * d->irq_reg_stride);
                        if (d->chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_wake           = regmap_irq_set_wake,
};
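/*
 * Thread for the chip's primary IRQ: read back the status registers,
 * acknowledge anything that fired, then dispatch each unmasked source
 * as a nested virtual IRQ.  Runs in process context, so sleeping bus
 * I/O is fine here.
 */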
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        return IRQ_NONE;
                }
        }

        /*
         * Read in the statuses, using a single bulk read if possible
         * in order to reduce the I/O overheads.
         */
        if (!map->use_single_rw && map->reg_stride == 1 &&
            data->irq_reg_stride == 1) {
                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        if (chip->runtime_pm)
                                pm_runtime_put(map->dev);
                        return IRQ_NONE;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                return IRQ_NONE;
                        }
                }

        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        ret = regmap_read(map, chip->status_base +
                                          (i * map->reg_stride
                                           * data->irq_reg_stride),
                                          &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                if (chip->runtime_pm)
                                        pm_runtime_put(map->dev);
                                return IRQ_NONE;
                        }
                }
        }

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && chip->ack_base) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * data->irq_reg_stride);
                        ret = regmap_write(map, reg, data->status_buf[i]);
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);

        /*
         * ARM needs us to explicitly flag the IRQ as valid and will
         * set it noprobe when we do so.
         */
#ifdef CONFIG_ARM
        set_irq_flags(virq, IRQF_VALID);
#else
        irq_set_noprobe(virq);
#endif

        return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_twocell,
};
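/*
 * Since the xlate op above is irq_domain_xlate_twocell, device tree
 * users reference an interrupt with a two cell specifier: the index
 * of the interrupt within the chip followed by the trigger type.  A
 * purely illustrative (hypothetical) consumer binding might look like:
 *
 *      interrupt-parent = <&pmic>;
 *      interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 */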
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Base IRQ number to allocate, or zero to map IRQs
 *             dynamically through a linear domain.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        u32 reg;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        *data = d;

        d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (!map->use_single_rw && map->reg_stride == 1 &&
            d->irq_reg_stride == 1) {
                d->status_reg_buf = kmalloc(map->format.val_bytes *
                                            chip->num_regs, GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];
                reg = chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (chip->mask_invert)
                        ret = regmap_update_bits(map, reg,
                                        d->mask_buf[i], ~d->mask_buf[i]);
                else
                        ret = regmap_update_bits(map, reg,
                                        d->mask_buf[i], d->mask_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                reg, ret);
                        goto err_alloc;
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = chip->wake_base +
                                (i * map->reg_stride * d->irq_reg_stride);

                        if (chip->wake_invert)
                                ret = regmap_update_bits(map, reg,
                                                         d->mask_buf_def[i],
                                                         0);
                        else
                                ret = regmap_update_bits(map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (irq_base)
                d->domain = irq_domain_add_legacy(map->dev->of_node,
                                                  chip->num_irqs, irq_base, 0,
                                                  &regmap_domain_ops, d);
        else
                d->domain = irq_domain_add_linear(map->dev->of_node,
                                                  chip->num_irqs,
                                                  &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
                goto err_domain;
        }

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
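/*
 * Illustrative usage sketch only: all "foo" names and registers below
 * are hypothetical, not part of this API.  A driver describes each
 * interrupt's status/mask bit, wraps them in a regmap_irq_chip and
 * registers the lot; IRQF_ONESHOT is required since only a threaded
 * handler is installed.
 *
 *      static const struct regmap_irq foo_irqs[] = {
 *              [FOO_IRQ_GPIO] = { .reg_offset = 0, .mask = BIT(0) },
 *              [FOO_IRQ_ADC]  = { .reg_offset = 0, .mask = BIT(1) },
 *      };
 *
 *      static const struct regmap_irq_chip foo_irq_chip = {
 *              .name = "foo",
 *              .status_base = FOO_REG_IRQ_STATUS,
 *              .mask_base = FOO_REG_IRQ_MASK,
 *              .ack_base = FOO_REG_IRQ_ACK,
 *              .num_regs = 1,
 *              .irqs = foo_irqs,
 *              .num_irqs = ARRAY_SIZE(foo_irqs),
 *      };
 *
 *      ret = regmap_add_irq_chip(foo->regmap, foo->irq,
 *                                IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
 *                                &foo_irq_chip, &foo->irq_data);
 */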
/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        if (!d)
                return;

        free_irq(irq, d);
        /* We should unmap the domain but... */
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap_irq controller to operate on.
 * @irq:  index of the interrupt requested in the chip IRQs
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration a NULL @data pointer is
 * accepted, in which case NULL is returned, allowing devices to just
 * call this even if no domain is allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
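/*
 * Illustrative usage sketch only, continuing the hypothetical "foo"
 * chip above: a child driver translates a chip interrupt index into a
 * virtual IRQ number and then requests it as normal.
 *
 *      int virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_ADC);
 *
 *      if (virq < 0)
 *              return virq;
 *
 *      ret = request_threaded_irq(virq, NULL, foo_adc_thread,
 *                                 IRQF_ONESHOT, "foo-adc", foo);
 */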