#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
	struct fwnode_handle fwnode;
	char *name;
	void *data;
};

/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @data: optional user-provided data
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 */
struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
{
	struct irqchip_fwid *fwid;
	char *name;

	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
	name = kasprintf(GFP_KERNEL, "irqchip@%p", data);

	if (!fwid || !name) {
		kfree(fwid);
		kfree(name);
		return NULL;
	}

	fwid->name = name;
	fwid->data = data;
	fwid->fwnode.type = FWNODE_IRQCHIP;
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_fwnode);

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
	struct irqchip_fwid *fwid;

	if (WARN_ON(!is_fwnode_irqchip(fwnode)))
		return;

	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
	kfree(fwid->name);
	kfree(fwid);
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
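
/*
 * Example (hedged sketch): an irqchip without a firmware node of its own
 * might pair the two helpers above as shown. "my_chip_data" is a
 * placeholder, not something this file defines.
 *
 *	struct fwnode_handle *fwnode;
 *
 *	fwnode = irq_domain_alloc_fwnode(my_chip_data);
 *	if (!fwnode)
 *		return -ENOMEM;
 *	...create an irq domain referencing fwnode...
 *	irq_domain_free_fwnode(fwnode);
 */
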
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	of_node_get(of_node);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->fwnode = fwnode;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(irq_domain_get_of_node(domain));
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
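
/*
 * Example (hedged sketch): a minimal register/unregister pairing built on
 * the two helpers above. "my_domain_ops", the size of 32 and "priv" are
 * placeholders, not values this file defines. Remember that all mappings
 * must be disposed of before irq_domain_remove() is called.
 *
 *	struct irq_domain *d;
 *
 *	d = __irq_domain_add(of_node_to_fwnode(np), 32, 32, 0,
 *			     &my_domain_ops, priv);
 *	if (!d)
 *		return -ENOMEM;
 *	...
 *	irq_domain_remove(d);
 */
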
/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain; if first_irq is positive, it also allocates
 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirq numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (domain)
		irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
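
/*
 * Example (hedged sketch): a non-DT board with 16 interrupts statically
 * mapped to virqs 16..31 might register a legacy domain as below;
 * "my_legacy_ops" is a placeholder ops structure.
 *
 *	domain = irq_domain_add_legacy(NULL, 16, 16, 0,
 *				       &my_legacy_ops, NULL);
 */
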
/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: bus token for the domain; DOMAIN_BUS_ANY matches any domain
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		 of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
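
/*
 * Example (hedged sketch): a controller that picks its own hwirq numbers
 * can let the virq double as the hwirq via the helper above. The domain
 * pointer "my_domain" is a placeholder, and revmap_direct_max_irq must
 * have been sized accordingly at domain creation.
 *
 *	unsigned int virq = irq_create_direct_mapping(my_domain);
 *
 *	if (!virq)
 *		return -ENOSPC;
 */
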
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}

static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
				      struct irq_fwspec *fwspec)
{
	int i;

	fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
	fwspec->param_count = irq_data->args_count;

	for (i = 0; i < irq_data->args_count; i++)
		fwspec->param[i] = irq_data->args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_fwspec fwspec;

	of_phandle_args_to_fwspec(irq_data, &fwspec);
	return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		irq_domain_free_irqs(virq, 1);
	} else {
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
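
/*
 * Example (hedged sketch): pairing irq_create_mapping() with
 * irq_dispose_mapping() in a driver's setup/teardown paths. "my_domain"
 * and hwirq 3 are placeholders.
 *
 *	unsigned int virq = irq_create_mapping(my_domain, 3);
 *
 *	if (!virq)
 *		return -EINVAL;
 *	...request_irq(virq, ...), do work, free_irq(virq, ...)...
 *	irq_dispose_mapping(virq);
 */
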
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		int count = 0;

		of_node = irq_domain_get_of_node(domain);
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   of_node ? of_node_full_name(of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05x ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
"(DIRECT)" : ""); 781 seq_printf(m, "%s\n", desc->irq_data.domain->name); 782 } 783 784 raw_spin_unlock_irqrestore(&desc->lock, flags); 785 } 786 787 return 0; 788 } 789 790 static int virq_debug_open(struct inode *inode, struct file *file) 791 { 792 return single_open(file, virq_debug_show, inode->i_private); 793 } 794 795 static const struct file_operations virq_debug_fops = { 796 .open = virq_debug_open, 797 .read = seq_read, 798 .llseek = seq_lseek, 799 .release = single_release, 800 }; 801 802 static int __init irq_debugfs_init(void) 803 { 804 if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL, 805 NULL, &virq_debug_fops) == NULL) 806 return -ENOMEM; 807 808 return 0; 809 } 810 __initcall(irq_debugfs_init); 811 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */ 812 813 /** 814 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings 815 * 816 * Device Tree IRQ specifier translation function which works with one cell 817 * bindings where the cell value maps directly to the hwirq number. 818 */ 819 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, 820 const u32 *intspec, unsigned int intsize, 821 unsigned long *out_hwirq, unsigned int *out_type) 822 { 823 if (WARN_ON(intsize < 1)) 824 return -EINVAL; 825 *out_hwirq = intspec[0]; 826 *out_type = IRQ_TYPE_NONE; 827 return 0; 828 } 829 EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); 830 831 /** 832 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings 833 * 834 * Device Tree IRQ specifier translation function which works with two cell 835 * bindings where the cell values map directly to the hwirq number 836 * and linux irq flags. 837 */ 838 int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, 839 const u32 *intspec, unsigned int intsize, 840 irq_hw_number_t *out_hwirq, unsigned int *out_type) 841 { 842 if (WARN_ON(intsize < 2)) 843 return -EINVAL; 844 *out_hwirq = intspec[0]; 845 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; 846 return 0; 847 } 848 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); 849 850 /** 851 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings 852 * 853 * Device Tree IRQ specifier translation function which works with either one 854 * or two cell bindings where the cell values map directly to the hwirq number 855 * and linux irq flags. 856 * 857 * Note: don't use this function unless your interrupt controller explicitly 858 * supports both one and two cell bindings. For the majority of controllers 859 * the _onecell() or _twocell() variants above should be used. 
/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct cpumask *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}
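
/*
 * Example (hedged sketch): a stacked (hierarchical) domain, e.g. an MSI
 * layer over a parent interrupt controller, built with
 * irq_domain_create_hierarchy() below. "parent", "my_msi_ops" and "priv"
 * are placeholders.
 *
 *	d = irq_domain_create_hierarchy(parent, 0, 256, fwnode,
 *					&my_msi_ops, priv);
 */
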
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irq domain into the hierarchy
 * @parent: Parent irq domain to associate with the new domain
 * @flags: Irq domain flags associated to the domain
 * @size: Size of the domain. See below
 * @fwnode: Optional fwnode of the interrupt controller
 * @ops: Pointer to the interrupt domain callbacks
 * @host_data: Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
					       unsigned int flags,
					       unsigned int size,
					       struct fwnode_handle *fwnode,
					       const struct irq_domain_ops *ops,
					       void *host_data)
{
	struct irq_domain *domain;

	if (size)
		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
	else
		domain = irq_domain_create_tree(fwnode, ops, host_data);
	if (domain) {
		domain->parent = parent;
		domain->flags |= flags;
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);

static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, data);
			mutex_unlock(&revmap_trees_mutex);
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
						   struct irq_data *child)
{
	struct irq_data *irq_data;

	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
				irq_data_get_node(child));
	if (irq_data) {
		child->parent_data = irq_data;
		irq_data->irq = child->irq;
		irq_data->common = child->common;
		irq_data->domain = domain;
	}

	return irq_data;
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}

static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data;

	for (irq_data = irq_get_irq_data(virq); irq_data;
	     irq_data = irq_data->parent_data)
		if (irq_data->domain == domain)
			return irq_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);

/**
 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hwirq number
 * @chip: The associated interrupt chip
 * @chip_data: The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
				  irq_hw_number_t hwirq, struct irq_chip *chip,
				  void *chip_data)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	if (!irq_data)
		return -ENOENT;

	irq_data->hwirq = hwirq;
	irq_data->chip = chip ? chip : &no_irq_chip;
	irq_data->chip_data = chip_data;

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);

/**
 * irq_domain_free_irqs_common - Clear irq_data and free the parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			irq_domain_reset_irq_data(irq_data);
	}
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);

/**
 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_handler_data(virq + i, NULL);
		irq_set_handler(virq + i, NULL);
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
}

static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	domain->ops->free(domain, irq_base, nr_irqs);
	if (irq_domain_is_auto_recursive(domain)) {
		BUG_ON(!domain->parent);
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
	}
}

int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
				    unsigned int irq_base,
				    unsigned int nr_irqs, void *arg)
{
	int ret = 0;
	struct irq_domain *parent = domain->parent;
	bool recursive = irq_domain_is_auto_recursive(domain);

	BUG_ON(recursive && !parent);
	if (recursive)
		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
						      nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
	if (ret < 0 && recursive)
		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);

	return ret;
}
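
/*
 * Example (hedged sketch): the two-step setup documented below, as a
 * hierarchy-aware consumer might drive it. "my_domain" and "fwspec" are
 * placeholders and error handling is elided.
 *
 *	virq = irq_domain_alloc_irqs(my_domain, 1, NUMA_NO_NODE, &fwspec);
 *	if (virq > 0)
 *		irq_domain_activate_irq(irq_get_irq_data(virq));
 */
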
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain: domain to allocate from
 * @irq_base: allocate specified IRQ number if irq_base >= 0
 * @nr_irqs: number of IRQs to allocate
 * @node: NUMA node id for memory allocation
 * @arg: domain specific argument
 * @realloc: IRQ descriptors have already been allocated if true
 * @affinity: Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to set up an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptors and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program the hardware with the
 * preallocated resources. In this way, it's easier to roll back when
 * failing to allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (!domain->ops->alloc) {
		pr_debug("domain->ops->alloc() is NULL\n");
		return -ENOSYS;
	}

	if (realloc && irq_base >= 0) {
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}

/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq: base IRQ number
 * @nr_irqs: number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	mutex_lock(&irq_domain_mutex);
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}

/**
 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
 * @domain: Domain whose parent to allocate from
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to allocate
 * @arg: Allocation data (arch/domain specific)
 *
 * Check whether the domain has been set up to be auto-recursive.
 * If not, allocate through the parent domain.
 */
int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs,
				 void *arg)
{
	/* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
	if (irq_domain_is_auto_recursive(domain))
		return 0;

	domain = domain->parent;
	if (domain)
		return irq_domain_alloc_irqs_recursive(domain, irq_base,
						       nr_irqs, arg);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
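
/*
 * Example (hedged sketch): a stacked domain's .alloc() callback commonly
 * allocates from its parent first, then installs its own hwirq and chip.
 * "my_alloc", "my_chip" and the hwirq value are illustrative only.
 *
 *	static int my_alloc(struct irq_domain *d, unsigned int virq,
 *			    unsigned int nr_irqs, void *arg)
 *	{
 *		irq_hw_number_t hwirq = 0;	(derive from arg in practice)
 *		int ret;
 *
 *		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
 *		if (ret < 0)
 *			return ret;
 *		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
 *						     &my_chip, NULL);
 *	}
 */
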
/**
 * irq_domain_free_irqs_parent - Free interrupts from parent domain
 * @domain: Domain whose parent to free through
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to free
 *
 * Check whether the domain has been set up to be auto-recursive.
 * If not, free through the parent domain.
 */
void irq_domain_free_irqs_parent(struct irq_domain *domain,
				 unsigned int irq_base, unsigned int nr_irqs)
{
	/* irq_domain_free_irqs_recursive() will call parent's free */
	if (!irq_domain_is_auto_recursive(domain) && domain->parent)
		irq_domain_free_irqs_recursive(domain->parent, irq_base,
					       nr_irqs);
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);

/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *			     interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (irq_data->parent_data)
			irq_domain_activate_irq(irq_data->parent_data);
		if (domain->ops->activate)
			domain->ops->activate(domain, irq_data);
	}
}

/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 *			       deactivate interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (domain->ops->deactivate)
			domain->ops->deactivate(domain, irq_data);
		if (irq_data->parent_data)
			irq_domain_deactivate_irq(irq_data->parent_data);
	}
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
	/* Hierarchy irq_domains must implement callback alloc() */
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
					 unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
	irq_set_chip_data(virq, chip_data);
	irq_set_handler_data(virq, handler_data);
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */