1 #define pr_fmt(fmt) "irq: " fmt 2 3 #include <linux/debugfs.h> 4 #include <linux/hardirq.h> 5 #include <linux/interrupt.h> 6 #include <linux/irq.h> 7 #include <linux/irqdesc.h> 8 #include <linux/irqdomain.h> 9 #include <linux/module.h> 10 #include <linux/mutex.h> 11 #include <linux/of.h> 12 #include <linux/of_address.h> 13 #include <linux/of_irq.h> 14 #include <linux/topology.h> 15 #include <linux/seq_file.h> 16 #include <linux/slab.h> 17 #include <linux/smp.h> 18 #include <linux/fs.h> 19 20 static LIST_HEAD(irq_domain_list); 21 static DEFINE_MUTEX(irq_domain_mutex); 22 23 static DEFINE_MUTEX(revmap_trees_mutex); 24 static struct irq_domain *irq_default_domain; 25 26 static void irq_domain_check_hierarchy(struct irq_domain *domain); 27 28 struct irqchip_fwid { 29 struct fwnode_handle fwnode; 30 char *name; 31 void *data; 32 }; 33 34 /** 35 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for 36 * identifying an irq domain 37 * @data: optional user-provided data 38 * 39 * Allocate a struct device_node, and return a poiner to the embedded 40 * fwnode_handle (or NULL on failure). 41 */ 42 struct fwnode_handle *irq_domain_alloc_fwnode(void *data) 43 { 44 struct irqchip_fwid *fwid; 45 char *name; 46 47 fwid = kzalloc(sizeof(*fwid), GFP_KERNEL); 48 name = kasprintf(GFP_KERNEL, "irqchip@%p", data); 49 50 if (!fwid || !name) { 51 kfree(fwid); 52 kfree(name); 53 return NULL; 54 } 55 56 fwid->name = name; 57 fwid->data = data; 58 fwid->fwnode.type = FWNODE_IRQCHIP; 59 return &fwid->fwnode; 60 } 61 EXPORT_SYMBOL_GPL(irq_domain_alloc_fwnode); 62 63 /** 64 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle 65 * 66 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode. 67 */ 68 void irq_domain_free_fwnode(struct fwnode_handle *fwnode) 69 { 70 struct irqchip_fwid *fwid; 71 72 if (WARN_ON(!is_fwnode_irqchip(fwnode))) 73 return; 74 75 fwid = container_of(fwnode, struct irqchip_fwid, fwnode); 76 kfree(fwid->name); 77 kfree(fwid); 78 } 79 EXPORT_SYMBOL_GPL(irq_domain_free_fwnode); 80 81 /** 82 * __irq_domain_add() - Allocate a new irq_domain data structure 83 * @fwnode: firmware node for the interrupt controller 84 * @size: Size of linear map; 0 for radix mapping only 85 * @hwirq_max: Maximum number of interrupts supported by controller 86 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no 87 * direct mapping 88 * @ops: domain callbacks 89 * @host_data: Controller private data pointer 90 * 91 * Allocates and initialize and irq_domain structure. 92 * Returns pointer to IRQ domain, or NULL on failure. 93 */ 94 struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, 95 irq_hw_number_t hwirq_max, int direct_max, 96 const struct irq_domain_ops *ops, 97 void *host_data) 98 { 99 struct device_node *of_node = to_of_node(fwnode); 100 struct irq_domain *domain; 101 102 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), 103 GFP_KERNEL, of_node_to_nid(of_node)); 104 if (WARN_ON(!domain)) 105 return NULL; 106 107 of_node_get(of_node); 108 109 /* Fill structure */ 110 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); 111 domain->ops = ops; 112 domain->host_data = host_data; 113 domain->fwnode = fwnode; 114 domain->hwirq_max = hwirq_max; 115 domain->revmap_size = size; 116 domain->revmap_direct_max_irq = direct_max; 117 irq_domain_check_hierarchy(domain); 118 119 mutex_lock(&irq_domain_mutex); 120 list_add(&domain->link, &irq_domain_list); 121 mutex_unlock(&irq_domain_mutex); 122 123 pr_debug("Added domain %s\n", domain->name); 124 return domain; 125 } 126 EXPORT_SYMBOL_GPL(__irq_domain_add); 127 128 /** 129 * irq_domain_remove() - Remove an irq domain. 130 * @domain: domain to remove 131 * 132 * This routine is used to remove an irq domain. The caller must ensure 133 * that all mappings within the domain have been disposed of prior to 134 * use, depending on the revmap type. 135 */ 136 void irq_domain_remove(struct irq_domain *domain) 137 { 138 mutex_lock(&irq_domain_mutex); 139 140 WARN_ON(!radix_tree_empty(&domain->revmap_tree)); 141 142 list_del(&domain->link); 143 144 /* 145 * If the going away domain is the default one, reset it. 146 */ 147 if (unlikely(irq_default_domain == domain)) 148 irq_set_default_host(NULL); 149 150 mutex_unlock(&irq_domain_mutex); 151 152 pr_debug("Removed domain %s\n", domain->name); 153 154 of_node_put(irq_domain_get_of_node(domain)); 155 kfree(domain); 156 } 157 EXPORT_SYMBOL_GPL(irq_domain_remove); 158 159 /** 160 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs 161 * @of_node: pointer to interrupt controller's device tree node. 162 * @size: total number of irqs in mapping 163 * @first_irq: first number of irq block assigned to the domain, 164 * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then 165 * pre-map all of the irqs in the domain to virqs starting at first_irq. 166 * @ops: domain callbacks 167 * @host_data: Controller private data pointer 168 * 169 * Allocates an irq_domain, and optionally if first_irq is positive then also 170 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq. 171 * 172 * This is intended to implement the expected behaviour for most 173 * interrupt controllers. If device tree is used, then first_irq will be 0 and 174 * irqs get mapped dynamically on the fly. However, if the controller requires 175 * static virq assignments (non-DT boot) then it will set that up correctly. 176 */ 177 struct irq_domain *irq_domain_add_simple(struct device_node *of_node, 178 unsigned int size, 179 unsigned int first_irq, 180 const struct irq_domain_ops *ops, 181 void *host_data) 182 { 183 struct irq_domain *domain; 184 185 domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); 186 if (!domain) 187 return NULL; 188 189 if (first_irq > 0) { 190 if (IS_ENABLED(CONFIG_SPARSE_IRQ)) { 191 /* attempt to allocated irq_descs */ 192 int rc = irq_alloc_descs(first_irq, first_irq, size, 193 of_node_to_nid(of_node)); 194 if (rc < 0) 195 pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 196 first_irq); 197 } 198 irq_domain_associate_many(domain, first_irq, 0, size); 199 } 200 201 return domain; 202 } 203 EXPORT_SYMBOL_GPL(irq_domain_add_simple); 204 205 /** 206 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. 207 * @of_node: pointer to interrupt controller's device tree node. 208 * @size: total number of irqs in legacy mapping 209 * @first_irq: first number of irq block assigned to the domain 210 * @first_hwirq: first hwirq number to use for the translation. Should normally 211 * be '0', but a positive integer can be used if the effective 212 * hwirqs numbering does not begin at zero. 213 * @ops: map/unmap domain callbacks 214 * @host_data: Controller private data pointer 215 * 216 * Note: the map() callback will be called before this function returns 217 * for all legacy interrupts except 0 (which is always the invalid irq for 218 * a legacy controller). 219 */ 220 struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, 221 unsigned int size, 222 unsigned int first_irq, 223 irq_hw_number_t first_hwirq, 224 const struct irq_domain_ops *ops, 225 void *host_data) 226 { 227 struct irq_domain *domain; 228 229 domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size, 230 first_hwirq + size, 0, ops, host_data); 231 if (domain) 232 irq_domain_associate_many(domain, first_irq, first_hwirq, size); 233 234 return domain; 235 } 236 EXPORT_SYMBOL_GPL(irq_domain_add_legacy); 237 238 /** 239 * irq_find_matching_fwspec() - Locates a domain for a given fwspec 240 * @fwspec: FW specifier for an interrupt 241 * @bus_token: domain-specific data 242 */ 243 struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, 244 enum irq_domain_bus_token bus_token) 245 { 246 struct irq_domain *h, *found = NULL; 247 struct fwnode_handle *fwnode = fwspec->fwnode; 248 int rc; 249 250 /* We might want to match the legacy controller last since 251 * it might potentially be set to match all interrupts in 252 * the absence of a device node. This isn't a problem so far 253 * yet though... 254 * 255 * bus_token == DOMAIN_BUS_ANY matches any domain, any other 256 * values must generate an exact match for the domain to be 257 * selected. 258 */ 259 mutex_lock(&irq_domain_mutex); 260 list_for_each_entry(h, &irq_domain_list, link) { 261 if (h->ops->select && fwspec->param_count) 262 rc = h->ops->select(h, fwspec, bus_token); 263 else if (h->ops->match) 264 rc = h->ops->match(h, to_of_node(fwnode), bus_token); 265 else 266 rc = ((fwnode != NULL) && (h->fwnode == fwnode) && 267 ((bus_token == DOMAIN_BUS_ANY) || 268 (h->bus_token == bus_token))); 269 270 if (rc) { 271 found = h; 272 break; 273 } 274 } 275 mutex_unlock(&irq_domain_mutex); 276 return found; 277 } 278 EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); 279 280 /** 281 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement 282 * IRQ remapping 283 * 284 * Return: false if any MSI irq domain does not support IRQ remapping, 285 * true otherwise (including if there is no MSI irq domain) 286 */ 287 bool irq_domain_check_msi_remap(void) 288 { 289 struct irq_domain *h; 290 bool ret = true; 291 292 mutex_lock(&irq_domain_mutex); 293 list_for_each_entry(h, &irq_domain_list, link) { 294 if (irq_domain_is_msi(h) && 295 !irq_domain_hierarchical_is_msi_remap(h)) { 296 ret = false; 297 break; 298 } 299 } 300 mutex_unlock(&irq_domain_mutex); 301 return ret; 302 } 303 EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap); 304 305 /** 306 * irq_set_default_host() - Set a "default" irq domain 307 * @domain: default domain pointer 308 * 309 * For convenience, it's possible to set a "default" domain that will be used 310 * whenever NULL is passed to irq_create_mapping(). It makes life easier for 311 * platforms that want to manipulate a few hard coded interrupt numbers that 312 * aren't properly represented in the device-tree. 313 */ 314 void irq_set_default_host(struct irq_domain *domain) 315 { 316 pr_debug("Default domain set to @0x%p\n", domain); 317 318 irq_default_domain = domain; 319 } 320 EXPORT_SYMBOL_GPL(irq_set_default_host); 321 322 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) 323 { 324 struct irq_data *irq_data = irq_get_irq_data(irq); 325 irq_hw_number_t hwirq; 326 327 if (WARN(!irq_data || irq_data->domain != domain, 328 "virq%i doesn't exist; cannot disassociate\n", irq)) 329 return; 330 331 hwirq = irq_data->hwirq; 332 irq_set_status_flags(irq, IRQ_NOREQUEST); 333 334 /* remove chip and handler */ 335 irq_set_chip_and_handler(irq, NULL, NULL); 336 337 /* Make sure it's completed */ 338 synchronize_irq(irq); 339 340 /* Tell the PIC about it */ 341 if (domain->ops->unmap) 342 domain->ops->unmap(domain, irq); 343 smp_mb(); 344 345 irq_data->domain = NULL; 346 irq_data->hwirq = 0; 347 348 /* Clear reverse map for this hwirq */ 349 if (hwirq < domain->revmap_size) { 350 domain->linear_revmap[hwirq] = 0; 351 } else { 352 mutex_lock(&revmap_trees_mutex); 353 radix_tree_delete(&domain->revmap_tree, hwirq); 354 mutex_unlock(&revmap_trees_mutex); 355 } 356 } 357 358 int irq_domain_associate(struct irq_domain *domain, unsigned int virq, 359 irq_hw_number_t hwirq) 360 { 361 struct irq_data *irq_data = irq_get_irq_data(virq); 362 int ret; 363 364 if (WARN(hwirq >= domain->hwirq_max, 365 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name)) 366 return -EINVAL; 367 if (WARN(!irq_data, "error: virq%i is not allocated", virq)) 368 return -EINVAL; 369 if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) 370 return -EINVAL; 371 372 mutex_lock(&irq_domain_mutex); 373 irq_data->hwirq = hwirq; 374 irq_data->domain = domain; 375 if (domain->ops->map) { 376 ret = domain->ops->map(domain, virq, hwirq); 377 if (ret != 0) { 378 /* 379 * If map() returns -EPERM, this interrupt is protected 380 * by the firmware or some other service and shall not 381 * be mapped. Don't bother telling the user about it. 382 */ 383 if (ret != -EPERM) { 384 pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n", 385 domain->name, hwirq, virq, ret); 386 } 387 irq_data->domain = NULL; 388 irq_data->hwirq = 0; 389 mutex_unlock(&irq_domain_mutex); 390 return ret; 391 } 392 393 /* If not already assigned, give the domain the chip's name */ 394 if (!domain->name && irq_data->chip) 395 domain->name = irq_data->chip->name; 396 } 397 398 if (hwirq < domain->revmap_size) { 399 domain->linear_revmap[hwirq] = virq; 400 } else { 401 mutex_lock(&revmap_trees_mutex); 402 radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); 403 mutex_unlock(&revmap_trees_mutex); 404 } 405 mutex_unlock(&irq_domain_mutex); 406 407 irq_clear_status_flags(virq, IRQ_NOREQUEST); 408 409 return 0; 410 } 411 EXPORT_SYMBOL_GPL(irq_domain_associate); 412 413 void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, 414 irq_hw_number_t hwirq_base, int count) 415 { 416 struct device_node *of_node; 417 int i; 418 419 of_node = irq_domain_get_of_node(domain); 420 pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, 421 of_node_full_name(of_node), irq_base, (int)hwirq_base, count); 422 423 for (i = 0; i < count; i++) { 424 irq_domain_associate(domain, irq_base + i, hwirq_base + i); 425 } 426 } 427 EXPORT_SYMBOL_GPL(irq_domain_associate_many); 428 429 /** 430 * irq_create_direct_mapping() - Allocate an irq for direct mapping 431 * @domain: domain to allocate the irq for or NULL for default domain 432 * 433 * This routine is used for irq controllers which can choose the hardware 434 * interrupt numbers they generate. In such a case it's simplest to use 435 * the linux irq as the hardware interrupt number. It still uses the linear 436 * or radix tree to store the mapping, but the irq controller can optimize 437 * the revmap path by using the hwirq directly. 438 */ 439 unsigned int irq_create_direct_mapping(struct irq_domain *domain) 440 { 441 struct device_node *of_node; 442 unsigned int virq; 443 444 if (domain == NULL) 445 domain = irq_default_domain; 446 447 of_node = irq_domain_get_of_node(domain); 448 virq = irq_alloc_desc_from(1, of_node_to_nid(of_node)); 449 if (!virq) { 450 pr_debug("create_direct virq allocation failed\n"); 451 return 0; 452 } 453 if (virq >= domain->revmap_direct_max_irq) { 454 pr_err("ERROR: no free irqs available below %i maximum\n", 455 domain->revmap_direct_max_irq); 456 irq_free_desc(virq); 457 return 0; 458 } 459 pr_debug("create_direct obtained virq %d\n", virq); 460 461 if (irq_domain_associate(domain, virq, virq)) { 462 irq_free_desc(virq); 463 return 0; 464 } 465 466 return virq; 467 } 468 EXPORT_SYMBOL_GPL(irq_create_direct_mapping); 469 470 /** 471 * irq_create_mapping() - Map a hardware interrupt into linux irq space 472 * @domain: domain owning this hardware interrupt or NULL for default domain 473 * @hwirq: hardware irq number in that domain space 474 * 475 * Only one mapping per hardware interrupt is permitted. Returns a linux 476 * irq number. 477 * If the sense/trigger is to be specified, set_irq_type() should be called 478 * on the number returned from that call. 479 */ 480 unsigned int irq_create_mapping(struct irq_domain *domain, 481 irq_hw_number_t hwirq) 482 { 483 struct device_node *of_node; 484 int virq; 485 486 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); 487 488 /* Look for default domain if nececssary */ 489 if (domain == NULL) 490 domain = irq_default_domain; 491 if (domain == NULL) { 492 WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); 493 return 0; 494 } 495 pr_debug("-> using domain @%p\n", domain); 496 497 of_node = irq_domain_get_of_node(domain); 498 499 /* Check if mapping already exists */ 500 virq = irq_find_mapping(domain, hwirq); 501 if (virq) { 502 pr_debug("-> existing mapping on virq %d\n", virq); 503 return virq; 504 } 505 506 /* Allocate a virtual interrupt number */ 507 virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL); 508 if (virq <= 0) { 509 pr_debug("-> virq allocation failed\n"); 510 return 0; 511 } 512 513 if (irq_domain_associate(domain, virq, hwirq)) { 514 irq_free_desc(virq); 515 return 0; 516 } 517 518 pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", 519 hwirq, of_node_full_name(of_node), virq); 520 521 return virq; 522 } 523 EXPORT_SYMBOL_GPL(irq_create_mapping); 524 525 /** 526 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs 527 * @domain: domain owning the interrupt range 528 * @irq_base: beginning of linux IRQ range 529 * @hwirq_base: beginning of hardware IRQ range 530 * @count: Number of interrupts to map 531 * 532 * This routine is used for allocating and mapping a range of hardware 533 * irqs to linux irqs where the linux irq numbers are at pre-defined 534 * locations. For use by controllers that already have static mappings 535 * to insert in to the domain. 536 * 537 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time 538 * domain insertion. 539 * 540 * 0 is returned upon success, while any failure to establish a static 541 * mapping is treated as an error. 542 */ 543 int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, 544 irq_hw_number_t hwirq_base, int count) 545 { 546 struct device_node *of_node; 547 int ret; 548 549 of_node = irq_domain_get_of_node(domain); 550 ret = irq_alloc_descs(irq_base, irq_base, count, 551 of_node_to_nid(of_node)); 552 if (unlikely(ret < 0)) 553 return ret; 554 555 irq_domain_associate_many(domain, irq_base, hwirq_base, count); 556 return 0; 557 } 558 EXPORT_SYMBOL_GPL(irq_create_strict_mappings); 559 560 static int irq_domain_translate(struct irq_domain *d, 561 struct irq_fwspec *fwspec, 562 irq_hw_number_t *hwirq, unsigned int *type) 563 { 564 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 565 if (d->ops->translate) 566 return d->ops->translate(d, fwspec, hwirq, type); 567 #endif 568 if (d->ops->xlate) 569 return d->ops->xlate(d, to_of_node(fwspec->fwnode), 570 fwspec->param, fwspec->param_count, 571 hwirq, type); 572 573 /* If domain has no translation, then we assume interrupt line */ 574 *hwirq = fwspec->param[0]; 575 return 0; 576 } 577 578 static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data, 579 struct irq_fwspec *fwspec) 580 { 581 int i; 582 583 fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL; 584 fwspec->param_count = irq_data->args_count; 585 586 for (i = 0; i < irq_data->args_count; i++) 587 fwspec->param[i] = irq_data->args[i]; 588 } 589 590 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) 591 { 592 struct irq_domain *domain; 593 struct irq_data *irq_data; 594 irq_hw_number_t hwirq; 595 unsigned int type = IRQ_TYPE_NONE; 596 int virq; 597 598 if (fwspec->fwnode) { 599 domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED); 600 if (!domain) 601 domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY); 602 } else { 603 domain = irq_default_domain; 604 } 605 606 if (!domain) { 607 pr_warn("no irq domain found for %s !\n", 608 of_node_full_name(to_of_node(fwspec->fwnode))); 609 return 0; 610 } 611 612 if (irq_domain_translate(domain, fwspec, &hwirq, &type)) 613 return 0; 614 615 /* 616 * WARN if the irqchip returns a type with bits 617 * outside the sense mask set and clear these bits. 618 */ 619 if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) 620 type &= IRQ_TYPE_SENSE_MASK; 621 622 /* 623 * If we've already configured this interrupt, 624 * don't do it again, or hell will break loose. 625 */ 626 virq = irq_find_mapping(domain, hwirq); 627 if (virq) { 628 /* 629 * If the trigger type is not specified or matches the 630 * current trigger type then we are done so return the 631 * interrupt number. 632 */ 633 if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) 634 return virq; 635 636 /* 637 * If the trigger type has not been set yet, then set 638 * it now and return the interrupt number. 639 */ 640 if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { 641 irq_data = irq_get_irq_data(virq); 642 if (!irq_data) 643 return 0; 644 645 irqd_set_trigger_type(irq_data, type); 646 return virq; 647 } 648 649 pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", 650 hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); 651 return 0; 652 } 653 654 if (irq_domain_is_hierarchy(domain)) { 655 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); 656 if (virq <= 0) 657 return 0; 658 } else { 659 /* Create mapping */ 660 virq = irq_create_mapping(domain, hwirq); 661 if (!virq) 662 return virq; 663 } 664 665 irq_data = irq_get_irq_data(virq); 666 if (!irq_data) { 667 if (irq_domain_is_hierarchy(domain)) 668 irq_domain_free_irqs(virq, 1); 669 else 670 irq_dispose_mapping(virq); 671 return 0; 672 } 673 674 /* Store trigger type */ 675 irqd_set_trigger_type(irq_data, type); 676 677 return virq; 678 } 679 EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping); 680 681 unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) 682 { 683 struct irq_fwspec fwspec; 684 685 of_phandle_args_to_fwspec(irq_data, &fwspec); 686 return irq_create_fwspec_mapping(&fwspec); 687 } 688 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 689 690 /** 691 * irq_dispose_mapping() - Unmap an interrupt 692 * @virq: linux irq number of the interrupt to unmap 693 */ 694 void irq_dispose_mapping(unsigned int virq) 695 { 696 struct irq_data *irq_data = irq_get_irq_data(virq); 697 struct irq_domain *domain; 698 699 if (!virq || !irq_data) 700 return; 701 702 domain = irq_data->domain; 703 if (WARN_ON(domain == NULL)) 704 return; 705 706 if (irq_domain_is_hierarchy(domain)) { 707 irq_domain_free_irqs(virq, 1); 708 } else { 709 irq_domain_disassociate(domain, virq); 710 irq_free_desc(virq); 711 } 712 } 713 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 714 715 /** 716 * irq_find_mapping() - Find a linux irq from an hw irq number. 717 * @domain: domain owning this hardware interrupt 718 * @hwirq: hardware irq number in that domain space 719 */ 720 unsigned int irq_find_mapping(struct irq_domain *domain, 721 irq_hw_number_t hwirq) 722 { 723 struct irq_data *data; 724 725 /* Look for default domain if nececssary */ 726 if (domain == NULL) 727 domain = irq_default_domain; 728 if (domain == NULL) 729 return 0; 730 731 if (hwirq < domain->revmap_direct_max_irq) { 732 data = irq_domain_get_irq_data(domain, hwirq); 733 if (data && data->hwirq == hwirq) 734 return hwirq; 735 } 736 737 /* Check if the hwirq is in the linear revmap. */ 738 if (hwirq < domain->revmap_size) 739 return domain->linear_revmap[hwirq]; 740 741 rcu_read_lock(); 742 data = radix_tree_lookup(&domain->revmap_tree, hwirq); 743 rcu_read_unlock(); 744 return data ? data->irq : 0; 745 } 746 EXPORT_SYMBOL_GPL(irq_find_mapping); 747 748 #ifdef CONFIG_IRQ_DOMAIN_DEBUG 749 static int virq_debug_show(struct seq_file *m, void *private) 750 { 751 unsigned long flags; 752 struct irq_desc *desc; 753 struct irq_domain *domain; 754 struct radix_tree_iter iter; 755 void *data, **slot; 756 int i; 757 758 seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", 759 "name", "mapped", "linear-max", "direct-max", "devtree-node"); 760 mutex_lock(&irq_domain_mutex); 761 list_for_each_entry(domain, &irq_domain_list, link) { 762 struct device_node *of_node; 763 int count = 0; 764 of_node = irq_domain_get_of_node(domain); 765 radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0) 766 count++; 767 seq_printf(m, "%c%-16s %6u %10u %10u %s\n", 768 domain == irq_default_domain ? '*' : ' ', domain->name, 769 domain->revmap_size + count, domain->revmap_size, 770 domain->revmap_direct_max_irq, 771 of_node ? of_node_full_name(of_node) : ""); 772 } 773 mutex_unlock(&irq_domain_mutex); 774 775 seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq", 776 "chip name", (int)(2 * sizeof(void *) + 2), "chip data", 777 "active", "type", "domain"); 778 779 for (i = 1; i < nr_irqs; i++) { 780 desc = irq_to_desc(i); 781 if (!desc) 782 continue; 783 784 raw_spin_lock_irqsave(&desc->lock, flags); 785 domain = desc->irq_data.domain; 786 787 if (domain) { 788 struct irq_chip *chip; 789 int hwirq = desc->irq_data.hwirq; 790 bool direct; 791 792 seq_printf(m, "%5d ", i); 793 seq_printf(m, "0x%05x ", hwirq); 794 795 chip = irq_desc_get_chip(desc); 796 seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none"); 797 798 data = irq_desc_get_chip_data(desc); 799 seq_printf(m, data ? "0x%p " : " %p ", data); 800 801 seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' '); 802 direct = (i == hwirq) && (i < domain->revmap_direct_max_irq); 803 seq_printf(m, "%6s%-8s ", 804 (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX", 805 direct ? "(DIRECT)" : ""); 806 seq_printf(m, "%s\n", desc->irq_data.domain->name); 807 } 808 809 raw_spin_unlock_irqrestore(&desc->lock, flags); 810 } 811 812 return 0; 813 } 814 815 static int virq_debug_open(struct inode *inode, struct file *file) 816 { 817 return single_open(file, virq_debug_show, inode->i_private); 818 } 819 820 static const struct file_operations virq_debug_fops = { 821 .open = virq_debug_open, 822 .read = seq_read, 823 .llseek = seq_lseek, 824 .release = single_release, 825 }; 826 827 static int __init irq_debugfs_init(void) 828 { 829 if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL, 830 NULL, &virq_debug_fops) == NULL) 831 return -ENOMEM; 832 833 return 0; 834 } 835 __initcall(irq_debugfs_init); 836 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */ 837 838 /** 839 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings 840 * 841 * Device Tree IRQ specifier translation function which works with one cell 842 * bindings where the cell value maps directly to the hwirq number. 843 */ 844 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, 845 const u32 *intspec, unsigned int intsize, 846 unsigned long *out_hwirq, unsigned int *out_type) 847 { 848 if (WARN_ON(intsize < 1)) 849 return -EINVAL; 850 *out_hwirq = intspec[0]; 851 *out_type = IRQ_TYPE_NONE; 852 return 0; 853 } 854 EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); 855 856 /** 857 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings 858 * 859 * Device Tree IRQ specifier translation function which works with two cell 860 * bindings where the cell values map directly to the hwirq number 861 * and linux irq flags. 862 */ 863 int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, 864 const u32 *intspec, unsigned int intsize, 865 irq_hw_number_t *out_hwirq, unsigned int *out_type) 866 { 867 if (WARN_ON(intsize < 2)) 868 return -EINVAL; 869 *out_hwirq = intspec[0]; 870 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; 871 return 0; 872 } 873 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); 874 875 /** 876 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings 877 * 878 * Device Tree IRQ specifier translation function which works with either one 879 * or two cell bindings where the cell values map directly to the hwirq number 880 * and linux irq flags. 881 * 882 * Note: don't use this function unless your interrupt controller explicitly 883 * supports both one and two cell bindings. For the majority of controllers 884 * the _onecell() or _twocell() variants above should be used. 885 */ 886 int irq_domain_xlate_onetwocell(struct irq_domain *d, 887 struct device_node *ctrlr, 888 const u32 *intspec, unsigned int intsize, 889 unsigned long *out_hwirq, unsigned int *out_type) 890 { 891 if (WARN_ON(intsize < 1)) 892 return -EINVAL; 893 *out_hwirq = intspec[0]; 894 if (intsize > 1) 895 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; 896 else 897 *out_type = IRQ_TYPE_NONE; 898 return 0; 899 } 900 EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); 901 902 const struct irq_domain_ops irq_domain_simple_ops = { 903 .xlate = irq_domain_xlate_onetwocell, 904 }; 905 EXPORT_SYMBOL_GPL(irq_domain_simple_ops); 906 907 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, 908 int node, const struct cpumask *affinity) 909 { 910 unsigned int hint; 911 912 if (virq >= 0) { 913 virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE, 914 affinity); 915 } else { 916 hint = hwirq % nr_irqs; 917 if (hint == 0) 918 hint++; 919 virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE, 920 affinity); 921 if (virq <= 0 && hint > 1) { 922 virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE, 923 affinity); 924 } 925 } 926 927 return virq; 928 } 929 930 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 931 /** 932 * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy 933 * @parent: Parent irq domain to associate with the new domain 934 * @flags: Irq domain flags associated to the domain 935 * @size: Size of the domain. See below 936 * @fwnode: Optional fwnode of the interrupt controller 937 * @ops: Pointer to the interrupt domain callbacks 938 * @host_data: Controller private data pointer 939 * 940 * If @size is 0 a tree domain is created, otherwise a linear domain. 941 * 942 * If successful the parent is associated to the new domain and the 943 * domain flags are set. 944 * Returns pointer to IRQ domain, or NULL on failure. 945 */ 946 struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, 947 unsigned int flags, 948 unsigned int size, 949 struct fwnode_handle *fwnode, 950 const struct irq_domain_ops *ops, 951 void *host_data) 952 { 953 struct irq_domain *domain; 954 955 if (size) 956 domain = irq_domain_create_linear(fwnode, size, ops, host_data); 957 else 958 domain = irq_domain_create_tree(fwnode, ops, host_data); 959 if (domain) { 960 domain->parent = parent; 961 domain->flags |= flags; 962 } 963 964 return domain; 965 } 966 EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy); 967 968 static void irq_domain_insert_irq(int virq) 969 { 970 struct irq_data *data; 971 972 for (data = irq_get_irq_data(virq); data; data = data->parent_data) { 973 struct irq_domain *domain = data->domain; 974 irq_hw_number_t hwirq = data->hwirq; 975 976 if (hwirq < domain->revmap_size) { 977 domain->linear_revmap[hwirq] = virq; 978 } else { 979 mutex_lock(&revmap_trees_mutex); 980 radix_tree_insert(&domain->revmap_tree, hwirq, data); 981 mutex_unlock(&revmap_trees_mutex); 982 } 983 984 /* If not already assigned, give the domain the chip's name */ 985 if (!domain->name && data->chip) 986 domain->name = data->chip->name; 987 } 988 989 irq_clear_status_flags(virq, IRQ_NOREQUEST); 990 } 991 992 static void irq_domain_remove_irq(int virq) 993 { 994 struct irq_data *data; 995 996 irq_set_status_flags(virq, IRQ_NOREQUEST); 997 irq_set_chip_and_handler(virq, NULL, NULL); 998 synchronize_irq(virq); 999 smp_mb(); 1000 1001 for (data = irq_get_irq_data(virq); data; data = data->parent_data) { 1002 struct irq_domain *domain = data->domain; 1003 irq_hw_number_t hwirq = data->hwirq; 1004 1005 if (hwirq < domain->revmap_size) { 1006 domain->linear_revmap[hwirq] = 0; 1007 } else { 1008 mutex_lock(&revmap_trees_mutex); 1009 radix_tree_delete(&domain->revmap_tree, hwirq); 1010 mutex_unlock(&revmap_trees_mutex); 1011 } 1012 } 1013 } 1014 1015 static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, 1016 struct irq_data *child) 1017 { 1018 struct irq_data *irq_data; 1019 1020 irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, 1021 irq_data_get_node(child)); 1022 if (irq_data) { 1023 child->parent_data = irq_data; 1024 irq_data->irq = child->irq; 1025 irq_data->common = child->common; 1026 irq_data->domain = domain; 1027 } 1028 1029 return irq_data; 1030 } 1031 1032 static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) 1033 { 1034 struct irq_data *irq_data, *tmp; 1035 int i; 1036 1037 for (i = 0; i < nr_irqs; i++) { 1038 irq_data = irq_get_irq_data(virq + i); 1039 tmp = irq_data->parent_data; 1040 irq_data->parent_data = NULL; 1041 irq_data->domain = NULL; 1042 1043 while (tmp) { 1044 irq_data = tmp; 1045 tmp = tmp->parent_data; 1046 kfree(irq_data); 1047 } 1048 } 1049 } 1050 1051 static int irq_domain_alloc_irq_data(struct irq_domain *domain, 1052 unsigned int virq, unsigned int nr_irqs) 1053 { 1054 struct irq_data *irq_data; 1055 struct irq_domain *parent; 1056 int i; 1057 1058 /* The outermost irq_data is embedded in struct irq_desc */ 1059 for (i = 0; i < nr_irqs; i++) { 1060 irq_data = irq_get_irq_data(virq + i); 1061 irq_data->domain = domain; 1062 1063 for (parent = domain->parent; parent; parent = parent->parent) { 1064 irq_data = irq_domain_insert_irq_data(parent, irq_data); 1065 if (!irq_data) { 1066 irq_domain_free_irq_data(virq, i + 1); 1067 return -ENOMEM; 1068 } 1069 } 1070 } 1071 1072 return 0; 1073 } 1074 1075 /** 1076 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain 1077 * @domain: domain to match 1078 * @virq: IRQ number to get irq_data 1079 */ 1080 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 1081 unsigned int virq) 1082 { 1083 struct irq_data *irq_data; 1084 1085 for (irq_data = irq_get_irq_data(virq); irq_data; 1086 irq_data = irq_data->parent_data) 1087 if (irq_data->domain == domain) 1088 return irq_data; 1089 1090 return NULL; 1091 } 1092 EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); 1093 1094 /** 1095 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain 1096 * @domain: Interrupt domain to match 1097 * @virq: IRQ number 1098 * @hwirq: The hwirq number 1099 * @chip: The associated interrupt chip 1100 * @chip_data: The associated chip data 1101 */ 1102 int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, 1103 irq_hw_number_t hwirq, struct irq_chip *chip, 1104 void *chip_data) 1105 { 1106 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); 1107 1108 if (!irq_data) 1109 return -ENOENT; 1110 1111 irq_data->hwirq = hwirq; 1112 irq_data->chip = chip ? chip : &no_irq_chip; 1113 irq_data->chip_data = chip_data; 1114 1115 return 0; 1116 } 1117 EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip); 1118 1119 /** 1120 * irq_domain_set_info - Set the complete data for a @virq in @domain 1121 * @domain: Interrupt domain to match 1122 * @virq: IRQ number 1123 * @hwirq: The hardware interrupt number 1124 * @chip: The associated interrupt chip 1125 * @chip_data: The associated interrupt chip data 1126 * @handler: The interrupt flow handler 1127 * @handler_data: The interrupt flow handler data 1128 * @handler_name: The interrupt handler name 1129 */ 1130 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, 1131 irq_hw_number_t hwirq, struct irq_chip *chip, 1132 void *chip_data, irq_flow_handler_t handler, 1133 void *handler_data, const char *handler_name) 1134 { 1135 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); 1136 __irq_set_handler(virq, handler, 0, handler_name); 1137 irq_set_handler_data(virq, handler_data); 1138 } 1139 EXPORT_SYMBOL(irq_domain_set_info); 1140 1141 /** 1142 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data 1143 * @irq_data: The pointer to irq_data 1144 */ 1145 void irq_domain_reset_irq_data(struct irq_data *irq_data) 1146 { 1147 irq_data->hwirq = 0; 1148 irq_data->chip = &no_irq_chip; 1149 irq_data->chip_data = NULL; 1150 } 1151 EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); 1152 1153 /** 1154 * irq_domain_free_irqs_common - Clear irq_data and free the parent 1155 * @domain: Interrupt domain to match 1156 * @virq: IRQ number to start with 1157 * @nr_irqs: The number of irqs to free 1158 */ 1159 void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, 1160 unsigned int nr_irqs) 1161 { 1162 struct irq_data *irq_data; 1163 int i; 1164 1165 for (i = 0; i < nr_irqs; i++) { 1166 irq_data = irq_domain_get_irq_data(domain, virq + i); 1167 if (irq_data) 1168 irq_domain_reset_irq_data(irq_data); 1169 } 1170 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 1171 } 1172 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common); 1173 1174 /** 1175 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent 1176 * @domain: Interrupt domain to match 1177 * @virq: IRQ number to start with 1178 * @nr_irqs: The number of irqs to free 1179 */ 1180 void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, 1181 unsigned int nr_irqs) 1182 { 1183 int i; 1184 1185 for (i = 0; i < nr_irqs; i++) { 1186 irq_set_handler_data(virq + i, NULL); 1187 irq_set_handler(virq + i, NULL); 1188 } 1189 irq_domain_free_irqs_common(domain, virq, nr_irqs); 1190 } 1191 1192 static bool irq_domain_is_auto_recursive(struct irq_domain *domain) 1193 { 1194 return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; 1195 } 1196 1197 static void irq_domain_free_irqs_recursive(struct irq_domain *domain, 1198 unsigned int irq_base, 1199 unsigned int nr_irqs) 1200 { 1201 domain->ops->free(domain, irq_base, nr_irqs); 1202 if (irq_domain_is_auto_recursive(domain)) { 1203 BUG_ON(!domain->parent); 1204 irq_domain_free_irqs_recursive(domain->parent, irq_base, 1205 nr_irqs); 1206 } 1207 } 1208 1209 int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, 1210 unsigned int irq_base, 1211 unsigned int nr_irqs, void *arg) 1212 { 1213 int ret = 0; 1214 struct irq_domain *parent = domain->parent; 1215 bool recursive = irq_domain_is_auto_recursive(domain); 1216 1217 BUG_ON(recursive && !parent); 1218 if (recursive) 1219 ret = irq_domain_alloc_irqs_recursive(parent, irq_base, 1220 nr_irqs, arg); 1221 if (ret < 0) 1222 return ret; 1223 1224 ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); 1225 if (ret < 0 && recursive) 1226 irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); 1227 1228 return ret; 1229 } 1230 1231 /** 1232 * __irq_domain_alloc_irqs - Allocate IRQs from domain 1233 * @domain: domain to allocate from 1234 * @irq_base: allocate specified IRQ nubmer if irq_base >= 0 1235 * @nr_irqs: number of IRQs to allocate 1236 * @node: NUMA node id for memory allocation 1237 * @arg: domain specific argument 1238 * @realloc: IRQ descriptors have already been allocated if true 1239 * @affinity: Optional irq affinity mask for multiqueue devices 1240 * 1241 * Allocate IRQ numbers and initialized all data structures to support 1242 * hierarchy IRQ domains. 1243 * Parameter @realloc is mainly to support legacy IRQs. 1244 * Returns error code or allocated IRQ number 1245 * 1246 * The whole process to setup an IRQ has been split into two steps. 1247 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ 1248 * descriptor and required hardware resources. The second step, 1249 * irq_domain_activate_irq(), is to program hardwares with preallocated 1250 * resources. In this way, it's easier to rollback when failing to 1251 * allocate resources. 1252 */ 1253 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, 1254 unsigned int nr_irqs, int node, void *arg, 1255 bool realloc, const struct cpumask *affinity) 1256 { 1257 int i, ret, virq; 1258 1259 if (domain == NULL) { 1260 domain = irq_default_domain; 1261 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) 1262 return -EINVAL; 1263 } 1264 1265 if (!domain->ops->alloc) { 1266 pr_debug("domain->ops->alloc() is NULL\n"); 1267 return -ENOSYS; 1268 } 1269 1270 if (realloc && irq_base >= 0) { 1271 virq = irq_base; 1272 } else { 1273 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, 1274 affinity); 1275 if (virq < 0) { 1276 pr_debug("cannot allocate IRQ(base %d, count %d)\n", 1277 irq_base, nr_irqs); 1278 return virq; 1279 } 1280 } 1281 1282 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { 1283 pr_debug("cannot allocate memory for IRQ%d\n", virq); 1284 ret = -ENOMEM; 1285 goto out_free_desc; 1286 } 1287 1288 mutex_lock(&irq_domain_mutex); 1289 ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); 1290 if (ret < 0) { 1291 mutex_unlock(&irq_domain_mutex); 1292 goto out_free_irq_data; 1293 } 1294 for (i = 0; i < nr_irqs; i++) 1295 irq_domain_insert_irq(virq + i); 1296 mutex_unlock(&irq_domain_mutex); 1297 1298 return virq; 1299 1300 out_free_irq_data: 1301 irq_domain_free_irq_data(virq, nr_irqs); 1302 out_free_desc: 1303 irq_free_descs(virq, nr_irqs); 1304 return ret; 1305 } 1306 1307 /** 1308 * irq_domain_free_irqs - Free IRQ number and associated data structures 1309 * @virq: base IRQ number 1310 * @nr_irqs: number of IRQs to free 1311 */ 1312 void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) 1313 { 1314 struct irq_data *data = irq_get_irq_data(virq); 1315 int i; 1316 1317 if (WARN(!data || !data->domain || !data->domain->ops->free, 1318 "NULL pointer, cannot free irq\n")) 1319 return; 1320 1321 mutex_lock(&irq_domain_mutex); 1322 for (i = 0; i < nr_irqs; i++) 1323 irq_domain_remove_irq(virq + i); 1324 irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); 1325 mutex_unlock(&irq_domain_mutex); 1326 1327 irq_domain_free_irq_data(virq, nr_irqs); 1328 irq_free_descs(virq, nr_irqs); 1329 } 1330 1331 /** 1332 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain 1333 * @irq_base: Base IRQ number 1334 * @nr_irqs: Number of IRQs to allocate 1335 * @arg: Allocation data (arch/domain specific) 1336 * 1337 * Check whether the domain has been setup recursive. If not allocate 1338 * through the parent domain. 1339 */ 1340 int irq_domain_alloc_irqs_parent(struct irq_domain *domain, 1341 unsigned int irq_base, unsigned int nr_irqs, 1342 void *arg) 1343 { 1344 /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */ 1345 if (irq_domain_is_auto_recursive(domain)) 1346 return 0; 1347 1348 domain = domain->parent; 1349 if (domain) 1350 return irq_domain_alloc_irqs_recursive(domain, irq_base, 1351 nr_irqs, arg); 1352 return -ENOSYS; 1353 } 1354 EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent); 1355 1356 /** 1357 * irq_domain_free_irqs_parent - Free interrupts from parent domain 1358 * @irq_base: Base IRQ number 1359 * @nr_irqs: Number of IRQs to free 1360 * 1361 * Check whether the domain has been setup recursive. If not free 1362 * through the parent domain. 1363 */ 1364 void irq_domain_free_irqs_parent(struct irq_domain *domain, 1365 unsigned int irq_base, unsigned int nr_irqs) 1366 { 1367 /* irq_domain_free_irqs_recursive() will call parent's free */ 1368 if (!irq_domain_is_auto_recursive(domain) && domain->parent) 1369 irq_domain_free_irqs_recursive(domain->parent, irq_base, 1370 nr_irqs); 1371 } 1372 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1373 1374 static void __irq_domain_activate_irq(struct irq_data *irq_data) 1375 { 1376 if (irq_data && irq_data->domain) { 1377 struct irq_domain *domain = irq_data->domain; 1378 1379 if (irq_data->parent_data) 1380 __irq_domain_activate_irq(irq_data->parent_data); 1381 if (domain->ops->activate) 1382 domain->ops->activate(domain, irq_data); 1383 } 1384 } 1385 1386 static void __irq_domain_deactivate_irq(struct irq_data *irq_data) 1387 { 1388 if (irq_data && irq_data->domain) { 1389 struct irq_domain *domain = irq_data->domain; 1390 1391 if (domain->ops->deactivate) 1392 domain->ops->deactivate(domain, irq_data); 1393 if (irq_data->parent_data) 1394 __irq_domain_deactivate_irq(irq_data->parent_data); 1395 } 1396 } 1397 1398 /** 1399 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1400 * interrupt 1401 * @irq_data: outermost irq_data associated with interrupt 1402 * 1403 * This is the second step to call domain_ops->activate to program interrupt 1404 * controllers, so the interrupt could actually get delivered. 1405 */ 1406 void irq_domain_activate_irq(struct irq_data *irq_data) 1407 { 1408 if (!irqd_is_activated(irq_data)) { 1409 __irq_domain_activate_irq(irq_data); 1410 irqd_set_activated(irq_data); 1411 } 1412 } 1413 1414 /** 1415 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to 1416 * deactivate interrupt 1417 * @irq_data: outermost irq_data associated with interrupt 1418 * 1419 * It calls domain_ops->deactivate to program interrupt controllers to disable 1420 * interrupt delivery. 1421 */ 1422 void irq_domain_deactivate_irq(struct irq_data *irq_data) 1423 { 1424 if (irqd_is_activated(irq_data)) { 1425 __irq_domain_deactivate_irq(irq_data); 1426 irqd_clr_activated(irq_data); 1427 } 1428 } 1429 1430 static void irq_domain_check_hierarchy(struct irq_domain *domain) 1431 { 1432 /* Hierarchy irq_domains must implement callback alloc() */ 1433 if (domain->ops->alloc) 1434 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; 1435 } 1436 1437 /** 1438 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any 1439 * parent has MSI remapping support 1440 * @domain: domain pointer 1441 */ 1442 bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) 1443 { 1444 for (; domain; domain = domain->parent) { 1445 if (irq_domain_is_msi_remap(domain)) 1446 return true; 1447 } 1448 return false; 1449 } 1450 #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 1451 /** 1452 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain 1453 * @domain: domain to match 1454 * @virq: IRQ number to get irq_data 1455 */ 1456 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 1457 unsigned int virq) 1458 { 1459 struct irq_data *irq_data = irq_get_irq_data(virq); 1460 1461 return (irq_data && irq_data->domain == domain) ? irq_data : NULL; 1462 } 1463 EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); 1464 1465 /** 1466 * irq_domain_set_info - Set the complete data for a @virq in @domain 1467 * @domain: Interrupt domain to match 1468 * @virq: IRQ number 1469 * @hwirq: The hardware interrupt number 1470 * @chip: The associated interrupt chip 1471 * @chip_data: The associated interrupt chip data 1472 * @handler: The interrupt flow handler 1473 * @handler_data: The interrupt flow handler data 1474 * @handler_name: The interrupt handler name 1475 */ 1476 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, 1477 irq_hw_number_t hwirq, struct irq_chip *chip, 1478 void *chip_data, irq_flow_handler_t handler, 1479 void *handler_data, const char *handler_name) 1480 { 1481 irq_set_chip_and_handler_name(virq, chip, handler, handler_name); 1482 irq_set_chip_data(virq, chip_data); 1483 irq_set_handler_data(virq, handler_data); 1484 } 1485 1486 static void irq_domain_check_hierarchy(struct irq_domain *domain) 1487 { 1488 } 1489 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 1490