#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
                                  irq_hw_number_t hwirq, int node);
static void irq_domain_check_hierarchy(struct irq_domain *domain);

struct irqchip_fwid {
        struct fwnode_handle fwnode;
        char *name;
        void *data;
};

/**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
 * @data: optional user-provided data
 *
 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
 * fwnode_handle (or NULL on failure).
 */
struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
{
        struct irqchip_fwid *fwid;
        char *name;

        fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
        name = kasprintf(GFP_KERNEL, "irqchip@%p", data);

        if (!fwid || !name) {
                kfree(fwid);
                kfree(name);
                return NULL;
        }

        fwid->name = name;
        fwid->data = data;
        fwid->fwnode.type = FWNODE_IRQCHIP;
        return &fwid->fwnode;
}

/**
 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 *
 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 */
void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
{
        struct irqchip_fwid *fwid;

        if (WARN_ON(fwnode->type != FWNODE_IRQCHIP))
                return;

        fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
        kfree(fwid->name);
        kfree(fwid);
}
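
/*
 * Example (illustrative sketch, not part of this file): a platform whose
 * interrupt controller has no firmware node can manufacture one and hand
 * it to a domain allocator. All "acme_" identifiers are hypothetical.
 *
 *      struct fwnode_handle *fwnode;
 *      struct irq_domain *domain;
 *
 *      fwnode = irq_domain_alloc_fwnode(acme_pic_regs);
 *      if (!fwnode)
 *              return -ENOMEM;
 *      domain = irq_domain_create_linear(fwnode, ACME_NR_IRQS,
 *                                        &acme_irq_ops, acme_pic);
 *      if (!domain) {
 *              irq_domain_free_fwnode(fwnode);
 *              return -ENOMEM;
 *      }
 */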

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller, or NULL
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data)
{
        struct irq_domain *domain;
        struct device_node *of_node;

        of_node = to_of_node(fwnode);

        domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
                              GFP_KERNEL, of_node_to_nid(of_node));
        if (WARN_ON(!domain))
                return NULL;

        of_node_get(of_node);

        /* Fill structure */
        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
        domain->ops = ops;
        domain->host_data = host_data;
        domain->fwnode = fwnode;
        domain->hwirq_max = hwirq_max;
        domain->revmap_size = size;
        domain->revmap_direct_max_irq = direct_max;
        irq_domain_check_hierarchy(domain);

        mutex_lock(&irq_domain_mutex);
        list_add(&domain->link, &irq_domain_list);
        mutex_unlock(&irq_domain_mutex);

        pr_debug("Added domain %s\n", domain->name);
        return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
        mutex_lock(&irq_domain_mutex);

        /*
         * radix_tree_delete() takes care of destroying the root
         * node when all entries are removed. Shout if there are
         * any mappings left.
         */
        WARN_ON(domain->revmap_tree.height);

        list_del(&domain->link);

        /*
         * If the going away domain is the default one, reset it.
         */
        if (unlikely(irq_default_domain == domain))
                irq_set_default_host(NULL);

        mutex_unlock(&irq_domain_mutex);

        pr_debug("Removed domain %s\n", domain->name);

        of_node_put(irq_domain_get_of_node(domain));
        kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
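
/*
 * Example (hedged sketch): drivers rarely call __irq_domain_add()
 * directly; they use one of the irq_domain_add_*() or irq_domain_create_*()
 * wrappers and supply a small irq_domain_ops. All "acme_" identifiers
 * below are hypothetical.
 *
 *      static int acme_irq_map(struct irq_domain *d, unsigned int virq,
 *                              irq_hw_number_t hwirq)
 *      {
 *              irq_set_chip_and_handler(virq, &acme_irq_chip,
 *                                       handle_level_irq);
 *              irq_set_chip_data(virq, d->host_data);
 *              return 0;
 *      }
 *
 *      static const struct irq_domain_ops acme_irq_ops = {
 *              .map    = acme_irq_map,
 *              .xlate  = irq_domain_xlate_onecell,
 *      };
 */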

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *      pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *      pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain and, if first_irq is positive, also allocates
 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
                                         unsigned int size,
                                         unsigned int first_irq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain;

        domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
        if (!domain)
                return NULL;

        if (first_irq > 0) {
                if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
                        /* attempt to allocate irq_descs */
                        int rc = irq_alloc_descs(first_irq, first_irq, size,
                                                 of_node_to_nid(of_node));
                        if (rc < 0)
                                pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
                                        first_irq);
                }
                irq_domain_associate_many(domain, first_irq, 0, size);
        }

        return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *      be '0', but a positive integer can be used if the effective
 *      hwirq numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
                                         unsigned int size,
                                         unsigned int first_irq,
                                         irq_hw_number_t first_hwirq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain;

        domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
                                  first_hwirq + size, 0, ops, host_data);
        if (domain)
                irq_domain_associate_many(domain, first_irq, first_hwirq, size);

        return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
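
/*
 * Example (hedged sketch): typical probe-time use from a DT driver,
 * combined with an ops structure like the acme_irq_ops sketch above.
 * The "acme_" identifiers are hypothetical.
 *
 *      domain = irq_domain_add_simple(node, ACME_NR_IRQS, 0,
 *                                     &acme_irq_ops, acme_pic);
 *      if (!domain)
 *              return -ENOMEM;
 */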

/**
 * irq_find_matching_fwnode() - Locates a domain for a given fwnode
 * @fwnode: FW descriptor of the interrupt controller
 * @bus_token: bus token to match; DOMAIN_BUS_ANY matches any domain
 */
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
                                            enum irq_domain_bus_token bus_token)
{
        struct irq_domain *h, *found = NULL;
        int rc;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far,
         * though...
         *
         * bus_token == DOMAIN_BUS_ANY matches any domain, any other
         * values must generate an exact match for the domain to be
         * selected.
         */
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(h, &irq_domain_list, link) {
                if (h->ops->match)
                        rc = h->ops->match(h, to_of_node(fwnode), bus_token);
                else
                        rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
                              ((bus_token == DOMAIN_BUS_ANY) ||
                               (h->bus_token == bus_token)));

                if (rc) {
                        found = h;
                        break;
                }
        }
        mutex_unlock(&irq_domain_mutex);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
        pr_debug("Default domain set to @0x%p\n", domain);

        irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
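
/*
 * Example (hedged sketch): a board file with a single root PIC can make
 * it the default domain, so later code can map hwirqs without naming the
 * domain explicitly. "acme_pic_domain" is hypothetical. A subsequent
 * irq_create_mapping(NULL, 3) then resolves via the default domain:
 *
 *      irq_set_default_host(acme_pic_domain);
 *      ...
 *      virq = irq_create_mapping(NULL, 3);
 */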

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
        struct irq_data *irq_data = irq_get_irq_data(irq);
        irq_hw_number_t hwirq;

        if (WARN(!irq_data || irq_data->domain != domain,
                 "virq%i doesn't exist; cannot disassociate\n", irq))
                return;

        hwirq = irq_data->hwirq;
        irq_set_status_flags(irq, IRQ_NOREQUEST);

        /* remove chip and handler */
        irq_set_chip_and_handler(irq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(irq);

        /* Tell the PIC about it */
        if (domain->ops->unmap)
                domain->ops->unmap(domain, irq);
        smp_mb();

        irq_data->domain = NULL;
        irq_data->hwirq = 0;

        /* Clear reverse map for this hwirq */
        if (hwirq < domain->revmap_size) {
                domain->linear_revmap[hwirq] = 0;
        } else {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&domain->revmap_tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
        }
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
                         irq_hw_number_t hwirq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        int ret;

        if (WARN(hwirq >= domain->hwirq_max,
                 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
                return -EINVAL;
        if (WARN(!irq_data, "error: virq%i is not allocated", virq))
                return -EINVAL;
        if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
                return -EINVAL;

        mutex_lock(&irq_domain_mutex);
        irq_data->hwirq = hwirq;
        irq_data->domain = domain;
        if (domain->ops->map) {
                ret = domain->ops->map(domain, virq, hwirq);
                if (ret != 0) {
                        /*
                         * If map() returns -EPERM, this interrupt is protected
                         * by the firmware or some other service and shall not
                         * be mapped. Don't bother telling the user about it.
                         */
                        if (ret != -EPERM) {
                                pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
                                        domain->name, hwirq, virq, ret);
                        }
                        irq_data->domain = NULL;
                        irq_data->hwirq = 0;
                        mutex_unlock(&irq_domain_mutex);
                        return ret;
                }

                /* If not already assigned, give the domain the chip's name */
                if (!domain->name && irq_data->chip)
                        domain->name = irq_data->chip->name;
        }

        if (hwirq < domain->revmap_size) {
                domain->linear_revmap[hwirq] = virq;
        } else {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
                mutex_unlock(&revmap_trees_mutex);
        }
        mutex_unlock(&irq_domain_mutex);

        irq_clear_status_flags(virq, IRQ_NOREQUEST);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
                               irq_hw_number_t hwirq_base, int count)
{
        struct device_node *of_node;
        int i;

        of_node = irq_domain_get_of_node(domain);
        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
                 of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

        for (i = 0; i < count; i++) {
                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
        }
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
        struct device_node *of_node;
        unsigned int virq;

        if (domain == NULL)
                domain = irq_default_domain;

        of_node = irq_domain_get_of_node(domain);
        virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
        if (!virq) {
                pr_debug("create_direct virq allocation failed\n");
                return 0;
        }
        if (virq >= domain->revmap_direct_max_irq) {
                pr_err("ERROR: no free irqs available below %i maximum\n",
                       domain->revmap_direct_max_irq);
                irq_free_desc(virq);
                return 0;
        }
        pr_debug("create_direct obtained virq %d\n", virq);

        if (irq_domain_associate(domain, virq, virq)) {
                irq_free_desc(virq);
                return 0;
        }

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
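
/*
 * Example (hedged sketch): controllers that can be told which hwirq to
 * raise let the virq double as the hwirq. "acme_program_vector" is a
 * hypothetical helper that tells the hardware which number to signal.
 *
 *      virq = irq_create_direct_mapping(domain);
 *      if (!virq)
 *              return -ENOSPC;
 *      acme_program_vector(dev, virq);
 */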

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
                                irq_hw_number_t hwirq)
{
        struct device_node *of_node;
        int virq;

        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL) {
                WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
                return 0;
        }
        pr_debug("-> using domain @%p\n", domain);

        of_node = irq_domain_get_of_node(domain);

        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
                pr_debug("-> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Allocate a virtual interrupt number */
        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
        if (virq <= 0) {
                pr_debug("-> virq allocation failed\n");
                return 0;
        }

        if (irq_domain_associate(domain, virq, hwirq)) {
                irq_free_desc(virq);
                return 0;
        }

        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
                 hwirq, of_node_full_name(of_node), virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
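
/*
 * Example (hedged sketch): the usual consumer pattern is to map first,
 * then request. "acme_isr" is a hypothetical handler.
 *
 *      virq = irq_create_mapping(domain, hwirq);
 *      if (!virq)
 *              return -EINVAL;
 *      ret = request_irq(virq, acme_isr, 0, "acme", dev);
 */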

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
                               irq_hw_number_t hwirq_base, int count)
{
        struct device_node *of_node;
        int ret;

        of_node = irq_domain_get_of_node(domain);
        ret = irq_alloc_descs(irq_base, irq_base, count,
                              of_node_to_nid(of_node));
        if (unlikely(ret < 0))
                return ret;

        irq_domain_associate_many(domain, irq_base, hwirq_base, count);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

static int irq_domain_translate(struct irq_domain *d,
                                struct irq_fwspec *fwspec,
                                irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
        if (d->ops->translate)
                return d->ops->translate(d, fwspec, hwirq, type);
#endif
        if (d->ops->xlate)
                return d->ops->xlate(d, to_of_node(fwspec->fwnode),
                                     fwspec->param, fwspec->param_count,
                                     hwirq, type);

        /* If domain has no translation, then we assume interrupt line */
        *hwirq = fwspec->param[0];
        return 0;
}

static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
                                      struct irq_fwspec *fwspec)
{
        int i;

        fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
        fwspec->param_count = irq_data->args_count;

        for (i = 0; i < irq_data->args_count; i++)
                fwspec->param[i] = irq_data->args[i];
}

unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
        struct irq_domain *domain;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        int virq;

        if (fwspec->fwnode)
                domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
        else
                domain = irq_default_domain;

        if (!domain) {
                pr_warn("no irq domain found for %s !\n",
                        of_node_full_name(to_of_node(fwspec->fwnode)));
                return 0;
        }

        if (irq_domain_translate(domain, fwspec, &hwirq, &type))
                return 0;

        if (irq_domain_is_hierarchy(domain)) {
                /*
                 * If we've already configured this interrupt,
                 * don't do it again, or hell will break loose.
                 */
                virq = irq_find_mapping(domain, hwirq);
                if (virq)
                        return virq;

                virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
                if (virq <= 0)
                        return 0;
        } else {
                /* Create mapping */
                virq = irq_create_mapping(domain, hwirq);
                if (!virq)
                        return virq;
        }

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != irq_get_trigger_type(virq))
                irq_set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
        struct irq_fwspec fwspec;

        of_phandle_args_to_fwspec(irq_data, &fwspec);
        return irq_create_fwspec_mapping(&fwspec);
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
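
/*
 * Example (hedged sketch): building an irq_fwspec by hand, as a platform
 * shim might; a two-cell specifier for hwirq 17, level-high trigger. The
 * fwnode ("acme_fwnode") is assumed to identify a registered domain.
 *
 *      struct irq_fwspec fwspec = {
 *              .fwnode      = acme_fwnode,
 *              .param_count = 2,
 *              .param       = { 17, IRQ_TYPE_LEVEL_HIGH },
 *      };
 *
 *      virq = irq_create_fwspec_mapping(&fwspec);
 */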

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        struct irq_domain *domain;

        if (!virq || !irq_data)
                return;

        domain = irq_data->domain;
        if (WARN_ON(domain == NULL))
                return;

        irq_domain_disassociate(domain, virq);
        irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
                              irq_hw_number_t hwirq)
{
        struct irq_data *data;

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL)
                return 0;

        if (hwirq < domain->revmap_direct_max_irq) {
                data = irq_domain_get_irq_data(domain, hwirq);
                if (data && data->hwirq == hwirq)
                        return hwirq;
        }

        /* Check if the hwirq is in the linear revmap. */
        if (hwirq < domain->revmap_size)
                return domain->linear_revmap[hwirq];

        rcu_read_lock();
        data = radix_tree_lookup(&domain->revmap_tree, hwirq);
        rcu_read_unlock();
        return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        struct irq_domain *domain;
        struct radix_tree_iter iter;
        void *data, **slot;
        int i;

        seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
                   "name", "mapped", "linear-max", "direct-max", "devtree-node");
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(domain, &irq_domain_list, link) {
                struct device_node *of_node;
                int count = 0;
                of_node = irq_domain_get_of_node(domain);
                radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
                        count++;
                seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
                           domain == irq_default_domain ? '*' : ' ', domain->name,
                           domain->revmap_size + count, domain->revmap_size,
                           domain->revmap_direct_max_irq,
                           of_node ? of_node_full_name(of_node) : "");
        }
        mutex_unlock(&irq_domain_mutex);

        seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
                   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
                   "active", "type", "domain");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);
                domain = desc->irq_data.domain;

                if (domain) {
                        struct irq_chip *chip;
                        int hwirq = desc->irq_data.hwirq;
                        bool direct;

                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05x  ", hwirq);

                        chip = irq_desc_get_chip(desc);
                        seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

                        data = irq_desc_get_chip_data(desc);
                        seq_printf(m, data ? "0x%p  " : "  %p  ", data);

                        seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
                        direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
                        seq_printf(m, "%6s%-8s  ",
                                   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
                                   direct ? "(DIRECT)" : "");
                        seq_printf(m, "%s\n", desc->irq_data.domain->name);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
                                NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
                             const u32 *intspec, unsigned int intsize,
                             unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_NONE;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
                             const u32 *intspec, unsigned int intsize,
                             irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 2))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
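
/*
 * Example (hedged sketch): with #interrupt-cells = <2>, a device node
 * carrying
 *
 *      interrupts = <17 4>;
 *
 * is decoded by irq_domain_xlate_twocell() as hwirq 17 with trigger type
 * IRQ_TYPE_LEVEL_HIGH (4), the trigger bits being masked with
 * IRQ_TYPE_SENSE_MASK.
 */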

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
                                struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
        .xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

static int irq_domain_alloc_descs(int virq, unsigned int cnt,
                                  irq_hw_number_t hwirq, int node)
{
        unsigned int hint;

        if (virq >= 0) {
                virq = irq_alloc_descs(virq, virq, cnt, node);
        } else {
                hint = hwirq % nr_irqs;
                if (hint == 0)
                        hint++;
                virq = irq_alloc_descs_from(hint, cnt, node);
                if (virq <= 0 && hint > 1)
                        virq = irq_alloc_descs_from(1, cnt, node);
        }

        return virq;
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_domain_create_hierarchy - Add an irq domain into the hierarchy
 * @parent: Parent irq domain to associate with the new domain
 * @flags: Irq domain flags associated to the domain
 * @size: Size of the domain. See below
 * @fwnode: Optional fwnode of the interrupt controller
 * @ops: Pointer to the interrupt domain callbacks
 * @host_data: Controller private data pointer
 *
 * If @size is 0 a tree domain is created, otherwise a linear domain.
 *
 * If successful the parent is associated to the new domain and the
 * domain flags are set.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
                                               unsigned int flags,
                                               unsigned int size,
                                               struct fwnode_handle *fwnode,
                                               const struct irq_domain_ops *ops,
                                               void *host_data)
{
        struct irq_domain *domain;

        if (size)
                domain = irq_domain_create_linear(fwnode, size, ops, host_data);
        else
                domain = irq_domain_create_tree(fwnode, ops, host_data);
        if (domain) {
                domain->parent = parent;
                domain->flags |= flags;
        }

        return domain;
}
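
/*
 * Example (hedged sketch): a stacked domain, e.g. an MSI-style layer on
 * top of a root controller's domain. All identifiers are hypothetical.
 *
 *      msi_domain = irq_domain_create_hierarchy(gic_domain, 0, 0, fwnode,
 *                                               &acme_msi_ops, acme_msi);
 *      if (!msi_domain)
 *              return -ENOMEM;
 *
 * size == 0 picks a radix-tree domain for the sparse MSI hwirq space;
 * each allocation then trickles down to gic_domain via the ops->alloc()
 * chain implemented below.
 */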

static void irq_domain_insert_irq(int virq)
{
        struct irq_data *data;

        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
                struct irq_domain *domain = data->domain;
                irq_hw_number_t hwirq = data->hwirq;

                if (hwirq < domain->revmap_size) {
                        domain->linear_revmap[hwirq] = virq;
                } else {
                        mutex_lock(&revmap_trees_mutex);
                        radix_tree_insert(&domain->revmap_tree, hwirq, data);
                        mutex_unlock(&revmap_trees_mutex);
                }

                /* If not already assigned, give the domain the chip's name */
                if (!domain->name && data->chip)
                        domain->name = data->chip->name;
        }

        irq_clear_status_flags(virq, IRQ_NOREQUEST);
}

static void irq_domain_remove_irq(int virq)
{
        struct irq_data *data;

        irq_set_status_flags(virq, IRQ_NOREQUEST);
        irq_set_chip_and_handler(virq, NULL, NULL);
        synchronize_irq(virq);
        smp_mb();

        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
                struct irq_domain *domain = data->domain;
                irq_hw_number_t hwirq = data->hwirq;

                if (hwirq < domain->revmap_size) {
                        domain->linear_revmap[hwirq] = 0;
                } else {
                        mutex_lock(&revmap_trees_mutex);
                        radix_tree_delete(&domain->revmap_tree, hwirq);
                        mutex_unlock(&revmap_trees_mutex);
                }
        }
}

static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
                                                   struct irq_data *child)
{
        struct irq_data *irq_data;

        irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
                                irq_data_get_node(child));
        if (irq_data) {
                child->parent_data = irq_data;
                irq_data->irq = child->irq;
                irq_data->common = child->common;
                irq_data->domain = domain;
        }

        return irq_data;
}

static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *irq_data, *tmp;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_get_irq_data(virq + i);
                tmp = irq_data->parent_data;
                irq_data->parent_data = NULL;
                irq_data->domain = NULL;

                while (tmp) {
                        irq_data = tmp;
                        tmp = tmp->parent_data;
                        kfree(irq_data);
                }
        }
}

static int irq_domain_alloc_irq_data(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *irq_data;
        struct irq_domain *parent;
        int i;

        /* The outermost irq_data is embedded in struct irq_desc */
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_get_irq_data(virq + i);
                irq_data->domain = domain;

                for (parent = domain->parent; parent; parent = parent->parent) {
                        irq_data = irq_domain_insert_irq_data(parent, irq_data);
                        if (!irq_data) {
                                irq_domain_free_irq_data(virq, i + 1);
                                return -ENOMEM;
                        }
                }
        }

        return 0;
}

/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
                                         unsigned int virq)
{
        struct irq_data *irq_data;

        for (irq_data = irq_get_irq_data(virq); irq_data;
             irq_data = irq_data->parent_data)
                if (irq_data->domain == domain)
                        return irq_data;

        return NULL;
}

/**
 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hwirq number
 * @chip: The associated interrupt chip
 * @chip_data: The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
                                  irq_hw_number_t hwirq, struct irq_chip *chip,
                                  void *chip_data)
{
        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

        if (!irq_data)
                return -ENOENT;

        irq_data->hwirq = hwirq;
        irq_data->chip = chip ? chip : &no_irq_chip;
        irq_data->chip_data = chip_data;

        return 0;
}

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
                         irq_hw_number_t hwirq, struct irq_chip *chip,
                         void *chip_data, irq_flow_handler_t handler,
                         void *handler_data, const char *handler_name)
{
        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
        __irq_set_handler(virq, handler, 0, handler_name);
        irq_set_handler_data(virq, handler_data);
}

/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data: The pointer to irq_data
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
        irq_data->hwirq = 0;
        irq_data->chip = &no_irq_chip;
        irq_data->chip_data = NULL;
}

/**
 * irq_domain_free_irqs_common - Clear irq_data and free the parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs)
{
        struct irq_data *irq_data;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                if (irq_data)
                        irq_domain_reset_irq_data(irq_data);
        }
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

/**
 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
 * @domain: Interrupt domain to match
 * @virq: IRQ number to start with
 * @nr_irqs: The number of irqs to free
 */
void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_set_handler_data(virq + i, NULL);
                irq_set_handler(virq + i, NULL);
        }
        irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
{
        return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
}

static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
                                           unsigned int irq_base,
                                           unsigned int nr_irqs)
{
        domain->ops->free(domain, irq_base, nr_irqs);
        if (irq_domain_is_auto_recursive(domain)) {
                BUG_ON(!domain->parent);
                irq_domain_free_irqs_recursive(domain->parent, irq_base,
                                               nr_irqs);
        }
}

static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
                                           unsigned int irq_base,
                                           unsigned int nr_irqs, void *arg)
{
        int ret = 0;
        struct irq_domain *parent = domain->parent;
        bool recursive = irq_domain_is_auto_recursive(domain);

        BUG_ON(recursive && !parent);
        if (recursive)
                ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
                                                      nr_irqs, arg);
        if (ret >= 0)
                ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
        if (ret < 0 && recursive)
                irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);

        return ret;
}

/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain: domain to allocate from
 * @irq_base: allocate specified IRQ number if irq_base >= 0
 * @nr_irqs: number of IRQs to allocate
 * @node: NUMA node id for memory allocation
 * @arg: domain specific argument
 * @realloc: IRQ descriptors have already been allocated if true
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchical IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process of setting up an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), allocates IRQ descriptors
 * and the required hardware resources. The second step,
 * irq_domain_activate_irq(), programs the hardware with the preallocated
 * resources. In this way, it's easier to roll back when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                            unsigned int nr_irqs, int node, void *arg,
                            bool realloc)
{
        int i, ret, virq;

        if (domain == NULL) {
                domain = irq_default_domain;
                if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
                        return -EINVAL;
        }

        if (!domain->ops->alloc) {
                pr_debug("domain->ops->alloc() is NULL\n");
                return -ENOSYS;
        }

        if (realloc && irq_base >= 0) {
                virq = irq_base;
        } else {
                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
                if (virq < 0) {
                        pr_debug("cannot allocate IRQ(base %d, count %d)\n",
                                 irq_base, nr_irqs);
                        return virq;
                }
        }

        if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
                pr_debug("cannot allocate memory for IRQ%d\n", virq);
                ret = -ENOMEM;
                goto out_free_desc;
        }

        mutex_lock(&irq_domain_mutex);
        ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
        if (ret < 0) {
                mutex_unlock(&irq_domain_mutex);
                goto out_free_irq_data;
        }
        for (i = 0; i < nr_irqs; i++)
                irq_domain_insert_irq(virq + i);
        mutex_unlock(&irq_domain_mutex);

        return virq;

out_free_irq_data:
        irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
        irq_free_descs(virq, nr_irqs);
        return ret;
}

/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq: base IRQ number
 * @nr_irqs: number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_get_irq_data(virq);
        int i;

        if (WARN(!data || !data->domain || !data->domain->ops->free,
                 "NULL pointer, cannot free irq\n"))
                return;

        mutex_lock(&irq_domain_mutex);
        for (i = 0; i < nr_irqs; i++)
                irq_domain_remove_irq(virq + i);
        irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
        mutex_unlock(&irq_domain_mutex);

        irq_domain_free_irq_data(virq, nr_irqs);
        irq_free_descs(virq, nr_irqs);
}

/**
 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
 * @domain: Domain whose parent should be used for the allocation
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to allocate
 * @arg: Allocation data (arch/domain specific)
 *
 * Check whether the domain has been set up recursively. If not, allocate
 * through the parent domain.
 */
int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
                                 unsigned int irq_base, unsigned int nr_irqs,
                                 void *arg)
{
        /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
        if (irq_domain_is_auto_recursive(domain))
                return 0;

        domain = domain->parent;
        if (domain)
                return irq_domain_alloc_irqs_recursive(domain, irq_base,
                                                       nr_irqs, arg);
        return -ENOSYS;
}
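
/*
 * Example (hedged sketch): a hierarchical domain's ->alloc() typically
 * translates its argument, installs per-level hwirq/chip data, and then
 * punts to the parent. All "acme_" identifiers are hypothetical.
 *
 *      static int acme_domain_alloc(struct irq_domain *d, unsigned int virq,
 *                                   unsigned int nr_irqs, void *arg)
 *      {
 *              struct irq_fwspec *fwspec = arg;
 *              irq_hw_number_t hwirq = fwspec->param[0];
 *              int i;
 *
 *              for (i = 0; i < nr_irqs; i++)
 *                      irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
 *                                                    &acme_chip,
 *                                                    d->host_data);
 *
 *              return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
 *      }
 */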

/**
 * irq_domain_free_irqs_parent - Free interrupts from parent domain
 * @domain: Domain whose parent should be used for the freeing
 * @irq_base: Base IRQ number
 * @nr_irqs: Number of IRQs to free
 *
 * Check whether the domain has been set up recursively. If not, free
 * through the parent domain.
 */
void irq_domain_free_irqs_parent(struct irq_domain *domain,
                                 unsigned int irq_base, unsigned int nr_irqs)
{
        /* irq_domain_free_irqs_recursive() will call parent's free */
        if (!irq_domain_is_auto_recursive(domain) && domain->parent)
                irq_domain_free_irqs_recursive(domain->parent, irq_base,
                                               nr_irqs);
}

/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *                           interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
        if (irq_data && irq_data->domain) {
                struct irq_domain *domain = irq_data->domain;

                if (irq_data->parent_data)
                        irq_domain_activate_irq(irq_data->parent_data);
                if (domain->ops->activate)
                        domain->ops->activate(domain, irq_data);
        }
}

/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 *                             deactivate interrupt
 * @irq_data: outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
        if (irq_data && irq_data->domain) {
                struct irq_domain *domain = irq_data->domain;

                if (domain->ops->deactivate)
                        domain->ops->deactivate(domain, irq_data);
                if (irq_data->parent_data)
                        irq_domain_deactivate_irq(irq_data->parent_data);
        }
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
        /* Hierarchy irq_domains must implement callback alloc() */
        if (domain->ops->alloc)
                domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
 * @domain: domain to match
 * @virq: IRQ number to get irq_data
 */
struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
                                         unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
}

/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
                         irq_hw_number_t hwirq, struct irq_chip *chip,
                         void *chip_data, irq_flow_handler_t handler,
                         void *handler_data, const char *handler_name)
{
        irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
        irq_set_chip_data(virq, chip_data);
        irq_set_handler_data(virq, handler_data);
}

static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */