#define pr_fmt(fmt) "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
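
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * irq_domain_ops implementation suitable for passing to __irq_domain_add()
 * and its wrappers below. The foo_* names are hypothetical; a real driver
 * would program its own irq_chip in .map:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops foo_irq_domain_ops = {
 *		.map = foo_irq_map,
 *		.xlate = irq_domain_xlate_onecell,
 *	};
 */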

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(domain->of_node);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain and, if first_irq is positive, also allocates
 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
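
/*
 * Example (illustrative sketch, not part of the original file): registering
 * a simple domain from a hypothetical DT probe path, reusing the
 * foo_irq_domain_ops sketched above. Passing first_irq == 0 defers all
 * mappings to irq_create_mapping():
 *
 *	static int foo_intc_probe(struct platform_device *pdev)
 *	{
 *		struct irq_domain *d;
 *
 *		d = irq_domain_add_simple(pdev->dev.of_node, 32, 0,
 *					  &foo_irq_domain_ops, NULL);
 *		return d ? 0 : -ENOMEM;
 *	}
 */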

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *	be '0', but a positive integer can be used if the effective
 *	hwirq numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (!domain)
		return NULL;

	irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
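
/*
 * Example (illustrative sketch, hypothetical names): a non-DT board with a
 * fixed, pre-allocated block of 16 virqs starting at FOO_IRQ_BASE can
 * register a legacy domain so that hwirq N always maps to FOO_IRQ_BASE + N:
 *
 *	domain = irq_domain_add_legacy(NULL, 16, FOO_IRQ_BASE, 0,
 *				       &foo_irq_domain_ops, NULL);
 */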

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int i;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		 of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
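
/*
 * Example (illustrative sketch, hypothetical names): a driver that already
 * knows its hwirq number maps it first, then requests the resulting linux
 * irq as usual:
 *
 *	int virq = irq_create_mapping(foo_domain, 7);
 *
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, foo_handler, 0, "foo", foo_dev);
 */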

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert into the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
	if (!domain) {
		pr_warn("no irq domain found for %s!\n",
			of_node_full_name(irq_data->np));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = irq_data->args[0];
	else {
		if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
					irq_data->args_count, &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irq_get_trigger_type(virq))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
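
/*
 * Example (illustrative sketch): DT drivers rarely call
 * irq_create_of_mapping() directly. irq_of_parse_and_map() builds the
 * of_phandle_args from an "interrupts" property and calls it internally:
 *
 *	unsigned int virq = irq_of_parse_and_map(dev->of_node, 0);
 *
 *	if (!virq)
 *		return -EINVAL;
 */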

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate(domain, virq);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
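
/*
 * Example (illustrative sketch, hypothetical names): a chained handler that
 * demultiplexes a pending-bit register into per-line virqs via the reverse
 * map:
 *
 *	static void foo_irq_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending = readl(foo_base + FOO_PENDING);
 *
 *		while (pending) {
 *			unsigned int hwirq = __ffs(pending);
 *
 *			generic_handle_irq(irq_find_mapping(foo_domain, hwirq));
 *			pending &= ~BIT(hwirq);
 *		}
 *	}
 */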

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		int count = 0;
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   domain->of_node ? of_node_full_name(domain->of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05x ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
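
/*
 * Example (illustrative sketch): a controller whose binding uses
 * #interrupt-cells = <2> (hwirq number plus trigger flags, e.g.
 * interrupts = <5 IRQ_TYPE_LEVEL_HIGH>) can plug in the generic translator
 * directly:
 *
 *	static const struct irq_domain_ops foo_twocell_ops = {
 *		.map = foo_irq_map,
 *		.xlate = irq_domain_xlate_twocell,
 *	};
 */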

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
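
/*
 * Example (illustrative sketch): irq_domain_simple_ops is enough for
 * controllers that need no per-irq .map work, e.g. when chip and handler
 * are configured elsewhere:
 *
 *	domain = irq_domain_add_simple(node, 32, 0,
 *				       &irq_domain_simple_ops, NULL);
 */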