#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
				 * i.e. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_domain *irq_default_domain;

/**
 * irq_domain_alloc() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure. The caller is expected
 * to register the allocated irq_domain with irq_domain_add(). Returns a
 * pointer to the IRQ domain, or NULL on failure.
 */
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
					   unsigned int revmap_type,
					   const struct irq_domain_ops *ops,
					   void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	domain->revmap_type = revmap_type;
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);

	return domain;
}

static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
	pr_debug("irq: Allocated domain of type %d @0x%p\n",
		 domain->revmap_type, domain);
}

static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
					     irq_hw_number_t hwirq)
{
	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
	int size = domain->revmap_data.legacy.size;

	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
		return 0;
	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}
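
/*
 * Worked example for the legacy revmap above (illustrative only, not part
 * of the original file): with first_irq = 16, first_hwirq = 0 and
 * size = 16, a lookup of hwirq 3 returns 3 - 0 + 16 = Linux irq 19, while
 * hwirq 16 falls outside the range [0, 16) and yields 0.
 */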

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirq numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int i;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
	if (!domain)
		return NULL;

	domain->revmap_data.legacy.first_irq = first_irq;
	domain->revmap_data.legacy.first_hwirq = first_hwirq;
	domain->revmap_data.legacy.size = size;

	mutex_lock(&irq_domain_mutex);
	/* Verify that all the irqs are available */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		struct irq_data *irq_data = irq_get_irq_data(irq);

		if (WARN_ON(!irq_data || irq_data->domain)) {
			mutex_unlock(&irq_domain_mutex);
			of_node_put(domain->of_node);
			kfree(domain);
			return NULL;
		}
	}

	/* Claim all of the irqs before registering a legacy domain */
	for (i = 0; i < size; i++) {
		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
		irq_data->hwirq = first_hwirq + i;
		irq_data->domain = domain;
	}
	mutex_unlock(&irq_domain_mutex);

	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		int hwirq = first_hwirq + i;

		/* IRQ0 gets ignored */
		if (!irq)
			continue;

		/* Legacy flags are left to default at this point,
		 * one can then use irq_create_mapping() to
		 * explicitly change them
		 */
		ops->map(domain, irq, hwirq);

		/* Clear norequest flags */
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_domain_add(domain);
	return domain;
}

/**
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: number of interrupts in the linear map
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int *revmap;

	revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
	if (WARN_ON(!revmap))
		return NULL;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
	if (!domain) {
		kfree(revmap);
		return NULL;
	}
	domain->revmap_data.linear.size = size;
	domain->revmap_data.linear.revmap = revmap;
	irq_domain_add(domain);
	return domain;
}
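
/*
 * Example (sketch, not part of this file): the common case is a driver
 * for a controller with a modest number of lines registering a linear
 * domain from its init or probe path. The "foo" names below are
 * hypothetical; irq_domain_xlate_twocell() is defined later in this file:
 *
 *	static const struct irq_domain_ops foo_irq_ops = {
 *		.map = foo_irq_map,
 *		.xlate = irq_domain_xlate_twocell,
 *	};
 *
 *	foo->domain = irq_domain_add_linear(np, 32, &foo_irq_ops, foo);
 *	if (!foo->domain)
 *		return -ENOMEM;
 */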

/**
 * irq_domain_add_nomap() - Allocate and register a nomap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
					const struct irq_domain_ops *ops,
					void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
			IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
	if (domain)
		irq_domain_add(domain);
	return domain;
}

/**
 * irq_domain_add_tree() - Allocate and register a radix tree revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: The radix tree will be allocated later during boot automatically
 * (the reverse mapping will use the slow path until that happens).
 */
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
				       const struct irq_domain_ops *ops,
				       void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
			IRQ_DOMAIN_MAP_TREE, ops, host_data);
	if (domain) {
		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
		irq_domain_add(domain);
	}
	return domain;
}

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This hasn't been a problem
	 * so far though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
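
/*
 * Example (sketch): resolving a device-tree interrupt controller node back
 * to its registered domain. "np" is assumed to reference the controller's
 * node; a NULL return means no domain has been registered for it:
 *
 *	struct irq_domain *domain = irq_find_host(np);
 *	if (!domain)
 *		return -ENODEV;
 */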

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("irq: Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}

/**
 * irq_set_virq_count() - Set the maximum number of linux irqs
 * @count: number of linux irqs, capped with NR_IRQS
 *
 * This is mainly for use by platforms like iSeries that want to program
 * the virtual irq number in the controller to avoid the reverse mapping
 */
void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map(domain, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_data->domain = NULL;
		irq_data->hwirq = 0;
		return -1;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	BUG_ON(domain == NULL);
	WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);

	virq = irq_alloc_desc_from(1, 0);
	if (!virq) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= irq_virq_count) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
		       irq_virq_count);
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL domain, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return 0;
	}
	pr_debug("irq: -> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Legacy domains have a fixed hwirq to irq translation */
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return irq_domain_legacy_revmap(domain, hwirq);

	/* Allocate a virtual interrupt number */
	hint = hwirq % irq_virq_count;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, 0);
	if (!virq)
		virq = irq_alloc_desc_from(1, 0);
	if (!virq) {
		pr_debug("irq: -> virq allocation failed\n");
		return 0;
	}

	if (irq_setup_virq(domain, virq, hwirq)) {
		if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
			irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
		 hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
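
/*
 * Example (sketch): once a domain is registered, a hypothetical driver can
 * translate a hardware line number into a Linux irq and request it; the
 * "foo" names are assumptions, not part of this file:
 *
 *	unsigned int virq = irq_create_mapping(foo->domain, 7);
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, foo_handler, 0, "foo", foo);
 */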

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain. This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6. That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
		printk(KERN_WARNING "irq: no irq domain found for %s!\n",
		       controller->full_name);
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (domain->ops->xlate(domain, controller, intspec, intsize,
				       &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different from the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;
	irq_hw_number_t hwirq;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	/* Never unmap legacy interrupts */
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_data->hwirq;
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LINEAR:
		if (hwirq < domain->revmap_data.linear.size)
			domain->revmap_data.linear.revmap[hwirq] = 0;
		break;
	case IRQ_DOMAIN_MAP_TREE:
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a slow path, for use by generic code. It's expected that an
 * irq controller implementation directly calls the appropriate low level
 * mapping function.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* legacy -> bail early */
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return irq_domain_legacy_revmap(domain, hwirq);

	/* Slow path does a linear search of the map */
	if (hint == 0)
		hint = 1;
	i = hint;
	do {
		struct irq_data *data = irq_get_irq_data(i);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return i;
		i++;
		if (i >= irq_virq_count)
			i = 1;
	} while (i != hint);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
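
/*
 * Example (sketch): a driver tearing down its interrupts undoes each
 * irq_create_mapping() with irq_dispose_mapping(), e.g. from its remove
 * routine; the "foo" structure and fields are hypothetical:
 *
 *	for (i = 0; i < foo->nr_irqs; i++)
 *		irq_dispose_mapping(foo->virqs[i]);
 */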

/**
 * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses radix tree
 * revmaps
 */
unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	struct irq_data *irq_data;

	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
		return irq_find_mapping(domain, hwirq);

	/*
	 * Freeing an irq can delete radix tree nodes along the lookup
	 * path. Node deletion is deferred via call_rcu, so the lookup
	 * is safe under rcu_read_lock().
	 */
	rcu_read_lock();
	irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
	rcu_read_unlock();

	/*
	 * If the irq was found in the radix tree, we're done. Otherwise
	 * fall back to a linear lookup; this should not happen in practice,
	 * as it means we failed to insert the entry into the radix tree.
	 */
	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}

/**
 * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
 * @domain: domain owning this hardware interrupt
 * @virq: linux irq number
 * @hwirq: hardware irq number in that domain space
 *
 * This is for use by irq controllers that use a radix tree reverse
 * mapping for fast lookup.
 */
void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
		return;

	if (virq) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
}
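
/*
 * Example (sketch): a hypothetical controller with a large, sparse hwirq
 * space can populate the tree from its ->map() callback and use the fast
 * lookup when demultiplexing; all "foo" names are assumptions:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
 *		irq_radix_revmap_insert(d, virq, hwirq);
 *		return 0;
 *	}
 *
 * and later, in the demux path:
 *
 *	virq = irq_radix_revmap_lookup(foo_domain, hwirq);
 *	if (virq)
 *		generic_handle_irq(virq);
 */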

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses linear
 * revmaps. It falls back to the slow path if the revmap entry doesn't
 * exist yet, and fills it in so that subsequent lookups take the fast path.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
		return irq_find_mapping(domain, hwirq);

	/* Check revmap bounds */
	if (unlikely(hwirq >= domain->revmap_data.linear.size))
		return irq_find_mapping(domain, hwirq);

	/* Check if revmap was allocated */
	revmap = domain->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(domain, hwirq);

	/* Fill in the revmap entry via the slow path if it's still empty */
	if (unlikely(!revmap[hwirq]))
		revmap[hwirq] = irq_find_mapping(domain, hwirq);

	return revmap[hwirq];
}
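
/*
 * Example (sketch): a chained handler for a hypothetical 32-line
 * controller can translate each pending line with the O(1) linear revmap
 * and forward it to the core; "foo", FOO_STATUS and the register layout
 * are assumptions:
 *
 *	static void foo_irq_cascade(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(foo->base + FOO_STATUS);
 *		int hwirq;
 *
 *		for_each_set_bit(hwirq, &pending, 32) {
 *			unsigned int virq = irq_linear_revmap(foo->domain,
 *							      hwirq);
 *			if (virq)
 *				generic_handle_irq(virq);
 *		}
 *	}
 */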

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
		   "chip name", "chip data", "domain name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, "0x%16p ", data);

			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
				p = desc->irq_data.domain->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/* Stub map() callback for simple domains; it accepts every mapping. */
int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	return 0;
}

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.map = irq_domain_simple_map,
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;
	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		 (unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif
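
/*
 * Example (sketch): a platform whose fixed-address controller is not yet
 * converted to register its own domain could pre-register a simple legacy
 * domain at init time; the match table, compatible string and address
 * below are hypothetical:
 *
 *	static const struct of_device_id foo_pic_match[] __initconst = {
 *		{ .compatible = "acme,foo-pic" },
 *		{ }
 *	};
 *
 *	irq_domain_generate_simple(foo_pic_match, 0x10000000, 16);
 */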