1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 5 * 6 * This file contains the interrupt descriptor management code. Detailed 7 * information is available in Documentation/core-api/genericirq.rst 8 * 9 */ 10 #include <linux/irq.h> 11 #include <linux/slab.h> 12 #include <linux/export.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel_stat.h> 15 #include <linux/radix-tree.h> 16 #include <linux/bitmap.h> 17 #include <linux/irqdomain.h> 18 #include <linux/sysfs.h> 19 20 #include "internals.h" 21 22 /* 23 * lockdep: we want to handle all irq_desc locks as a single lock-class: 24 */ 25 static struct lock_class_key irq_desc_lock_class; 26 27 #if defined(CONFIG_SMP) 28 static int __init irq_affinity_setup(char *str) 29 { 30 alloc_bootmem_cpumask_var(&irq_default_affinity); 31 cpulist_parse(str, irq_default_affinity); 32 /* 33 * Set at least the boot cpu. We don't want to end up with 34 * bugreports caused by random comandline masks 35 */ 36 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 37 return 1; 38 } 39 __setup("irqaffinity=", irq_affinity_setup); 40 41 static void __init init_irq_default_affinity(void) 42 { 43 if (!cpumask_available(irq_default_affinity)) 44 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 45 if (cpumask_empty(irq_default_affinity)) 46 cpumask_setall(irq_default_affinity); 47 } 48 #else 49 static void __init init_irq_default_affinity(void) 50 { 51 } 52 #endif 53 54 #ifdef CONFIG_SMP 55 static int alloc_masks(struct irq_desc *desc, int node) 56 { 57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, 58 GFP_KERNEL, node)) 59 return -ENOMEM; 60 61 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 62 if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, 63 GFP_KERNEL, node)) { 64 free_cpumask_var(desc->irq_common_data.affinity); 65 return -ENOMEM; 66 } 67 #endif 68 69 #ifdef CONFIG_GENERIC_PENDING_IRQ 70 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { 71 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 72 free_cpumask_var(desc->irq_common_data.effective_affinity); 73 #endif 74 free_cpumask_var(desc->irq_common_data.affinity); 75 return -ENOMEM; 76 } 77 #endif 78 return 0; 79 } 80 81 static void desc_smp_init(struct irq_desc *desc, int node, 82 const struct cpumask *affinity) 83 { 84 if (!affinity) 85 affinity = irq_default_affinity; 86 cpumask_copy(desc->irq_common_data.affinity, affinity); 87 88 #ifdef CONFIG_GENERIC_PENDING_IRQ 89 cpumask_clear(desc->pending_mask); 90 #endif 91 #ifdef CONFIG_NUMA 92 desc->irq_common_data.node = node; 93 #endif 94 } 95 96 #else 97 static inline int 98 alloc_masks(struct irq_desc *desc, int node) { return 0; } 99 static inline void 100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } 101 #endif 102 103 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, 104 const struct cpumask *affinity, struct module *owner) 105 { 106 int cpu; 107 108 desc->irq_common_data.handler_data = NULL; 109 desc->irq_common_data.msi_desc = NULL; 110 111 desc->irq_data.common = &desc->irq_common_data; 112 desc->irq_data.irq = irq; 113 desc->irq_data.chip = &no_irq_chip; 114 desc->irq_data.chip_data = NULL; 115 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 116 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); 117 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); 118 desc->handle_irq = handle_bad_irq; 119 desc->depth = 1; 120 desc->irq_count = 0; 121 desc->irqs_unhandled = 0; 122 desc->tot_count = 0; 123 desc->name = NULL; 124 desc->owner = owner; 125 for_each_possible_cpu(cpu) 126 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; 127 desc_smp_init(desc, node, affinity); 128 } 129 130 int nr_irqs = NR_IRQS; 131 EXPORT_SYMBOL_GPL(nr_irqs); 132 133 static DEFINE_MUTEX(sparse_irq_lock); 134 static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); 135 136 #ifdef CONFIG_SPARSE_IRQ 137 138 static void irq_kobj_release(struct kobject *kobj); 139 140 #ifdef CONFIG_SYSFS 141 static struct kobject *irq_kobj_base; 142 143 #define IRQ_ATTR_RO(_name) \ 144 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 145 146 static ssize_t per_cpu_count_show(struct kobject *kobj, 147 struct kobj_attribute *attr, char *buf) 148 { 149 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 150 int cpu, irq = desc->irq_data.irq; 151 ssize_t ret = 0; 152 char *p = ""; 153 154 for_each_possible_cpu(cpu) { 155 unsigned int c = kstat_irqs_cpu(irq, cpu); 156 157 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); 158 p = ","; 159 } 160 161 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 162 return ret; 163 } 164 IRQ_ATTR_RO(per_cpu_count); 165 166 static ssize_t chip_name_show(struct kobject *kobj, 167 struct kobj_attribute *attr, char *buf) 168 { 169 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 170 ssize_t ret = 0; 171 172 raw_spin_lock_irq(&desc->lock); 173 if (desc->irq_data.chip && desc->irq_data.chip->name) { 174 ret = scnprintf(buf, PAGE_SIZE, "%s\n", 175 desc->irq_data.chip->name); 176 } 177 raw_spin_unlock_irq(&desc->lock); 178 179 return ret; 180 } 181 IRQ_ATTR_RO(chip_name); 182 183 static ssize_t hwirq_show(struct kobject *kobj, 184 struct kobj_attribute *attr, char *buf) 185 { 186 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 187 ssize_t ret = 0; 188 189 raw_spin_lock_irq(&desc->lock); 190 if (desc->irq_data.domain) 191 ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq); 192 raw_spin_unlock_irq(&desc->lock); 193 194 return ret; 195 } 196 IRQ_ATTR_RO(hwirq); 197 198 static ssize_t type_show(struct kobject *kobj, 199 struct kobj_attribute *attr, char *buf) 200 { 201 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 202 ssize_t ret = 0; 203 204 raw_spin_lock_irq(&desc->lock); 205 ret = sprintf(buf, "%s\n", 206 irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); 207 raw_spin_unlock_irq(&desc->lock); 208 209 return ret; 210 211 } 212 IRQ_ATTR_RO(type); 213 214 static ssize_t wakeup_show(struct kobject *kobj, 215 struct kobj_attribute *attr, char *buf) 216 { 217 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 218 ssize_t ret = 0; 219 220 raw_spin_lock_irq(&desc->lock); 221 ret = sprintf(buf, "%s\n", 222 irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled"); 223 raw_spin_unlock_irq(&desc->lock); 224 225 return ret; 226 227 } 228 IRQ_ATTR_RO(wakeup); 229 230 static ssize_t name_show(struct kobject *kobj, 231 struct kobj_attribute *attr, char *buf) 232 { 233 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 234 ssize_t ret = 0; 235 236 raw_spin_lock_irq(&desc->lock); 237 if (desc->name) 238 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); 239 raw_spin_unlock_irq(&desc->lock); 240 241 return ret; 242 } 243 IRQ_ATTR_RO(name); 244 245 static ssize_t actions_show(struct kobject *kobj, 246 struct kobj_attribute *attr, char *buf) 247 { 248 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 249 struct irqaction *action; 250 ssize_t ret = 0; 251 char *p = ""; 252 253 raw_spin_lock_irq(&desc->lock); 254 for (action = desc->action; action != NULL; action = action->next) { 255 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", 256 p, action->name); 257 p = ","; 258 } 259 raw_spin_unlock_irq(&desc->lock); 260 261 if (ret) 262 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 263 264 return ret; 265 } 266 IRQ_ATTR_RO(actions); 267 268 static struct attribute *irq_attrs[] = { 269 &per_cpu_count_attr.attr, 270 &chip_name_attr.attr, 271 &hwirq_attr.attr, 272 &type_attr.attr, 273 &wakeup_attr.attr, 274 &name_attr.attr, 275 &actions_attr.attr, 276 NULL 277 }; 278 ATTRIBUTE_GROUPS(irq); 279 280 static struct kobj_type irq_kobj_type = { 281 .release = irq_kobj_release, 282 .sysfs_ops = &kobj_sysfs_ops, 283 .default_groups = irq_groups, 284 }; 285 286 static void irq_sysfs_add(int irq, struct irq_desc *desc) 287 { 288 if (irq_kobj_base) { 289 /* 290 * Continue even in case of failure as this is nothing 291 * crucial. 292 */ 293 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) 294 pr_warn("Failed to add kobject for irq %d\n", irq); 295 } 296 } 297 298 static int __init irq_sysfs_init(void) 299 { 300 struct irq_desc *desc; 301 int irq; 302 303 /* Prevent concurrent irq alloc/free */ 304 irq_lock_sparse(); 305 306 irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); 307 if (!irq_kobj_base) { 308 irq_unlock_sparse(); 309 return -ENOMEM; 310 } 311 312 /* Add the already allocated interrupts */ 313 for_each_irq_desc(irq, desc) 314 irq_sysfs_add(irq, desc); 315 irq_unlock_sparse(); 316 317 return 0; 318 } 319 postcore_initcall(irq_sysfs_init); 320 321 #else /* !CONFIG_SYSFS */ 322 323 static struct kobj_type irq_kobj_type = { 324 .release = irq_kobj_release, 325 }; 326 327 static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 328 329 #endif /* CONFIG_SYSFS */ 330 331 static RADIX_TREE(irq_desc_tree, GFP_KERNEL); 332 333 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) 334 { 335 radix_tree_insert(&irq_desc_tree, irq, desc); 336 } 337 338 struct irq_desc *irq_to_desc(unsigned int irq) 339 { 340 return radix_tree_lookup(&irq_desc_tree, irq); 341 } 342 EXPORT_SYMBOL(irq_to_desc); 343 344 static void delete_irq_desc(unsigned int irq) 345 { 346 radix_tree_delete(&irq_desc_tree, irq); 347 } 348 349 #ifdef CONFIG_SMP 350 static void free_masks(struct irq_desc *desc) 351 { 352 #ifdef CONFIG_GENERIC_PENDING_IRQ 353 free_cpumask_var(desc->pending_mask); 354 #endif 355 free_cpumask_var(desc->irq_common_data.affinity); 356 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 357 free_cpumask_var(desc->irq_common_data.effective_affinity); 358 #endif 359 } 360 #else 361 static inline void free_masks(struct irq_desc *desc) { } 362 #endif 363 364 void irq_lock_sparse(void) 365 { 366 mutex_lock(&sparse_irq_lock); 367 } 368 369 void irq_unlock_sparse(void) 370 { 371 mutex_unlock(&sparse_irq_lock); 372 } 373 374 static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, 375 const struct cpumask *affinity, 376 struct module *owner) 377 { 378 struct irq_desc *desc; 379 380 desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node); 381 if (!desc) 382 return NULL; 383 /* allocate based on nr_cpu_ids */ 384 desc->kstat_irqs = alloc_percpu(unsigned int); 385 if (!desc->kstat_irqs) 386 goto err_desc; 387 388 if (alloc_masks(desc, node)) 389 goto err_kstat; 390 391 raw_spin_lock_init(&desc->lock); 392 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 393 mutex_init(&desc->request_mutex); 394 init_rcu_head(&desc->rcu); 395 396 desc_set_defaults(irq, desc, node, affinity, owner); 397 irqd_set(&desc->irq_data, flags); 398 kobject_init(&desc->kobj, &irq_kobj_type); 399 400 return desc; 401 402 err_kstat: 403 free_percpu(desc->kstat_irqs); 404 err_desc: 405 kfree(desc); 406 return NULL; 407 } 408 409 static void irq_kobj_release(struct kobject *kobj) 410 { 411 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 412 413 free_masks(desc); 414 free_percpu(desc->kstat_irqs); 415 kfree(desc); 416 } 417 418 static void delayed_free_desc(struct rcu_head *rhp) 419 { 420 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); 421 422 kobject_put(&desc->kobj); 423 } 424 425 static void free_desc(unsigned int irq) 426 { 427 struct irq_desc *desc = irq_to_desc(irq); 428 429 irq_remove_debugfs_entry(desc); 430 unregister_irq_proc(irq, desc); 431 432 /* 433 * sparse_irq_lock protects also show_interrupts() and 434 * kstat_irq_usr(). Once we deleted the descriptor from the 435 * sparse tree we can free it. Access in proc will fail to 436 * lookup the descriptor. 437 * 438 * The sysfs entry must be serialized against a concurrent 439 * irq_sysfs_init() as well. 440 */ 441 kobject_del(&desc->kobj); 442 delete_irq_desc(irq); 443 444 /* 445 * We free the descriptor, masks and stat fields via RCU. That 446 * allows demultiplex interrupts to do rcu based management of 447 * the child interrupts. 448 * This also allows us to use rcu in kstat_irqs_usr(). 449 */ 450 call_rcu(&desc->rcu, delayed_free_desc); 451 } 452 453 static int alloc_descs(unsigned int start, unsigned int cnt, int node, 454 const struct irq_affinity_desc *affinity, 455 struct module *owner) 456 { 457 struct irq_desc *desc; 458 int i; 459 460 /* Validate affinity mask(s) */ 461 if (affinity) { 462 for (i = 0; i < cnt; i++) { 463 if (cpumask_empty(&affinity[i].mask)) 464 return -EINVAL; 465 } 466 } 467 468 for (i = 0; i < cnt; i++) { 469 const struct cpumask *mask = NULL; 470 unsigned int flags = 0; 471 472 if (affinity) { 473 if (affinity->is_managed) { 474 flags = IRQD_AFFINITY_MANAGED | 475 IRQD_MANAGED_SHUTDOWN; 476 } 477 mask = &affinity->mask; 478 node = cpu_to_node(cpumask_first(mask)); 479 affinity++; 480 } 481 482 desc = alloc_desc(start + i, node, flags, mask, owner); 483 if (!desc) 484 goto err; 485 irq_insert_desc(start + i, desc); 486 irq_sysfs_add(start + i, desc); 487 irq_add_debugfs_entry(start + i, desc); 488 } 489 bitmap_set(allocated_irqs, start, cnt); 490 return start; 491 492 err: 493 for (i--; i >= 0; i--) 494 free_desc(start + i); 495 return -ENOMEM; 496 } 497 498 static int irq_expand_nr_irqs(unsigned int nr) 499 { 500 if (nr > IRQ_BITMAP_BITS) 501 return -ENOMEM; 502 nr_irqs = nr; 503 return 0; 504 } 505 506 int __init early_irq_init(void) 507 { 508 int i, initcnt, node = first_online_node; 509 struct irq_desc *desc; 510 511 init_irq_default_affinity(); 512 513 /* Let arch update nr_irqs and return the nr of preallocated irqs */ 514 initcnt = arch_probe_nr_irqs(); 515 printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n", 516 NR_IRQS, nr_irqs, initcnt); 517 518 if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) 519 nr_irqs = IRQ_BITMAP_BITS; 520 521 if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) 522 initcnt = IRQ_BITMAP_BITS; 523 524 if (initcnt > nr_irqs) 525 nr_irqs = initcnt; 526 527 for (i = 0; i < initcnt; i++) { 528 desc = alloc_desc(i, node, 0, NULL, NULL); 529 set_bit(i, allocated_irqs); 530 irq_insert_desc(i, desc); 531 } 532 return arch_early_irq_init(); 533 } 534 535 #else /* !CONFIG_SPARSE_IRQ */ 536 537 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 538 [0 ... NR_IRQS-1] = { 539 .handle_irq = handle_bad_irq, 540 .depth = 1, 541 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 542 } 543 }; 544 545 int __init early_irq_init(void) 546 { 547 int count, i, node = first_online_node; 548 struct irq_desc *desc; 549 550 init_irq_default_affinity(); 551 552 printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS); 553 554 desc = irq_desc; 555 count = ARRAY_SIZE(irq_desc); 556 557 for (i = 0; i < count; i++) { 558 desc[i].kstat_irqs = alloc_percpu(unsigned int); 559 alloc_masks(&desc[i], node); 560 raw_spin_lock_init(&desc[i].lock); 561 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 562 mutex_init(&desc[i].request_mutex); 563 desc_set_defaults(i, &desc[i], node, NULL, NULL); 564 } 565 return arch_early_irq_init(); 566 } 567 568 struct irq_desc *irq_to_desc(unsigned int irq) 569 { 570 return (irq < NR_IRQS) ? irq_desc + irq : NULL; 571 } 572 EXPORT_SYMBOL(irq_to_desc); 573 574 static void free_desc(unsigned int irq) 575 { 576 struct irq_desc *desc = irq_to_desc(irq); 577 unsigned long flags; 578 579 raw_spin_lock_irqsave(&desc->lock, flags); 580 desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); 581 raw_spin_unlock_irqrestore(&desc->lock, flags); 582 } 583 584 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, 585 const struct irq_affinity_desc *affinity, 586 struct module *owner) 587 { 588 u32 i; 589 590 for (i = 0; i < cnt; i++) { 591 struct irq_desc *desc = irq_to_desc(start + i); 592 593 desc->owner = owner; 594 } 595 bitmap_set(allocated_irqs, start, cnt); 596 return start; 597 } 598 599 static int irq_expand_nr_irqs(unsigned int nr) 600 { 601 return -ENOMEM; 602 } 603 604 void irq_mark_irq(unsigned int irq) 605 { 606 mutex_lock(&sparse_irq_lock); 607 bitmap_set(allocated_irqs, irq, 1); 608 mutex_unlock(&sparse_irq_lock); 609 } 610 611 #ifdef CONFIG_GENERIC_IRQ_LEGACY 612 void irq_init_desc(unsigned int irq) 613 { 614 free_desc(irq); 615 } 616 #endif 617 618 #endif /* !CONFIG_SPARSE_IRQ */ 619 620 /** 621 * generic_handle_irq - Invoke the handler for a particular irq 622 * @irq: The irq number to handle 623 * 624 */ 625 int generic_handle_irq(unsigned int irq) 626 { 627 struct irq_desc *desc = irq_to_desc(irq); 628 629 if (!desc) 630 return -EINVAL; 631 generic_handle_irq_desc(desc); 632 return 0; 633 } 634 EXPORT_SYMBOL_GPL(generic_handle_irq); 635 636 #ifdef CONFIG_HANDLE_DOMAIN_IRQ 637 /** 638 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain 639 * @domain: The domain where to perform the lookup 640 * @hwirq: The HW irq number to convert to a logical one 641 * @lookup: Whether to perform the domain lookup or not 642 * @regs: Register file coming from the low-level handling code 643 * 644 * Returns: 0 on success, or -EINVAL if conversion has failed 645 */ 646 int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, 647 bool lookup, struct pt_regs *regs) 648 { 649 struct pt_regs *old_regs = set_irq_regs(regs); 650 unsigned int irq = hwirq; 651 int ret = 0; 652 653 irq_enter(); 654 655 #ifdef CONFIG_IRQ_DOMAIN 656 if (lookup) 657 irq = irq_find_mapping(domain, hwirq); 658 #endif 659 660 /* 661 * Some hardware gives randomly wrong interrupts. Rather 662 * than crashing, do something sensible. 663 */ 664 if (unlikely(!irq || irq >= nr_irqs)) { 665 ack_bad_irq(irq); 666 ret = -EINVAL; 667 } else { 668 generic_handle_irq(irq); 669 } 670 671 irq_exit(); 672 set_irq_regs(old_regs); 673 return ret; 674 } 675 676 #ifdef CONFIG_IRQ_DOMAIN 677 /** 678 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain 679 * @domain: The domain where to perform the lookup 680 * @hwirq: The HW irq number to convert to a logical one 681 * @regs: Register file coming from the low-level handling code 682 * 683 * This function must be called from an NMI context. 684 * 685 * Returns: 0 on success, or -EINVAL if conversion has failed 686 */ 687 int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, 688 struct pt_regs *regs) 689 { 690 struct pt_regs *old_regs = set_irq_regs(regs); 691 unsigned int irq; 692 int ret = 0; 693 694 /* 695 * NMI context needs to be setup earlier in order to deal with tracing. 696 */ 697 WARN_ON(!in_nmi()); 698 699 irq = irq_find_mapping(domain, hwirq); 700 701 /* 702 * ack_bad_irq is not NMI-safe, just report 703 * an invalid interrupt. 704 */ 705 if (likely(irq)) 706 generic_handle_irq(irq); 707 else 708 ret = -EINVAL; 709 710 set_irq_regs(old_regs); 711 return ret; 712 } 713 #endif 714 #endif 715 716 /* Dynamic interrupt handling */ 717 718 /** 719 * irq_free_descs - free irq descriptors 720 * @from: Start of descriptor range 721 * @cnt: Number of consecutive irqs to free 722 */ 723 void irq_free_descs(unsigned int from, unsigned int cnt) 724 { 725 int i; 726 727 if (from >= nr_irqs || (from + cnt) > nr_irqs) 728 return; 729 730 mutex_lock(&sparse_irq_lock); 731 for (i = 0; i < cnt; i++) 732 free_desc(from + i); 733 734 bitmap_clear(allocated_irqs, from, cnt); 735 mutex_unlock(&sparse_irq_lock); 736 } 737 EXPORT_SYMBOL_GPL(irq_free_descs); 738 739 /** 740 * irq_alloc_descs - allocate and initialize a range of irq descriptors 741 * @irq: Allocate for specific irq number if irq >= 0 742 * @from: Start the search from this irq number 743 * @cnt: Number of consecutive irqs to allocate. 744 * @node: Preferred node on which the irq descriptor should be allocated 745 * @owner: Owning module (can be NULL) 746 * @affinity: Optional pointer to an affinity mask array of size @cnt which 747 * hints where the irq descriptors should be allocated and which 748 * default affinities to use 749 * 750 * Returns the first irq number or error code 751 */ 752 int __ref 753 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 754 struct module *owner, const struct irq_affinity_desc *affinity) 755 { 756 int start, ret; 757 758 if (!cnt) 759 return -EINVAL; 760 761 if (irq >= 0) { 762 if (from > irq) 763 return -EINVAL; 764 from = irq; 765 } else { 766 /* 767 * For interrupts which are freely allocated the 768 * architecture can force a lower bound to the @from 769 * argument. x86 uses this to exclude the GSI space. 770 */ 771 from = arch_dynirq_lower_bound(from); 772 } 773 774 mutex_lock(&sparse_irq_lock); 775 776 start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, 777 from, cnt, 0); 778 ret = -EEXIST; 779 if (irq >=0 && start != irq) 780 goto unlock; 781 782 if (start + cnt > nr_irqs) { 783 ret = irq_expand_nr_irqs(start + cnt); 784 if (ret) 785 goto unlock; 786 } 787 ret = alloc_descs(start, cnt, node, affinity, owner); 788 unlock: 789 mutex_unlock(&sparse_irq_lock); 790 return ret; 791 } 792 EXPORT_SYMBOL_GPL(__irq_alloc_descs); 793 794 #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 795 /** 796 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware 797 * @cnt: number of interrupts to allocate 798 * @node: node on which to allocate 799 * 800 * Returns an interrupt number > 0 or 0, if the allocation fails. 801 */ 802 unsigned int irq_alloc_hwirqs(int cnt, int node) 803 { 804 int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL); 805 806 if (irq < 0) 807 return 0; 808 809 for (i = irq; cnt > 0; i++, cnt--) { 810 if (arch_setup_hwirq(i, node)) 811 goto err; 812 irq_clear_status_flags(i, _IRQ_NOREQUEST); 813 } 814 return irq; 815 816 err: 817 for (i--; i >= irq; i--) { 818 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 819 arch_teardown_hwirq(i); 820 } 821 irq_free_descs(irq, cnt); 822 return 0; 823 } 824 EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); 825 826 /** 827 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware 828 * @from: Free from irq number 829 * @cnt: number of interrupts to free 830 * 831 */ 832 void irq_free_hwirqs(unsigned int from, int cnt) 833 { 834 int i, j; 835 836 for (i = from, j = cnt; j > 0; i++, j--) { 837 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 838 arch_teardown_hwirq(i); 839 } 840 irq_free_descs(from, cnt); 841 } 842 EXPORT_SYMBOL_GPL(irq_free_hwirqs); 843 #endif 844 845 /** 846 * irq_get_next_irq - get next allocated irq number 847 * @offset: where to start the search 848 * 849 * Returns next irq number after offset or nr_irqs if none is found. 850 */ 851 unsigned int irq_get_next_irq(unsigned int offset) 852 { 853 return find_next_bit(allocated_irqs, nr_irqs, offset); 854 } 855 856 struct irq_desc * 857 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 858 unsigned int check) 859 { 860 struct irq_desc *desc = irq_to_desc(irq); 861 862 if (desc) { 863 if (check & _IRQ_DESC_CHECK) { 864 if ((check & _IRQ_DESC_PERCPU) && 865 !irq_settings_is_per_cpu_devid(desc)) 866 return NULL; 867 868 if (!(check & _IRQ_DESC_PERCPU) && 869 irq_settings_is_per_cpu_devid(desc)) 870 return NULL; 871 } 872 873 if (bus) 874 chip_bus_lock(desc); 875 raw_spin_lock_irqsave(&desc->lock, *flags); 876 } 877 return desc; 878 } 879 880 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) 881 { 882 raw_spin_unlock_irqrestore(&desc->lock, flags); 883 if (bus) 884 chip_bus_sync_unlock(desc); 885 } 886 887 int irq_set_percpu_devid_partition(unsigned int irq, 888 const struct cpumask *affinity) 889 { 890 struct irq_desc *desc = irq_to_desc(irq); 891 892 if (!desc) 893 return -EINVAL; 894 895 if (desc->percpu_enabled) 896 return -EINVAL; 897 898 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); 899 900 if (!desc->percpu_enabled) 901 return -ENOMEM; 902 903 if (affinity) 904 desc->percpu_affinity = affinity; 905 else 906 desc->percpu_affinity = cpu_possible_mask; 907 908 irq_set_percpu_devid_flags(irq); 909 return 0; 910 } 911 912 int irq_set_percpu_devid(unsigned int irq) 913 { 914 return irq_set_percpu_devid_partition(irq, NULL); 915 } 916 917 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) 918 { 919 struct irq_desc *desc = irq_to_desc(irq); 920 921 if (!desc || !desc->percpu_enabled) 922 return -EINVAL; 923 924 if (affinity) 925 cpumask_copy(affinity, desc->percpu_affinity); 926 927 return 0; 928 } 929 EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition); 930 931 void kstat_incr_irq_this_cpu(unsigned int irq) 932 { 933 kstat_incr_irqs_this_cpu(irq_to_desc(irq)); 934 } 935 936 /** 937 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu 938 * @irq: The interrupt number 939 * @cpu: The cpu number 940 * 941 * Returns the sum of interrupt counts on @cpu since boot for 942 * @irq. The caller must ensure that the interrupt is not removed 943 * concurrently. 944 */ 945 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 946 { 947 struct irq_desc *desc = irq_to_desc(irq); 948 949 return desc && desc->kstat_irqs ? 950 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; 951 } 952 953 static bool irq_is_nmi(struct irq_desc *desc) 954 { 955 return desc->istate & IRQS_NMI; 956 } 957 958 /** 959 * kstat_irqs - Get the statistics for an interrupt 960 * @irq: The interrupt number 961 * 962 * Returns the sum of interrupt counts on all cpus since boot for 963 * @irq. The caller must ensure that the interrupt is not removed 964 * concurrently. 965 */ 966 unsigned int kstat_irqs(unsigned int irq) 967 { 968 struct irq_desc *desc = irq_to_desc(irq); 969 unsigned int sum = 0; 970 int cpu; 971 972 if (!desc || !desc->kstat_irqs) 973 return 0; 974 if (!irq_settings_is_per_cpu_devid(desc) && 975 !irq_settings_is_per_cpu(desc) && 976 !irq_is_nmi(desc)) 977 return desc->tot_count; 978 979 for_each_possible_cpu(cpu) 980 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); 981 return sum; 982 } 983 984 /** 985 * kstat_irqs_usr - Get the statistics for an interrupt 986 * @irq: The interrupt number 987 * 988 * Returns the sum of interrupt counts on all cpus since boot for @irq. 989 * Contrary to kstat_irqs() this can be called from any context. 990 * It uses rcu since a concurrent removal of an interrupt descriptor is 991 * observing an rcu grace period before delayed_free_desc()/irq_kobj_release(). 992 */ 993 unsigned int kstat_irqs_usr(unsigned int irq) 994 { 995 unsigned int sum; 996 997 rcu_read_lock(); 998 sum = kstat_irqs(irq); 999 rcu_read_unlock(); 1000 return sum; 1001 } 1002