// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bugreports caused by random commandline masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

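/*
 * Sparse irq descriptor storage:
 *
 * The descriptors are kept in a maple tree indexed by the Linux irq number.
 * Modifications are serialized by sparse_irq_lock; with CONFIG_SPARSE_IRQ,
 * irq_to_desc() looks descriptors up locklessly under RCU (MT_FLAGS_USE_RCU).
 */
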
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
					MT_FLAGS_ALLOC_RANGE |
					MT_FLAGS_LOCK_EXTERN |
					MT_FLAGS_USE_RCU,
					sparse_irq_lock);

static int irq_find_free_area(unsigned int from, unsigned int cnt)
{
	MA_STATE(mas, &sparse_irqs, 0, 0);

	if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt))
		return -ENOSPC;
	return mas.index;
}

static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc;

	guard(rcu)();
	desc = mt_find(&sparse_irqs, &index, nr_irqs);

	return desc ? irq_desc_get_irq(desc) : nr_irqs;
}

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
}

static void delete_irq_desc(unsigned int irq)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);
	mas_erase(&mas);
}

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;
	char *p = "";
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int c = irq_desc_kstat_cpu(desc, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for_each_action_of_desc(desc, action) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);

static const struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
		else
			desc->istate |= IRQS_SYSFS;
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static const struct kobj_type irq_kobj_type = {
	.release = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return mtree_load(&sparse_irqs, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);
	init_waitqueue_head(&desc->wait_for_threads);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);
	irq_resend_init(desc);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

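/*
 * Final release callback for desc->kobj: frees the masks, the per-CPU
 * statistics and the descriptor itself. For a removed interrupt this runs
 * once the last reference is dropped, i.e. from delayed_free_desc() after
 * the RCU grace period started in free_desc() below.
 */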
static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irqs_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplex interrupts to do rcu based management of
	 * the child interrupts.
	 * This also allows us to use rcu in kstat_irqs_usr().
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			flags |= IRQD_AFFINITY_SET;
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > MAX_SPARSE_IRQS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
		nr_irqs = MAX_SPARSE_IRQS;

	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
		initcnt = MAX_SPARSE_IRQS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

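/*
 * Without CONFIG_SPARSE_IRQ all descriptors come from a statically sized
 * array and are never really freed: free_desc() merely resets a descriptor
 * to its defaults. The maple tree above is still used to track which irq
 * numbers are in use for __irq_alloc_descs() and irq_get_next_irq().
 */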
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		init_waitqueue_head(&desc[i].wait_for_threads);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
		irq_resend_init(&desc[i]);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	delete_irq_desc(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
		irq_insert_desc(start + i, desc);
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	irq_insert_desc(irq, irq_desc + irq);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
	struct irq_data *data;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
		return -EPERM;

	generic_handle_irq_desc(desc);
	return 0;
}

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq: The irq number to handle
 *
 * Returns: 0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);

#ifdef CONFIG_IRQ_DOMAIN
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *			       to a domain.
 * @domain: The domain where to perform the lookup
 * @hwirq: The HW irq number to convert to a logical one
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);

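/*
 * Illustrative sketch only; the foo_* names are made up. The typical caller
 * of generic_handle_domain_irq() is a chained demultiplex handler which
 * reads a pending register and forwards every set bit as a hwirq to its
 * domain:
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct foo_mux *mux = irq_desc_get_handler_data(desc);
 *		unsigned long pending;
 *		unsigned int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = foo_read_pending(mux);
 *		for_each_set_bit(hwirq, &pending, FOO_NR_IRQS)
 *			generic_handle_domain_irq(mux->domain, hwirq);
 *		chained_irq_exit(chip, desc);
 *	}
 */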
/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *				    to a domain from any context.
 * @domain: The domain where to perform the lookup
 * @hwirq: The HW irq number to convert to a logical one
 *
 * Returns: 0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);

/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *			       to a domain.
 * @domain: The domain where to perform the lookup
 * @hwirq: The HW irq number to convert to a logical one
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an NMI context with irq regs
 * initialized.
 */
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
	WARN_ON_ONCE(!in_nmi());
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 * @owner: Owning module (can be NULL)
 * @affinity: Optional pointer to an affinity mask array of size @cnt which
 *	      hints where the irq descriptors should be allocated and which
 *	      default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = irq_find_free_area(from, cnt);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

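/*
 * Illustrative sketch: most callers use the wrapper macros from
 * <linux/irq.h> (irq_alloc_desc(), irq_alloc_descs(), irq_alloc_descs_from())
 * instead of calling __irq_alloc_descs() directly, e.g.:
 *
 *	int base = irq_alloc_descs(-1, 0, 16, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 16);
 */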
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return irq_find_at_or_after(offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	__releases(&desc->lock)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq: The interrupt number
 * @cpu: The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

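/*
 * kstat_irqs() below sums the per-CPU counters for per-CPU(-devid) and NMI
 * interrupts: their handlers can run concurrently on several CPUs, so the
 * lockless, non-atomic desc->tot_count aggregate is not maintained for them
 * and cannot be trusted.
 */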
static bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}

static unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return data_race(desc->tot_count);

	for_each_possible_cpu(cpu)
		sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq: The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor observes an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		lockdep_set_class(&desc->lock, lock_class);
		lockdep_set_class(&desc->request_mutex, request_class);
	}
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif