/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}
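/*
 * Usage sketch (illustrative only, not called from this file): code that is
 * handed an irq number from outside the core is expected to translate it
 * via irq_to_desc() and check for a missing descriptor:
 *
 *        struct irq_desc *desc = irq_to_desc(irq);
 *
 *        if (!desc)
 *                return -EINVAL;
 *
 * A NULL result means the irq was never allocated or has already been
 * removed from the radix tree above.
 */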
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, owner);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock also protects show_interrupts() and
         * kstat_irqs_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * look up the descriptor.
         */
        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplexing interrupts to do RCU based management of
         * the child interrupts.
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}
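/*
 * Reader-side sketch (an assumption for illustration; no such reader lives
 * in this file): because the final kfree() above is deferred through
 * call_rcu(), a lookup done under rcu_read_lock() remains safe even if the
 * descriptor is being torn down concurrently:
 *
 *        rcu_read_lock();
 *        desc = irq_to_desc(irq);
 *        if (desc)
 *                handle_child(desc);
 *        rcu_read_unlock();
 *
 * handle_child() is a hypothetical consumer; the point is only that the
 * descriptor memory stays valid until the RCU grace period has elapsed.
 */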
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       struct module *owner)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node, owner);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], GFP_KERNEL, node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
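/*
 * Usage sketch (hypothetical demultiplexing driver, not part of this file):
 * a chained handler typically resolves the pending child hwirq itself and
 * then hands the resulting Linux irq number back to the core via
 * generic_handle_irq():
 *
 *        static void foo_demux_handler(struct irq_desc *desc)
 *        {
 *                struct irq_chip *chip = irq_desc_get_chip(desc);
 *                unsigned int child_irq;
 *
 *                chained_irq_enter(chip, desc);
 *                child_irq = driver-specific lookup of the pending child;
 *                generic_handle_irq(child_irq);
 *                chained_irq_exit(chip, desc);
 *        }
 *
 * foo_demux_handler() and its lookup are assumptions for illustration;
 * only generic_handle_irq() is defined here.
 */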
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain: The domain where to perform the lookup
 * @hwirq: The HW irq number to convert to a logical one
 * @lookup: Whether to perform the domain lookup or not
 * @regs: Register file coming from the low-level handling code
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware gives randomly wrong interrupts. Rather
         * than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 * @owner: Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node, owner);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
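/*
 * Usage sketch (illustrative): most callers go through the wrapper macros
 * from <linux/irq.h> rather than calling __irq_alloc_descs() directly,
 * e.g. to allocate and later release a block of four dynamically numbered
 * interrupts on the local node:
 *
 *        int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *        if (irq < 0)
 *                return irq;
 *        ...
 *        irq_free_descs(irq, 4);
 *
 * The wrapper passes THIS_MODULE as @owner; the count of four and the node
 * choice are only example values.
 */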
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt: number of interrupts to allocate
 * @node: node on which to allocate
 *
 * Returns an interrupt number > 0 or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptors and clean up the hardware
 * @from: Free from irq number
 * @cnt: number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
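/*
 * Usage sketch (illustrative): __irq_get_desc_lock() and
 * __irq_put_desc_unlock() are used as a pair by the irq core, typically
 * behind helpers such as irq_get_desc_buslock()/irq_put_desc_busunlock()
 * in internals.h:
 *
 *        unsigned long flags;
 *        struct irq_desc *desc;
 *
 *        desc = __irq_get_desc_lock(irq, &flags, true, IRQ_GET_DESC_CHECK_GLOBAL);
 *        if (!desc)
 *                return -EINVAL;
 *        ... modify the descriptor ...
 *        __irq_put_desc_unlock(desc, flags, true);
 */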
int irq_set_percpu_devid(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq: The interrupt number
 * @cpu: The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq: The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq: The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        irq_lock_sparse();
        sum = kstat_irqs(irq);
        irq_unlock_sparse();
        return sum;
}
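/*
 * Usage sketch (illustrative): a statistics reader running in preemptible
 * process context, e.g. procfs code, uses the _usr variant so the
 * descriptor cannot be freed underneath it:
 *
 *        unsigned int count = kstat_irqs_usr(irq);
 *
 * In contrast, kstat_irqs() and kstat_irqs_cpu() require the caller to
 * hold off concurrent descriptor removal itself, e.g. by holding
 * sparse_irq_lock via irq_lock_sparse().
 */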