/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	desc->istate = IRQS_DISABLED;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
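/*
 * Usage sketch (illustrative only, not part of this file): with
 * CONFIG_SPARSE_IRQ the irq number to descriptor mapping lives in the
 * radix tree above, so callers of irq_to_desc() must tolerate a NULL
 * result for irq numbers that were never allocated:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	if (!desc)
 *		return -EINVAL;
 */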
static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
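/*
 * Usage sketch (illustrative only): legacy architecture code that still
 * deals in raw irq numbers can demand-allocate a descriptor through
 * irq_to_desc_alloc_node() above; "node" stands in for whatever NUMA
 * node the caller prefers:
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *	if (!desc)
 *		return -ENOMEM;
 */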
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.istate		= IRQS_DISABLED,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		irq_settings_clr_and_set(desc + i, ~0, _IRQ_DEFAULT_INIT_FLAGS);
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
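/*
 * Usage sketch (illustrative only): irq_free_descs() above pairs with
 * irq_alloc_descs() below. A caller that needs a linear block of four
 * interrupt numbers could do the following; numa_node_id() is used here
 * purely as an example of a preferred-node argument:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 */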
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
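/*
 * Usage sketch (illustrative only): core code pairs the lock helpers
 * above when poking at a descriptor; passing bus = true additionally
 * takes the slow bus lock for chips behind a slow bus:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, true);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...
 *	__irq_put_desc_unlock(desc, flags, true);
 */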