/*
 *      Intel IO-APIC support for multi-Pentium hosts.
 *
 *      Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *      Many thanks to Stig Venaas for trying out countless experimental
 *      patches and reporting/debugging problems patiently!
 *
 *      (c) 1999, Multiple IO-APIC support, developed by
 *      Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *      further tested and cleaned up by Zach Brown <zab@redhat.com>
 *      and Ingo Molnar <mingo@redhat.com>
 *
 *      Fixes
 *      Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
 *                                      thanks to Eric Gilmore
 *                                      and Rolf G. Tews
 *                                      for testing these extensively
 *      Paul Diefenbaugh        :       Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>      /* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
        for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;
static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
        noioapicquirk = 1;
        noioapicreroute = -1;
#endif
        skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
        /* disable IO-APIC */
        arch_disable_smp_support();
        return 0;
}
early_param("noapic", parse_noapic);

struct irq_pin_list {
        int apic, pin;
        struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
        return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
        struct irq_cfg *cfg;
        int count, node, i;

        if (!legacy_pic->nr_legacy_irqs) {
                nr_irqs_gsi = 0;
                io_apic_irqs = ~0UL;
        }

        cfg = irq_cfgx;
        count = ARRAY_SIZE(irq_cfgx);
        node = cpu_to_node(0);

        /* Make sure the legacy interrupts are marked in the bitmap */
        irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

        for (i = 0; i < count; i++) {
                set_irq_chip_data(i, &cfg[i]);
                zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
                zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
                /*
                 * For legacy IRQs, start by assigning irq0 to irq15 to
                 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
                 */
                if (i < legacy_pic->nr_legacy_irqs) {
                        cfg[i].vector = IRQ0_VECTOR + i;
                        cpumask_set_cpu(0, cfg[i].domain);
                }
        }

        return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
        return get_irq_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
        struct irq_cfg *cfg;

        cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
        if (!cfg)
                return NULL;
        if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
                goto out_cfg;
        if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
                goto out_domain;
        return cfg;
out_domain:
        free_cpumask_var(cfg->domain);
out_cfg:
        kfree(cfg);
        return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
        if (!cfg)
                return;
        set_irq_chip_data(at, NULL);
        free_cpumask_var(cfg->domain);
        free_cpumask_var(cfg->old_domain);
        kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
        return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif
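/*
 * An irq_cfg records the vector allocated to one IRQ and the set of
 * CPUs (the "domain") on which that vector is valid; old_domain keeps
 * the previous set while a vector migration is in flight, so that the
 * old vector can be reclaimed once the move completes.
 */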
static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
        int res = irq_alloc_desc_at(at, node);
        struct irq_cfg *cfg;

        if (res < 0) {
                if (res != -EEXIST)
                        return NULL;
                cfg = get_irq_chip_data(at);
                if (cfg)
                        return cfg;
        }

        cfg = alloc_irq_cfg(at, node);
        if (cfg)
                set_irq_chip_data(at, cfg);
        else
                irq_free_desc(at);
        return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
        return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
        free_irq_cfg(at, cfg);
        irq_free_desc(at);
}

struct io_apic {
        unsigned int index;
        unsigned int unused[3];
        unsigned int data;
        unsigned int unused2[11];
        unsigned int eoi;
};
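/*
 * The layout above describes the IO-APIC's indirect register window:
 * writing a register number to 'index' (offset 0x00) selects which
 * register the 'data' word (offset 0x10) reads or writes.  The 'eoi'
 * register (offset 0x40) is only present on version 0x20+ IO-APICs,
 * so io_apic_eoi() below must only be used on such parts.
 */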
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
        return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
                + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(reg, &io_apic->index);
        return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require us to rewrite the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        if (sis_apic_bug)
                writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
        struct irq_pin_list *entry;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        for_each_irq_pin(entry, cfg->irq_2_pin) {
                unsigned int reg;
                int pin;

                pin = entry->pin;
                reg = io_apic_read(entry->apic, 0x10 + pin*2);
                /* Is the remote IRR bit set? */
                if (reg & IO_APIC_REDIR_REMOTE_IRR) {
                        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
                        return true;
                }
        }
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return false;
}

union entry_union {
        struct { u32 w1, w2; };
        struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
        union entry_union eu;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
        eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first!  If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        union entry_union eu = {{0, 0}};

        eu.entry = e;
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        __ioapic_write_entry(apic, pin, e);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
        unsigned long flags;
        union entry_union eu = { .entry.mask = 1 };

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
        struct irq_pin_list **last, *entry;

        /* don't allow duplicates */
        last = &cfg->irq_2_pin;
        for_each_irq_pin(entry, cfg->irq_2_pin) {
                if (entry->apic == apic && entry->pin == pin)
                        return 0;
                last = &entry->next;
        }

        entry = alloc_irq_pin_list(node);
        if (!entry) {
                printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
                       node, apic, pin);
                return -ENOMEM;
        }
        entry->apic = apic;
        entry->pin = pin;

        *last = entry;
        return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
        if (__add_pin_to_irq_node(cfg, node, apic, pin))
                panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
                                           int oldapic, int oldpin,
                                           int newapic, int newpin)
{
        struct irq_pin_list *entry;

        for_each_irq_pin(entry, cfg->irq_2_pin) {
                if (entry->apic == oldapic && entry->pin == oldpin) {
                        entry->apic = newapic;
                        entry->pin = newpin;
                        /* every one is different, right? */
                        return;
                }
        }

        /* old apic/pin didn't exist, so just add new ones */
        add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
                                 int mask_and, int mask_or,
                                 void (*final)(struct irq_pin_list *entry))
{
        unsigned int reg, pin;

        pin = entry->pin;
        reg = io_apic_read(entry->apic, 0x10 + pin * 2);
        reg &= mask_and;
        reg |= mask_or;
        io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
        if (final)
                final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
                               int mask_and, int mask_or,
                               void (*final)(struct irq_pin_list *entry))
{
        struct irq_pin_list *entry;

        for_each_irq_pin(entry, cfg->irq_2_pin)
                __io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
        __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
                             IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
        __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
                             IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
        /*
         * Synchronize the IO-APIC and the CPU by doing
         * a dummy read from the IO-APIC
         */
        struct io_apic __iomem *io_apic;

        io_apic = io_apic_base(entry->apic);
        readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
        mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
        io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        __unmask_ioapic(cfg);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
        unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
        struct IO_APIC_route_entry entry;

        /* Check delivery_mode to be sure we're not clearing an SMI pin */
        entry = ioapic_read_entry(apic, pin);
        if (entry.delivery_mode == dest_SMI)
                return;
        /*
         * Disable it in the IO-APIC irq-routing table:
         */
        ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
        int apic, pin;

        for (apic = 0; apic < nr_ioapics; apic++)
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
        [0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
        int i, max;
        int ints[MAX_PIRQS+1];

        get_options(str, ARRAY_SIZE(ints), ints);

        apic_printk(APIC_VERBOSE, KERN_INFO
                    "PIRQ redirection, working around broken MP-BIOS.\n");
        max = MAX_PIRQS;
        if (ints[0] < MAX_PIRQS)
                max = ints[0];

        for (i = 0; i < max; i++) {
                apic_printk(APIC_VERBOSE, KERN_DEBUG
                            "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
                /*
                 * PIRQs are mapped upside down, usually.
                 */
                pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
        }
        return 1;
}

__setup("pirq=", ioapic_pirq_setup);
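/*
 * For example, booting with "pirq=10,11" stores 10 in pirq_entries[7]
 * and 11 in pirq_entries[6]: the first value names the IRQ for PIRQ7,
 * the second for PIRQ6, and so on downwards.  A value of 0 disables
 * the corresponding pin; pin_2_irq() below consults this table for
 * IO-APIC pins 16-23.
 */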
#endif /* CONFIG_X86_32 */

struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
        int apic;
        struct IO_APIC_route_entry **ioapic_entries;

        ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
                                 GFP_KERNEL);
        if (!ioapic_entries)
                return NULL;

        for (apic = 0; apic < nr_ioapics; apic++) {
                ioapic_entries[apic] =
                        kzalloc(sizeof(struct IO_APIC_route_entry) *
                                nr_ioapic_registers[apic], GFP_KERNEL);
                if (!ioapic_entries[apic])
                        goto nomem;
        }

        return ioapic_entries;

nomem:
        while (--apic >= 0)
                kfree(ioapic_entries[apic]);
        kfree(ioapic_entries);

        return NULL;
}

/*
 * Saves all the IO-APIC RTEs
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return -ENOMEM;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        return -ENOMEM;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        ioapic_entries[apic][pin] =
                                ioapic_read_entry(apic, pin);
        }

        return 0;
}

/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        break;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;

                        entry = ioapic_entries[apic][pin];
                        if (!entry.mask) {
                                entry.mask = 1;
                                ioapic_write_entry(apic, pin, entry);
                        }
                }
        }
}

/*
 * Restore IO APIC entries which were saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return -ENOMEM;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        return -ENOMEM;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        ioapic_write_entry(apic, pin,
                                           ioapic_entries[apic][pin]);
        }
        return 0;
}

void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic;

        for (apic = 0; apic < nr_ioapics; apic++)
                kfree(ioapic_entries[apic]);

        kfree(ioapic_entries);
}
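/*
 * The alloc/save/mask/restore/free helpers above are used together
 * when the routing tables must survive a reprogramming step, e.g.
 * while enabling interrupt remapping: save the RTEs, mask them, switch
 * modes, then either restore the saved entries or rewrite them in the
 * new format.
 */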
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].irqtype == type &&
                    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
                     mp_irqs[i].dstapic == MP_APIC_ALL) &&
                    mp_irqs[i].dstirq == pin)
                        return i;

        return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].irqtype == type) &&
                    (mp_irqs[i].srcbusirq == irq))

                        return mp_irqs[i].dstirq;
        }
        return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].irqtype == type) &&
                    (mp_irqs[i].srcbusirq == irq))
                        break;
        }
        if (i < mp_irq_entries) {
                int apic;

                for (apic = 0; apic < nr_ioapics; apic++) {
                        if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
                                return apic;
                }
        }

        return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
        if (irq < legacy_pic->nr_legacy_irqs) {
                unsigned int port = 0x4d0 + (irq >> 3);
                return (inb(port) >> (irq & 7)) & 1;
        }
        apic_printk(APIC_VERBOSE, KERN_INFO
                    "Broken MPtable reports ISA irq %d\n", irq);
        return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)        (0)
#define default_ISA_polarity(idx)       (0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)       (EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)      default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)        (1)
#define default_PCI_polarity(idx)       (1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)        (1)
#define default_MCA_polarity(idx)       default_ISA_polarity(idx)
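/*
 * In the MP table's 'irqflag' field, bits 1:0 encode the polarity
 * (00 = conforms to the bus, 01 = active high, 10 = reserved,
 * 11 = active low) and bits 3:2 the trigger mode (00 = conforms to the
 * bus, 01 = edge, 10 = reserved, 11 = level).  The two decoders below
 * follow this encoding, falling back to the per-bus defaults above for
 * "conforms".
 */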
static int MPBIOS_polarity(int idx)
{
        int bus = mp_irqs[idx].srcbus;
        int polarity;

        /*
         * Determine IRQ line polarity (high active or low active):
         */
        switch (mp_irqs[idx].irqflag & 3)
        {
                case 0: /* conforms, ie. bus-type dependent polarity */
                        if (test_bit(bus, mp_bus_not_pci))
                                polarity = default_ISA_polarity(idx);
                        else
                                polarity = default_PCI_polarity(idx);
                        break;
                case 1: /* high active */
                {
                        polarity = 0;
                        break;
                }
                case 2: /* reserved */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        polarity = 1;
                        break;
                }
                case 3: /* low active */
                {
                        polarity = 1;
                        break;
                }
                default: /* invalid */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        polarity = 1;
                        break;
                }
        }
        return polarity;
}

static int MPBIOS_trigger(int idx)
{
        int bus = mp_irqs[idx].srcbus;
        int trigger;

        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
        switch ((mp_irqs[idx].irqflag>>2) & 3)
        {
                case 0: /* conforms, ie. bus-type dependent */
                        if (test_bit(bus, mp_bus_not_pci))
                                trigger = default_ISA_trigger(idx);
                        else
                                trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                        switch (mp_bus_id_to_type[bus]) {
                                case MP_BUS_ISA: /* ISA pin */
                                {
                                        /* set before the switch */
                                        break;
                                }
                                case MP_BUS_EISA: /* EISA pin */
                                {
                                        trigger = default_EISA_trigger(idx);
                                        break;
                                }
                                case MP_BUS_PCI: /* PCI pin */
                                {
                                        /* set before the switch */
                                        break;
                                }
                                case MP_BUS_MCA: /* MCA pin */
                                {
                                        trigger = default_MCA_trigger(idx);
                                        break;
                                }
                                default:
                                {
                                        printk(KERN_WARNING "broken BIOS!!\n");
                                        trigger = 1;
                                        break;
                                }
                        }
#endif
                        break;
                case 1: /* edge */
                {
                        trigger = 0;
                        break;
                }
                case 2: /* reserved */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        trigger = 1;
                        break;
                }
                case 3: /* level */
                {
                        trigger = 1;
                        break;
                }
                default: /* invalid */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        trigger = 0;
                        break;
                }
        }
        return trigger;
}

static inline int irq_polarity(int idx)
{
        return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
        return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
        int irq;
        int bus = mp_irqs[idx].srcbus;

        /*
         * Debugging check, we are in big trouble if this message pops up!
         */
        if (mp_irqs[idx].dstirq != pin)
                printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

        if (test_bit(bus, mp_bus_not_pci)) {
                irq = mp_irqs[idx].srcbusirq;
        } else {
                u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

                if (gsi >= NR_IRQS_LEGACY)
                        irq = gsi;
                else
                        irq = gsi_top + gsi;
        }

#ifdef CONFIG_X86_32
        /*
         * PCI IRQ command line redirection. Yes, limits are hardcoded.
         */
        if ((pin >= 16) && (pin <= 23)) {
                if (pirq_entries[pin-16] != -1) {
                        if (!pirq_entries[pin-16]) {
                                apic_printk(APIC_VERBOSE, KERN_DEBUG
                                            "disabling PIRQ%d\n", pin-16);
                        } else {
                                irq = pirq_entries[pin-16];
                                apic_printk(APIC_VERBOSE, KERN_DEBUG
                                            "using PIRQ%d -> IRQ %d\n",
                                            pin-16, irq);
                        }
                }
        }
#endif

        return irq;
}
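/*
 * For PCI buses, the MP table packs the source into 'srcbusirq': bits
 * 1:0 hold the interrupt pin (0 = INTA# ... 3 = INTD#) and bits 6:2
 * the device number.  IO_APIC_get_PCI_irq_vector() below relies on
 * exactly this layout when it matches a (bus, slot, pin) triple.
 */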
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
                               struct io_apic_irq_attr *irq_attr)
{
        int apic, i, best_guess = -1;

        apic_printk(APIC_DEBUG,
                    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
                    bus, slot, pin);
        if (test_bit(bus, mp_bus_not_pci)) {
                apic_printk(APIC_VERBOSE,
                            "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
                return -1;
        }
        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                for (apic = 0; apic < nr_ioapics; apic++)
                        if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
                            mp_irqs[i].dstapic == MP_APIC_ALL)
                                break;

                if (!test_bit(lbus, mp_bus_not_pci) &&
                    !mp_irqs[i].irqtype &&
                    (bus == lbus) &&
                    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
                        int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;

                        if (pin == (mp_irqs[i].srcbusirq & 3)) {
                                set_io_apic_irq_attr(irq_attr, apic,
                                                     mp_irqs[i].dstirq,
                                                     irq_trigger(i),
                                                     irq_polarity(i));
                                return irq;
                        }
                        /*
                         * Use the first all-but-pin matching entry as a
                         * best-guess fuzzy result for broken mptables.
                         */
                        if (best_guess < 0) {
                                set_io_apic_irq_attr(irq_attr, apic,
                                                     mp_irqs[i].dstirq,
                                                     irq_trigger(i),
                                                     irq_polarity(i));
                                best_guess = irq;
                        }
                }
        }
        return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
        /* Used to ensure that the online set of cpus does not change
         * during assign_irq_vector.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 8;
        unsigned int old_vector;
        int cpu, err;
        cpumask_var_t tmp_mask;

        if (cfg->move_in_progress)
                return -EBUSY;

        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
                return -ENOMEM;

        old_vector = cfg->vector;
        if (old_vector) {
                cpumask_and(tmp_mask, mask, cpu_online_mask);
                cpumask_and(tmp_mask, cfg->domain, tmp_mask);
                if (!cpumask_empty(tmp_mask)) {
                        free_cpumask_var(tmp_mask);
                        return 0;
                }
        }

        /* Only try and allocate irqs on cpus that are present */
        err = -ENOSPC;
        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                int new_cpu;
                int vector, offset;

                apic->vector_allocation_domain(cpu, tmp_mask);

                vector = current_vector;
                offset = current_offset;
next:
                vector += 8;
                if (vector >= first_system_vector) {
                        /* If out of vectors on large boxen, must share them. */
                        offset = (offset + 1) % 8;
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
                if (unlikely(current_vector == vector))
                        continue;

                if (test_bit(vector, used_vectors))
                        goto next;

                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
                if (old_vector) {
                        cfg->move_in_progress = 1;
                        cpumask_copy(cfg->old_domain, cfg->domain);
                }
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cpumask_copy(cfg->domain, tmp_mask);
                err = 0;
                break;
        }
        free_cpumask_var(tmp_mask);
        return err;
}
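/*
 * A worked example of the scan above, assuming FIRST_EXTERNAL_VECTOR
 * is 0x20 and VECTOR_OFFSET_START is 1 (the values in contemporary
 * trees): the first allocations come out as 0x29, 0x31, 0x39, ...,
 * i.e. stepping by 8 so successive vectors spread across priority
 * levels (a level is 16 vectors, vector >> 4) instead of filling one
 * level first, and the odd offset keeps the scan clear of the 0x80
 * syscall gate.  Only when the scan reaches first_system_vector does
 * it advance 'offset' and wrap back to FIRST_EXTERNAL_VECTOR + offset.
 */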
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        int err;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, cfg, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
        int cpu, vector;

        BUG_ON(!cfg->vector);

        vector = cfg->vector;
        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = -1;

        cfg->vector = 0;
        cpumask_clear(cfg->domain);

        if (likely(!cfg->move_in_progress))
                return;
        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = -1;
                        break;
                }
        }
        cfg->move_in_progress = 0;
}
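/*
 * vector_irq is the per-cpu inverse of all this bookkeeping: it maps a
 * vector number back to the IRQ occupying it on that cpu, with -1
 * meaning free.  __setup_vector_irq() below rebuilds that table when a
 * cpu comes online.
 */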
void __setup_vector_irq(int cpu)
{
        /* Initialize vector_irq on a new cpu */
        int irq, vector;
        struct irq_cfg *cfg;

        /*
         * vector_lock will make sure that we don't run into irq vector
         * assignments that might be happening on another cpu in parallel,
         * while we setup our initial vector to irq mappings.
         */
        raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_active_irq(irq) {
                cfg = get_irq_chip_data(irq);
                if (!cfg)
                        continue;
                /*
                 * If it is a legacy IRQ handled by the legacy PIC, this cpu
                 * will be part of the irq_cfg's domain.
                 */
                if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
                        cpumask_set_cpu(cpu, cfg->domain);

                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                irq = per_cpu(vector_irq, cpu)[vector];
                if (irq < 0)
                        continue;

                cfg = irq_cfg(irq);
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        per_cpu(vector_irq, cpu)[vector] = -1;
        }
        raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO     -1
#define IOAPIC_EDGE     0
#define IOAPIC_LEVEL    1

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
        int apic, idx, pin;

        for (apic = 0; apic < nr_ioapics; apic++) {
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        idx = find_irq_entry(apic, pin, mp_INT);
                        if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
                                return irq_trigger(idx);
                }
        }
        /*
         * nonexistent IRQs are edge default
         */
        return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
        return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
{
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
                irq_set_status_flags(irq, IRQ_LEVEL);
        else
                irq_clear_status_flags(irq, IRQ_LEVEL);

        if (irq_remapped(get_irq_chip_data(irq))) {
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
                if (trigger)
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_fasteoi_irq,
                                                      "fasteoi");
                else
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_edge_irq, "edge");
                return;
        }

        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_fasteoi_irq,
                                              "fasteoi");
        else
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_edge_irq, "edge");
}
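/*
 * With interrupt remapping enabled, the RTE written below no longer
 * carries a destination APIC ID and real vector.  It is reinterpreted
 * as an IR_IO_APIC_route_entry: 'format' = 1 flags the remappable
 * layout, 'index'/'index2' select the IRTE that holds the real vector
 * and destination, and the RTE's vector field is reused to store the
 * pin number so the irq handler can issue the explicit io-apic EOI.
 */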
static int setup_ioapic_entry(int apic_id, int irq,
                              struct IO_APIC_route_entry *entry,
                              unsigned int destination, int trigger,
                              int polarity, int vector, int pin)
{
        /*
         * add it to the IO-APIC irq-routing table:
         */
        memset(entry, 0, sizeof(*entry));

        if (intr_remapping_enabled) {
                struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
                struct irte irte;
                struct IR_IO_APIC_route_entry *ir_entry =
                        (struct IR_IO_APIC_route_entry *) entry;
                int index;

                if (!iommu)
                        panic("No mapping iommu for ioapic %d\n", apic_id);

                index = alloc_irte(iommu, irq, 1);
                if (index < 0)
                        panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

                prepare_irte(&irte, vector, destination);

                /* Set source-id of interrupt request */
                set_ioapic_sid(&irte, apic_id);

                modify_irte(irq, &irte);

                ir_entry->index2 = (index >> 15) & 0x1;
                ir_entry->zero = 0;
                ir_entry->format = 1;
                ir_entry->index = (index & 0x7fff);
                /*
                 * IO-APIC RTE will be configured with virtual vector.
                 * irq handler will do the explicit EOI to the io-apic.
                 */
                ir_entry->vector = pin;
        } else {
                entry->delivery_mode = apic->irq_delivery_mode;
                entry->dest_mode = apic->irq_dest_mode;
                entry->dest = destination;
                entry->vector = vector;
        }

        entry->mask = 0;                /* enable IRQ */
        entry->trigger = trigger;
        entry->polarity = polarity;

        /* Mask level triggered irqs.
         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
         */
        if (trigger)
                entry->mask = 1;
        return 0;
}

static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
                             struct irq_cfg *cfg, int trigger, int polarity)
{
        struct IO_APIC_route_entry entry;
        unsigned int dest;

        if (!IO_APIC_IRQ(irq))
                return;
        /*
         * For legacy irqs, cfg->domain starts with cpu 0 for legacy
         * controllers like the 8259.  Now that the IO-APIC can handle
         * this irq, update cfg->domain.
         */
        if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
                apic->vector_allocation_domain(0, cfg->domain);

        if (assign_irq_vector(irq, cfg, apic->target_cpus()))
                return;

        dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                    "IRQ %d Mode:%i Active:%i)\n",
                    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
                    irq, trigger, polarity);

        if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
                               dest, trigger, polarity, cfg->vector, pin)) {
                printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
                       mp_ioapics[apic_id].apicid, pin);
                __clear_irq_vector(irq, cfg);
                return;
        }

        ioapic_register_intr(irq, trigger);
        if (irq < legacy_pic->nr_legacy_irqs)
                legacy_pic->mask(irq);

        ioapic_write_entry(apic_id, pin, entry);
}

static struct {
        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static void __init setup_IO_APIC_irqs(void)
{
        int apic_id, pin, idx, irq, notcon = 0;
        int node = cpu_to_node(0);
        struct irq_cfg *cfg;

        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

        for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
        for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
                idx = find_irq_entry(apic_id, pin, mp_INT);
                if (idx == -1) {
                        if (!notcon) {
                                notcon = 1;
                                apic_printk(APIC_VERBOSE,
                                            KERN_DEBUG " %d-%d",
                                            mp_ioapics[apic_id].apicid, pin);
                        } else
                                apic_printk(APIC_VERBOSE, " %d-%d",
                                            mp_ioapics[apic_id].apicid, pin);
                        continue;
                }
                if (notcon) {
                        apic_printk(APIC_VERBOSE,
                                    " (apicid-pin) not connected\n");
                        notcon = 0;
                }

                irq = pin_2_irq(idx, apic_id, pin);

                if ((apic_id > 0) && (irq > 16))
                        continue;

                /*
                 * Skip the timer IRQ if there's a quirk handler
                 * installed and if it returns 1:
                 */
                if (apic->multi_timer_check &&
                    apic->multi_timer_check(apic_id, irq))
                        continue;

                cfg = alloc_irq_and_cfg_at(irq, node);
                if (!cfg)
                        continue;

                add_pin_to_irq_node(cfg, node, apic_id, pin);
                /*
                 * don't mark it in pin_programmed, so later acpi could
                 * set it correctly when irq < 16
                 */
                setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
                                 irq_polarity(idx));
        }

        if (notcon)
                apic_printk(APIC_VERBOSE,
                            " (apicid-pin) not connected\n");
}
/*
 * For GSIs that are not on the first ioapic and could not go through
 * acpi_register_gsi(), like the special SCI on the IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
        int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
        struct irq_cfg *cfg;

        /*
         * Convert 'gsi' to 'ioapic.pin'.
         */
        apic_id = mp_find_ioapic(gsi);
        if (apic_id < 0)
                return;

        pin = mp_find_ioapic_pin(apic_id, gsi);
        idx = find_irq_entry(apic_id, pin, mp_INT);
        if (idx == -1)
                return;

        irq = pin_2_irq(idx, apic_id, pin);

        /* Only handle the non legacy irqs on secondary ioapics */
        if (apic_id == 0 || irq < NR_IRQS_LEGACY)
                return;

        cfg = alloc_irq_and_cfg_at(irq, node);
        if (!cfg)
                return;

        add_pin_to_irq_node(cfg, node, apic_id, pin);

        if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
                pr_debug("Pin %d-%d already programmed\n",
                         mp_ioapics[apic_id].apicid, pin);
                return;
        }
        set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

        setup_ioapic_irq(apic_id, pin, irq, cfg,
                         irq_trigger(idx), irq_polarity(idx));
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
                                        int vector)
{
        struct IO_APIC_route_entry entry;

        if (intr_remapping_enabled)
                return;

        memset(&entry, 0, sizeof(entry));

        /*
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
        entry.dest_mode = apic->irq_dest_mode;
        entry.mask = 0;                 /* don't mask IRQ for edge */
        entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
        entry.delivery_mode = apic->irq_delivery_mode;
        entry.polarity = 0;
        entry.trigger = 0;
        entry.vector = vector;

        /*
         * The timer IRQ doesn't have to know that behind the
         * scene we may have a 8259A-master in AEOI mode ...
         */
        set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(apic_id, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
        int apic, i;
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
        union IO_APIC_reg_02 reg_02;
        union IO_APIC_reg_03 reg_03;
        unsigned long flags;
        struct irq_cfg *cfg;
        unsigned int irq;

        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
                       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

        /*
         * We are a bit conservative about what we expect.  We have to
         * know about every hardware change ASAP.
         */
        printk(KERN_INFO "testing the IO APIC.......................\n");

        for (apic = 0; apic < nr_ioapics; apic++) {

                raw_spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic, 0);
                reg_01.raw = io_apic_read(apic, 1);
                if (reg_01.bits.version >= 0x10)
                        reg_02.raw = io_apic_read(apic, 2);
                if (reg_01.bits.version >= 0x20)
                        reg_03.raw = io_apic_read(apic, 3);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);

                printk("\n");
                printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
                printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
                printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
                printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
                printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

                printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
                printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
                printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
                printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

                /*
                 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
                 * but the value of reg_02 is read as the previous read register
                 * value, so ignore it if reg_02 == reg_01.
                 */
                if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
                        printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
                        printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
                }

                /*
                 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
                 * or reg_03, but the value of reg_0[23] is read as the previous read
                 * register value, so ignore it if reg_03 == reg_0[12].
                 */
                if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
                    reg_03.raw != reg_01.raw) {
                        printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
                        printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
                }

                printk(KERN_DEBUG ".... IRQ redirection table:\n");

                printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
                                  " Stat Dmod Deli Vect:\n");

                for (i = 0; i <= reg_01.bits.entries; i++) {
                        struct IO_APIC_route_entry entry;

                        entry = ioapic_read_entry(apic, i);

                        printk(KERN_DEBUG " %02x %03X ",
                               i,
                               entry.dest
                        );

                        printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
                               entry.mask,
                               entry.trigger,
                               entry.irr,
                               entry.polarity,
                               entry.delivery_status,
                               entry.dest_mode,
                               entry.delivery_mode,
                               entry.vector
                        );
                }
        }
        printk(KERN_DEBUG "IRQ to pin mappings:\n");
        for_each_active_irq(irq) {
                struct irq_pin_list *entry;

                cfg = get_irq_chip_data(irq);
                if (!cfg)
                        continue;
                entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
                printk(KERN_DEBUG "IRQ%d ", irq);
                for_each_irq_pin(entry, cfg->irq_2_pin)
                        printk("-> %d:%d", entry->apic, entry->pin);
                printk("\n");
        }

        printk(KERN_INFO ".................................... done.\n");

        return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                printk(KERN_CONT "%08x", apic_read(base + i*0x10));

        printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
               smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        printk(KERN_INFO "... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();
        v = apic_read(APIC_TASKPRI);
        printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
                               v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

        printk(KERN_DEBUG "... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        printk(KERN_DEBUG "... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        printk(KERN_DEBUG "... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
        printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {                       /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {                       /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
                }
        }
        printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!legacy_pic->nr_legacy_irqs)
                return;

        printk(KERN_DEBUG "\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
        v = inb(0xa0) << 8 | inb(0x20);
        printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* don't print out if apic is not there */
        if (!cpu_has_apic && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APIC();

        return 0;
}

fs_initcall(print_ICs);


/* Where, if anywhere, is the i8259 connected in ExtINT mode? */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
        int i8259_apic, i8259_pin;
        int apic;

        if (!legacy_pic->nr_legacy_irqs)
                return;

        for (apic = 0; apic < nr_ioapics; apic++) {
                int pin;
                /* See if any of the pins is in ExtINT mode */
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;
                        entry = ioapic_read_entry(apic, pin);

                        /* If the interrupt line is enabled and in ExtInt mode
                         * I have found the pin where the i8259 is connected.
                         */
                        if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                                ioapic_i8259.apic = apic;
                                ioapic_i8259.pin = pin;
                                goto found_i8259;
                        }
                }
        }
 found_i8259:
        /* Look to see if the MP table has reported the ExtINT */
        /* If we could not find the appropriate pin by looking at the ioapics,
         * the i8259 is probably not connected to an ioapic, but give the
         * mptable a chance anyway.
         */
        i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
        i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
        /* Trust the MP table if nothing is setup in the hardware */
        if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
                printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
                ioapic_i8259.pin = i8259_pin;
                ioapic_i8259.apic = i8259_apic;
        }
        /* Complain if the MP table and the hardware disagree */
        if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
            (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
        {
                printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
        }

        /*
         * Do not trust the IO-APIC being empty at bootup
         */
        clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
        /*
         * Clear the IO-APIC before rebooting:
         */
        clear_IO_APIC();

        if (!legacy_pic->nr_legacy_irqs)
                return;

        /*
         * If the i8259 is routed through an IOAPIC, put that IOAPIC in
         * virtual wire mode so legacy interrupts can be delivered.
         *
         * With interrupt-remapping, for now we will use virtual wire A
         * mode, as virtual wire B is a little more complex (it needs to
         * configure the IOAPIC RTE as well as the interrupt-remapping
         * table entry).  As this gets called during crash dump, keep it
         * simple for now.
         */
        if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
                struct IO_APIC_route_entry entry;

                memset(&entry, 0, sizeof(entry));
                entry.mask            = 0; /* Enabled */
                entry.trigger         = 0; /* Edge */
                entry.irr             = 0;
                entry.polarity        = 0; /* High */
                entry.delivery_status = 0;
                entry.dest_mode       = 0; /* Physical */
                entry.delivery_mode   = dest_ExtINT; /* ExtInt */
                entry.vector          = 0;
                entry.dest            = read_apic_id();

                /*
                 * Add it to the IO-APIC irq-routing table:
                 */
                ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
        }

        /*
         * Use virtual wire A mode when interrupt remapping is enabled.
         */
        if (cpu_has_apic || apic_from_smp_config())
                disconnect_bsp_APIC(!intr_remapping_enabled &&
                                    ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
        union IO_APIC_reg_00 reg_00;
        physid_mask_t phys_id_present_map;
        int apic_id;
        int i;
        unsigned char old_id;
        unsigned long flags;

        /*
         * This is broken; anything with a real cpu count has to
         * circumvent this idiocy regardless.
         */
        apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

        /*
         * Set the IOAPIC ID to the value stored in the MPC table.
         */
        for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

                /* Read the register 0 value */
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic_id, 0);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);

                old_id = mp_ioapics[apic_id].apicid;

                if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                               apic_id, mp_ioapics[apic_id].apicid);
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                               reg_00.bits.ID);
                        mp_ioapics[apic_id].apicid = reg_00.bits.ID;
                }

                /*
                 * Sanity check, is the ID really free? Every APIC in a
                 * system must have a unique ID or we get lots of nice
                 * 'stuck on smp_invalidate_needed IPI wait' messages.
                 */
                if (apic->check_apicid_used(&phys_id_present_map,
                                            mp_ioapics[apic_id].apicid)) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                               apic_id, mp_ioapics[apic_id].apicid);
                        for (i = 0; i < get_physical_broadcast(); i++)
                                if (!physid_isset(i, phys_id_present_map))
                                        break;
                        if (i >= get_physical_broadcast())
                                panic("Max APIC ID exceeded!\n");
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                               i);
                        physid_set(i, phys_id_present_map);
                        mp_ioapics[apic_id].apicid = i;
                } else {
                        physid_mask_t tmp;
                        apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
                        apic_printk(APIC_VERBOSE, "Setting %d in the "
                                    "phys_id_present_map\n",
                                    mp_ioapics[apic_id].apicid);
                        physids_or(phys_id_present_map, phys_id_present_map, tmp);
                }

                /*
                 * We need to adjust the IRQ routing table
                 * if the ID changed.
                 */
                if (old_id != mp_ioapics[apic_id].apicid)
                        for (i = 0; i < mp_irq_entries; i++)
                                if (mp_irqs[i].dstapic == old_id)
                                        mp_irqs[i].dstapic
                                                = mp_ioapics[apic_id].apicid;

                /*
                 * Read the right value from the MPC table and
                 * write it into the ID register.
                 */
                apic_printk(APIC_VERBOSE, KERN_INFO
                            "...changing IO-APIC physical APIC ID to %d ...",
                            mp_ioapics[apic_id].apicid);

                reg_00.bits.ID = mp_ioapics[apic_id].apicid;
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                io_apic_write(apic_id, 0, reg_00.raw);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);

                /*
                 * Sanity check
                 */
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic_id, 0);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);
                if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
                        printk("could not set ID!\n");
                else
                        apic_printk(APIC_VERBOSE, " ok.\n");
        }
}

void __init setup_ioapic_ids_from_mpc(void)
{

        if (acpi_ioapic)
                return;
        /*
         * Don't check I/O APIC IDs for xAPIC systems.  They have
         * no meaning without the serial APIC bus.
         */
        if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
            || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
                return;
        setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
        no_timer_check = 1;
        return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ.  We do the following to work around the situation:
 *
 *      - timer IRQ defaults to IO-APIC IRQ
 *      - if this function detects that timer IRQs are defunct, then we fall
 *        back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
        unsigned long t1 = jiffies;
        unsigned long flags;

        if (no_timer_check)
                return 1;

        local_save_flags(flags);
        local_irq_enable();
        /* Let ten ticks pass... */
        mdelay((10 * 1000) / HZ);
        local_irq_restore(flags);

        /*
         * Expect a few ticks at least, to be sure some possible
         * glue logic does not lock up after one or two first
         * ticks in a non-ExtINT mode.  Also the local APIC
         * might have cached one ExtINT interrupt.  Finally, at
         * least one tick may be lost due to delays.
         */

        /* jiffies wrap? */
        if (time_after(jiffies, t1 + 4))
                return 1;
        return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled.  These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU.  It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed, but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry.
		 */
		if (!irq_remapped(cfg))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}

/*
 * Either sets data->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves data->affinity untouched.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	if (assign_irq_vector(data->irq, data->chip_data, mask))
		return -1;

	cpumask_copy(data->affinity, mask);

	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
	return 0;
}

static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}

#ifdef CONFIG_INTR_REMAP

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE, followed by a
 * flush of the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 */
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

#else
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	return 0;
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__get_cpu_var(vector_irq)[vector] = -1;
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_cfg *cfg) { }
#endif

static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	move_native_irq(data->irq);
	ack_APIC_irq();
}

atomic_t irq_mis_count;

/*
 * IO-APIC versions below 0x20 don't support the EOI register.
 * For the record, here is the information about the various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is a documentation error; these ICH chips
 * actually use io-apics of version 0x20.
 *
 * For IO-APICs with an EOI register, we use it to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with the RTE masked during the
 * operation.
 */
static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (mp_ioapics[entry->apic].apicver >= 0x20) {
			/*
			 * Intr-remapping uses the pin number as the virtual
			 * vector in the RTE. The actual vector is programmed
			 * in the intr-remapping table entry. Hence for the
			 * io-apic EOI we use the pin number.
			 */
			if (irq_remapped(cfg))
				io_apic_eoi(entry->apic, entry->pin);
			else
				io_apic_eoi(entry->apic, cfg->vector);
		} else {
			__mask_and_edge_IO_APIC_irq(entry);
			__unmask_and_level_IO_APIC_irq(entry);
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, do_unmask_irq = 0, irq = data->irq;
	unsigned long v;

	irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_ioapic(cfg);
	}
#endif

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets). Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as an edge-triggered one but the respective
	 * IRR bit gets set nevertheless. As a result the I/O unit expects an
	 * EOI message but it will never arrive and further interrupts are
	 * blocked from the source. The exact reason is so far unknown, but
	 * the phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually. We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt. We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul. --macro
	 *
	 * Also, in the case when a cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case also
	 * the level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcast to the IO-APICs, which would clear the remoteIRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
	 * supporting the EOI register, we do an explicit EOI to clear the
	 * remote IRR, and on IO-APICs which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context (starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming it), I
		 * don't trust the Remote IRR bit to be completely
		 * accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_ioapic(cfg);
	}
}

#ifdef CONFIG_INTR_REMAP
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}
#endif /* CONFIG_INTR_REMAP */

static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ioapic_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name			= "IR-IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
#ifdef CONFIG_INTR_REMAP
	.irq_ack		= ir_ack_apic_edge,
	.irq_eoi		= ir_ack_apic_level,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ir_ioapic_set_affinity,
#endif
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well..
				 */
				set_irq_chip(irq, &no_irq_chip);
		}
	}
}

/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

/*
 * This looks a bit hackish, but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}

static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
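 *
 * In outline, the ladder below tries IRQ0 four ways, in order: through
 * the IO-APIC pin reported by the MP table, through the 8259A cascade
 * pin, as a local APIC virtual wire, and finally as an 8259A ExtINT;
 * timer_irq_works() is consulted after each step.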
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = get_irq_chip_data(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC. Also,
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input. So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/*
			 * For an edge-triggered timer, setup_ioapic_irq()
			 * already left the pin unmasked, so we only need to
			 * unmask it here if it is level-triggered. (Do we
			 * really have a level-triggered timer?)
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); 2834 } 2835 2836 apic_printk(APIC_QUIET, KERN_INFO 2837 "...trying to set up timer as Virtual Wire IRQ...\n"); 2838 2839 lapic_register_intr(0); 2840 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2841 legacy_pic->unmask(0); 2842 2843 if (timer_irq_works()) { 2844 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2845 goto out; 2846 } 2847 local_irq_disable(); 2848 legacy_pic->mask(0); 2849 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2850 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2851 2852 apic_printk(APIC_QUIET, KERN_INFO 2853 "...trying to set up timer as ExtINT IRQ...\n"); 2854 2855 legacy_pic->init(0); 2856 legacy_pic->make_irq(0); 2857 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2858 2859 unlock_ExtINT_logic(); 2860 2861 if (timer_irq_works()) { 2862 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2863 goto out; 2864 } 2865 local_irq_disable(); 2866 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2867 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2868 "report. Then try booting with the 'noapic' option.\n"); 2869 out: 2870 local_irq_restore(flags); 2871 } 2872 2873 /* 2874 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2875 * to devices. However there may be an I/O APIC pin available for 2876 * this interrupt regardless. The pin may be left unconnected, but 2877 * typically it will be reused as an ExtINT cascade interrupt for 2878 * the master 8259A. In the MPS case such a pin will normally be 2879 * reported as an ExtINT interrupt in the MP table. With ACPI 2880 * there is no provision for ExtINT interrupts, and in the absence 2881 * of an override it would be treated as an ordinary ISA I/O APIC 2882 * interrupt, that is edge-triggered and unmasked by default. We 2883 * used to do this, but it caused problems on some systems because 2884 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2885 * the same ExtINT cascade interrupt to drive the local APIC of the 2886 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2887 * the I/O APIC in all cases now. No actual device should request 2888 * it anyway. --macro 2889 */ 2890 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2891 2892 void __init setup_IO_APIC(void) 2893 { 2894 2895 /* 2896 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2897 */ 2898 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2899 2900 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2901 /* 2902 * Set up IO-APIC IRQ routing. 2903 */ 2904 x86_init.mpparse.setup_ioapic_ids(); 2905 2906 sync_Arb_IDs(); 2907 setup_IO_APIC_irqs(); 2908 init_IO_APIC_traps(); 2909 if (legacy_pic->nr_legacy_irqs) 2910 check_timer(); 2911 } 2912 2913 /* 2914 * Called after all the initialization is done. 
If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */

static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int from, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int ret = 0;
	int irq;

	if (from < nr_irqs_gsi)
		from = nr_irqs_gsi;

	irq = alloc_irq_from(from, node);
	if (irq < 0)
		return 0;
	cfg = alloc_irq_cfg(irq, node);
	if (!cfg) {
		free_irq_at(irq, NULL);
		return 0;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
		ret = irq;
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		set_irq_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		free_irq_at(irq, cfg);
	}
	return ret;
}

int create_irq(void)
{
	int node =
cpu_to_node(0); 3050 unsigned int irq_want; 3051 int irq; 3052 3053 irq_want = nr_irqs_gsi; 3054 irq = create_irq_nr(irq_want, node); 3055 3056 if (irq == 0) 3057 irq = -1; 3058 3059 return irq; 3060 } 3061 3062 void destroy_irq(unsigned int irq) 3063 { 3064 struct irq_cfg *cfg = get_irq_chip_data(irq); 3065 unsigned long flags; 3066 3067 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3068 3069 if (irq_remapped(cfg)) 3070 free_irte(irq); 3071 raw_spin_lock_irqsave(&vector_lock, flags); 3072 __clear_irq_vector(irq, cfg); 3073 raw_spin_unlock_irqrestore(&vector_lock, flags); 3074 free_irq_at(irq, cfg); 3075 } 3076 3077 /* 3078 * MSI message composition 3079 */ 3080 #ifdef CONFIG_PCI_MSI 3081 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3082 struct msi_msg *msg, u8 hpet_id) 3083 { 3084 struct irq_cfg *cfg; 3085 int err; 3086 unsigned dest; 3087 3088 if (disable_apic) 3089 return -ENXIO; 3090 3091 cfg = irq_cfg(irq); 3092 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3093 if (err) 3094 return err; 3095 3096 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3097 3098 if (irq_remapped(get_irq_chip_data(irq))) { 3099 struct irte irte; 3100 int ir_index; 3101 u16 sub_handle; 3102 3103 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3104 BUG_ON(ir_index == -1); 3105 3106 prepare_irte(&irte, cfg->vector, dest); 3107 3108 /* Set source-id of interrupt request */ 3109 if (pdev) 3110 set_msi_sid(&irte, pdev); 3111 else 3112 set_hpet_sid(&irte, hpet_id); 3113 3114 modify_irte(irq, &irte); 3115 3116 msg->address_hi = MSI_ADDR_BASE_HI; 3117 msg->data = sub_handle; 3118 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3119 MSI_ADDR_IR_SHV | 3120 MSI_ADDR_IR_INDEX1(ir_index) | 3121 MSI_ADDR_IR_INDEX2(ir_index); 3122 } else { 3123 if (x2apic_enabled()) 3124 msg->address_hi = MSI_ADDR_BASE_HI | 3125 MSI_ADDR_EXT_DEST_ID(dest); 3126 else 3127 msg->address_hi = MSI_ADDR_BASE_HI; 3128 3129 msg->address_lo = 3130 MSI_ADDR_BASE_LO | 3131 ((apic->irq_dest_mode == 0) ? 3132 MSI_ADDR_DEST_MODE_PHYSICAL: 3133 MSI_ADDR_DEST_MODE_LOGICAL) | 3134 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3135 MSI_ADDR_REDIRECTION_CPU: 3136 MSI_ADDR_REDIRECTION_LOWPRI) | 3137 MSI_ADDR_DEST_ID(dest); 3138 3139 msg->data = 3140 MSI_DATA_TRIGGER_EDGE | 3141 MSI_DATA_LEVEL_ASSERT | 3142 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3143 MSI_DATA_DELIVERY_FIXED: 3144 MSI_DATA_DELIVERY_LOWPRI) | 3145 MSI_DATA_VECTOR(cfg->vector); 3146 } 3147 return err; 3148 } 3149 3150 #ifdef CONFIG_SMP 3151 static int 3152 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3153 { 3154 struct irq_cfg *cfg = data->chip_data; 3155 struct msi_msg msg; 3156 unsigned int dest; 3157 3158 if (__ioapic_set_affinity(data, mask, &dest)) 3159 return -1; 3160 3161 __get_cached_msi_msg(data->msi_desc, &msg); 3162 3163 msg.data &= ~MSI_DATA_VECTOR_MASK; 3164 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3165 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3166 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3167 3168 __write_msi_msg(data->msi_desc, &msg); 3169 3170 return 0; 3171 } 3172 #ifdef CONFIG_INTR_REMAP 3173 /* 3174 * Migrate the MSI irq to another cpumask. This migration is 3175 * done in the process context using interrupt-remapping hardware. 
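 *
 * (setup_msi_irq() below sets IRQ_MOVE_PCNTXT on remapped MSI irqs,
 * which is what allows this migration to run directly from process
 * context instead of being deferred to the next interrupt.)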
3176 */ 3177 static int 3178 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3179 bool force) 3180 { 3181 struct irq_cfg *cfg = data->chip_data; 3182 unsigned int dest, irq = data->irq; 3183 struct irte irte; 3184 3185 if (get_irte(irq, &irte)) 3186 return -1; 3187 3188 if (__ioapic_set_affinity(data, mask, &dest)) 3189 return -1; 3190 3191 irte.vector = cfg->vector; 3192 irte.dest_id = IRTE_DEST(dest); 3193 3194 /* 3195 * atomically update the IRTE with the new destination and vector. 3196 */ 3197 modify_irte(irq, &irte); 3198 3199 /* 3200 * After this point, all the interrupts will start arriving 3201 * at the new destination. So, time to cleanup the previous 3202 * vector allocation. 3203 */ 3204 if (cfg->move_in_progress) 3205 send_cleanup_vector(cfg); 3206 3207 return 0; 3208 } 3209 3210 #endif 3211 #endif /* CONFIG_SMP */ 3212 3213 /* 3214 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3215 * which implement the MSI or MSI-X Capability Structure. 3216 */ 3217 static struct irq_chip msi_chip = { 3218 .name = "PCI-MSI", 3219 .irq_unmask = unmask_msi_irq, 3220 .irq_mask = mask_msi_irq, 3221 .irq_ack = ack_apic_edge, 3222 #ifdef CONFIG_SMP 3223 .irq_set_affinity = msi_set_affinity, 3224 #endif 3225 .irq_retrigger = ioapic_retrigger_irq, 3226 }; 3227 3228 static struct irq_chip msi_ir_chip = { 3229 .name = "IR-PCI-MSI", 3230 .irq_unmask = unmask_msi_irq, 3231 .irq_mask = mask_msi_irq, 3232 #ifdef CONFIG_INTR_REMAP 3233 .irq_ack = ir_ack_apic_edge, 3234 #ifdef CONFIG_SMP 3235 .irq_set_affinity = ir_msi_set_affinity, 3236 #endif 3237 #endif 3238 .irq_retrigger = ioapic_retrigger_irq, 3239 }; 3240 3241 /* 3242 * Map the PCI dev to the corresponding remapping hardware unit 3243 * and allocate 'nvec' consecutive interrupt-remapping table entries 3244 * in it. 
3245 */ 3246 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3247 { 3248 struct intel_iommu *iommu; 3249 int index; 3250 3251 iommu = map_dev_to_ir(dev); 3252 if (!iommu) { 3253 printk(KERN_ERR 3254 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3255 return -ENOENT; 3256 } 3257 3258 index = alloc_irte(iommu, irq, nvec); 3259 if (index < 0) { 3260 printk(KERN_ERR 3261 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3262 pci_name(dev)); 3263 return -ENOSPC; 3264 } 3265 return index; 3266 } 3267 3268 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3269 { 3270 struct msi_msg msg; 3271 int ret; 3272 3273 ret = msi_compose_msg(dev, irq, &msg, -1); 3274 if (ret < 0) 3275 return ret; 3276 3277 set_irq_msi(irq, msidesc); 3278 write_msi_msg(irq, &msg); 3279 3280 if (irq_remapped(get_irq_chip_data(irq))) { 3281 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3282 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3283 } else 3284 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); 3285 3286 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3287 3288 return 0; 3289 } 3290 3291 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3292 { 3293 int node, ret, sub_handle, index = 0; 3294 unsigned int irq, irq_want; 3295 struct msi_desc *msidesc; 3296 struct intel_iommu *iommu = NULL; 3297 3298 /* x86 doesn't support multiple MSI yet */ 3299 if (type == PCI_CAP_ID_MSI && nvec > 1) 3300 return 1; 3301 3302 node = dev_to_node(&dev->dev); 3303 irq_want = nr_irqs_gsi; 3304 sub_handle = 0; 3305 list_for_each_entry(msidesc, &dev->msi_list, list) { 3306 irq = create_irq_nr(irq_want, node); 3307 if (irq == 0) 3308 return -1; 3309 irq_want = irq + 1; 3310 if (!intr_remapping_enabled) 3311 goto no_ir; 3312 3313 if (!sub_handle) { 3314 /* 3315 * allocate the consecutive block of IRTE's 3316 * for 'nvec' 3317 */ 3318 index = msi_alloc_irte(dev, irq, nvec); 3319 if (index < 0) { 3320 ret = index; 3321 goto error; 3322 } 3323 } else { 3324 iommu = map_dev_to_ir(dev); 3325 if (!iommu) { 3326 ret = -ENOENT; 3327 goto error; 3328 } 3329 /* 3330 * setup the mapping between the irq and the IRTE 3331 * base index, the sub_handle pointing to the 3332 * appropriate interrupt remap table entry. 
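			 *
			 * e.g. for an MSI-X device requesting three vectors,
			 * the first loop iteration allocates IRTEs
			 * [index, index + 2]; the next two iterations only
			 * bind their irq to (index, sub_handle 1) and
			 * (index, sub_handle 2) via set_irte_irq().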
3333 */ 3334 set_irte_irq(irq, iommu, index, sub_handle); 3335 } 3336 no_ir: 3337 ret = setup_msi_irq(dev, msidesc, irq); 3338 if (ret < 0) 3339 goto error; 3340 sub_handle++; 3341 } 3342 return 0; 3343 3344 error: 3345 destroy_irq(irq); 3346 return ret; 3347 } 3348 3349 void native_teardown_msi_irq(unsigned int irq) 3350 { 3351 destroy_irq(irq); 3352 } 3353 3354 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3355 #ifdef CONFIG_SMP 3356 static int 3357 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3358 bool force) 3359 { 3360 struct irq_cfg *cfg = data->chip_data; 3361 unsigned int dest, irq = data->irq; 3362 struct msi_msg msg; 3363 3364 if (__ioapic_set_affinity(data, mask, &dest)) 3365 return -1; 3366 3367 dmar_msi_read(irq, &msg); 3368 3369 msg.data &= ~MSI_DATA_VECTOR_MASK; 3370 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3371 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3372 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3373 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3374 3375 dmar_msi_write(irq, &msg); 3376 3377 return 0; 3378 } 3379 3380 #endif /* CONFIG_SMP */ 3381 3382 static struct irq_chip dmar_msi_type = { 3383 .name = "DMAR_MSI", 3384 .irq_unmask = dmar_msi_unmask, 3385 .irq_mask = dmar_msi_mask, 3386 .irq_ack = ack_apic_edge, 3387 #ifdef CONFIG_SMP 3388 .irq_set_affinity = dmar_msi_set_affinity, 3389 #endif 3390 .irq_retrigger = ioapic_retrigger_irq, 3391 }; 3392 3393 int arch_setup_dmar_msi(unsigned int irq) 3394 { 3395 int ret; 3396 struct msi_msg msg; 3397 3398 ret = msi_compose_msg(NULL, irq, &msg, -1); 3399 if (ret < 0) 3400 return ret; 3401 dmar_msi_write(irq, &msg); 3402 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3403 "edge"); 3404 return 0; 3405 } 3406 #endif 3407 3408 #ifdef CONFIG_HPET_TIMER 3409 3410 #ifdef CONFIG_SMP 3411 static int hpet_msi_set_affinity(struct irq_data *data, 3412 const struct cpumask *mask, bool force) 3413 { 3414 struct irq_cfg *cfg = data->chip_data; 3415 struct msi_msg msg; 3416 unsigned int dest; 3417 3418 if (__ioapic_set_affinity(data, mask, &dest)) 3419 return -1; 3420 3421 hpet_msi_read(data->handler_data, &msg); 3422 3423 msg.data &= ~MSI_DATA_VECTOR_MASK; 3424 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3425 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3426 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3427 3428 hpet_msi_write(data->handler_data, &msg); 3429 3430 return 0; 3431 } 3432 3433 #endif /* CONFIG_SMP */ 3434 3435 static struct irq_chip ir_hpet_msi_type = { 3436 .name = "IR-HPET_MSI", 3437 .irq_unmask = hpet_msi_unmask, 3438 .irq_mask = hpet_msi_mask, 3439 #ifdef CONFIG_INTR_REMAP 3440 .irq_ack = ir_ack_apic_edge, 3441 #ifdef CONFIG_SMP 3442 .irq_set_affinity = ir_msi_set_affinity, 3443 #endif 3444 #endif 3445 .irq_retrigger = ioapic_retrigger_irq, 3446 }; 3447 3448 static struct irq_chip hpet_msi_type = { 3449 .name = "HPET_MSI", 3450 .irq_unmask = hpet_msi_unmask, 3451 .irq_mask = hpet_msi_mask, 3452 .irq_ack = ack_apic_edge, 3453 #ifdef CONFIG_SMP 3454 .irq_set_affinity = hpet_msi_set_affinity, 3455 #endif 3456 .irq_retrigger = ioapic_retrigger_irq, 3457 }; 3458 3459 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3460 { 3461 struct msi_msg msg; 3462 int ret; 3463 3464 if (intr_remapping_enabled) { 3465 struct intel_iommu *iommu = map_hpet_to_ir(id); 3466 int index; 3467 3468 if (!iommu) 3469 return -1; 3470 3471 index = alloc_irte(iommu, irq, 1); 3472 if (index < 0) 3473 return -1; 3474 } 3475 3476 ret = msi_compose_msg(NULL, irq, &msg, id); 3477 
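	/*
	 * pdev == NULL: the message source here is the HPET block, not a
	 * PCI device; with remapping enabled, "id" picked the IRTE
	 * source-id via set_hpet_sid() inside msi_compose_msg().
	 */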
	if (ret < 0)
		return ret;

	hpet_msi_write(get_irq_data(irq), &msg);
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	if (irq_remapped(get_irq_chip_data(irq)))
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");

	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static int
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	target_ht_irq(data->irq, dest, cfg->vector);
	return 0;
}

#endif

static struct irq_chip ht_irq_chip = {
	.name			= "PCI-HT",
	.irq_mask		= mask_ht_irq,
	.irq_unmask		= unmask_ht_irq,
	.irq_ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ht_set_affinity,
#endif
	.irq_retrigger		= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* The register returns the maximum redirection entry index
	 * supported, which is one less than the total number of
	 * redirection entries.
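	 * For example, a 24-pin 82093AA reports 0x17 (i.e. 23) in this
	 * field, so we return 24.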
3596 */ 3597 return reg_01.bits.entries + 1; 3598 } 3599 3600 static void __init probe_nr_irqs_gsi(void) 3601 { 3602 int nr; 3603 3604 nr = gsi_top + NR_IRQS_LEGACY; 3605 if (nr > nr_irqs_gsi) 3606 nr_irqs_gsi = nr; 3607 3608 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3609 } 3610 3611 int get_nr_irqs_gsi(void) 3612 { 3613 return nr_irqs_gsi; 3614 } 3615 3616 #ifdef CONFIG_SPARSE_IRQ 3617 int __init arch_probe_nr_irqs(void) 3618 { 3619 int nr; 3620 3621 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3622 nr_irqs = NR_VECTORS * nr_cpu_ids; 3623 3624 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3625 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3626 /* 3627 * for MSI and HT dyn irq 3628 */ 3629 nr += nr_irqs_gsi * 16; 3630 #endif 3631 if (nr < nr_irqs) 3632 nr_irqs = nr; 3633 3634 return NR_IRQS_LEGACY; 3635 } 3636 #endif 3637 3638 static int __io_apic_set_pci_routing(struct device *dev, int irq, 3639 struct io_apic_irq_attr *irq_attr) 3640 { 3641 struct irq_cfg *cfg; 3642 int node; 3643 int ioapic, pin; 3644 int trigger, polarity; 3645 3646 ioapic = irq_attr->ioapic; 3647 if (!IO_APIC_IRQ(irq)) { 3648 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3649 ioapic); 3650 return -EINVAL; 3651 } 3652 3653 if (dev) 3654 node = dev_to_node(dev); 3655 else 3656 node = cpu_to_node(0); 3657 3658 cfg = alloc_irq_and_cfg_at(irq, node); 3659 if (!cfg) 3660 return 0; 3661 3662 pin = irq_attr->ioapic_pin; 3663 trigger = irq_attr->trigger; 3664 polarity = irq_attr->polarity; 3665 3666 /* 3667 * IRQs < 16 are already in the irq_2_pin[] map 3668 */ 3669 if (irq >= legacy_pic->nr_legacy_irqs) { 3670 if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { 3671 printk(KERN_INFO "can not add pin %d for irq %d\n", 3672 pin, irq); 3673 return 0; 3674 } 3675 } 3676 3677 setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); 3678 3679 return 0; 3680 } 3681 3682 int io_apic_set_pci_routing(struct device *dev, int irq, 3683 struct io_apic_irq_attr *irq_attr) 3684 { 3685 int ioapic, pin; 3686 /* 3687 * Avoid pin reprogramming. PRTs typically include entries 3688 * with redundant pin->gsi mappings (but unique PCI devices); 3689 * we only program the IOAPIC on the first. 
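	 *
	 * For instance, if two devices' _PRT entries resolve to the same
	 * (ioapic, pin) pair, only the first caller programs the pin; the
	 * second finds the pin_programmed bit already set and returns 0.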
	 */
	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}

u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
}

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
	 * only support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
3760 */ 3761 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3762 3763 for (i = 0; i < get_physical_broadcast(); i++) { 3764 if (!apic->check_apicid_used(&apic_id_map, i)) 3765 break; 3766 } 3767 3768 if (i == get_physical_broadcast()) 3769 panic("Max apic_id exceeded!\n"); 3770 3771 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3772 "trying %d\n", ioapic, apic_id, i); 3773 3774 apic_id = i; 3775 } 3776 3777 apic->apicid_to_cpu_present(apic_id, &tmp); 3778 physids_or(apic_id_map, apic_id_map, tmp); 3779 3780 if (reg_00.bits.ID != apic_id) { 3781 reg_00.bits.ID = apic_id; 3782 3783 raw_spin_lock_irqsave(&ioapic_lock, flags); 3784 io_apic_write(ioapic, 0, reg_00.raw); 3785 reg_00.raw = io_apic_read(ioapic, 0); 3786 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3787 3788 /* Sanity check */ 3789 if (reg_00.bits.ID != apic_id) { 3790 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3791 return -1; 3792 } 3793 } 3794 3795 apic_printk(APIC_VERBOSE, KERN_INFO 3796 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3797 3798 return apic_id; 3799 } 3800 #endif 3801 3802 int __init io_apic_get_version(int ioapic) 3803 { 3804 union IO_APIC_reg_01 reg_01; 3805 unsigned long flags; 3806 3807 raw_spin_lock_irqsave(&ioapic_lock, flags); 3808 reg_01.raw = io_apic_read(ioapic, 1); 3809 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3810 3811 return reg_01.bits.version; 3812 } 3813 3814 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3815 { 3816 int ioapic, pin, idx; 3817 3818 if (skip_ioapic_setup) 3819 return -1; 3820 3821 ioapic = mp_find_ioapic(gsi); 3822 if (ioapic < 0) 3823 return -1; 3824 3825 pin = mp_find_ioapic_pin(ioapic, gsi); 3826 if (pin < 0) 3827 return -1; 3828 3829 idx = find_irq_entry(ioapic, pin, mp_INT); 3830 if (idx < 0) 3831 return -1; 3832 3833 *trigger = irq_trigger(idx); 3834 *polarity = irq_polarity(idx); 3835 return 0; 3836 } 3837 3838 /* 3839 * This function currently is only a helper for the i386 smp boot process where 3840 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3841 * so mask in all cases should simply be apic->target_cpus() 3842 */ 3843 #ifdef CONFIG_SMP 3844 void __init setup_ioapic_dest(void) 3845 { 3846 int pin, ioapic, irq, irq_entry; 3847 struct irq_desc *desc; 3848 const struct cpumask *mask; 3849 3850 if (skip_ioapic_setup == 1) 3851 return; 3852 3853 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3854 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { 3855 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3856 if (irq_entry == -1) 3857 continue; 3858 irq = pin_2_irq(irq_entry, ioapic, pin); 3859 3860 if ((ioapic > 0) && (irq > 16)) 3861 continue; 3862 3863 desc = irq_to_desc(irq); 3864 3865 /* 3866 * Honour affinities which have been set in early boot 3867 */ 3868 if (desc->status & 3869 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3870 mask = desc->irq_data.affinity; 3871 else 3872 mask = apic->target_cpus(); 3873 3874 if (intr_remapping_enabled) 3875 ir_ioapic_set_affinity(&desc->irq_data, mask, false); 3876 else 3877 ioapic_set_affinity(&desc->irq_data, mask, false); 3878 } 3879 3880 } 3881 #endif 3882 3883 #define IOAPIC_RESOURCE_NAME_SIZE 11 3884 3885 static struct resource *ioapic_resources; 3886 3887 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3888 { 3889 unsigned long n; 3890 struct resource *res; 3891 char *mem; 3892 int i; 3893 3894 if (nr_ioapics <= 0) 3895 return NULL; 3896 3897 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 
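	/*
	 * Layout of the block allocated below: nr_ioapics struct resources
	 * first, then nr_ioapics name buffers of IOAPIC_RESOURCE_NAME_SIZE
	 * bytes each.
	 */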
3898 n *= nr_ioapics; 3899 3900 mem = alloc_bootmem(n); 3901 res = (void *)mem; 3902 3903 mem += sizeof(struct resource) * nr_ioapics; 3904 3905 for (i = 0; i < nr_ioapics; i++) { 3906 res[i].name = mem; 3907 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3908 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3909 mem += IOAPIC_RESOURCE_NAME_SIZE; 3910 } 3911 3912 ioapic_resources = res; 3913 3914 return res; 3915 } 3916 3917 void __init ioapic_and_gsi_init(void) 3918 { 3919 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3920 struct resource *ioapic_res; 3921 int i; 3922 3923 ioapic_res = ioapic_setup_resources(nr_ioapics); 3924 for (i = 0; i < nr_ioapics; i++) { 3925 if (smp_found_config) { 3926 ioapic_phys = mp_ioapics[i].apicaddr; 3927 #ifdef CONFIG_X86_32 3928 if (!ioapic_phys) { 3929 printk(KERN_ERR 3930 "WARNING: bogus zero IO-APIC " 3931 "address found in MPTABLE, " 3932 "disabling IO/APIC support!\n"); 3933 smp_found_config = 0; 3934 skip_ioapic_setup = 1; 3935 goto fake_ioapic_page; 3936 } 3937 #endif 3938 } else { 3939 #ifdef CONFIG_X86_32 3940 fake_ioapic_page: 3941 #endif 3942 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3943 ioapic_phys = __pa(ioapic_phys); 3944 } 3945 set_fixmap_nocache(idx, ioapic_phys); 3946 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3947 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3948 ioapic_phys); 3949 idx++; 3950 3951 ioapic_res->start = ioapic_phys; 3952 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3953 ioapic_res++; 3954 } 3955 3956 probe_nr_irqs_gsi(); 3957 } 3958 3959 void __init ioapic_insert_resources(void) 3960 { 3961 int i; 3962 struct resource *r = ioapic_resources; 3963 3964 if (!r) { 3965 if (nr_ioapics > 0) 3966 printk(KERN_ERR 3967 "IO APIC resources couldn't be allocated.\n"); 3968 return; 3969 } 3970 3971 for (i = 0; i < nr_ioapics; i++) { 3972 insert_resource(&iomem_resource, r); 3973 r++; 3974 } 3975 } 3976 3977 int mp_find_ioapic(u32 gsi) 3978 { 3979 int i = 0; 3980 3981 /* Find the IOAPIC that manages this GSI. 
	 */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_gsi_routing[i].gsi_base)
		    && (gsi <= mp_gsi_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
	return -1;
}

int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	if (WARN_ON(ioapic == -1))
		return -1;
	if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
		return -1;

	return gsi - mp_gsi_routing[ioapic].gsi_base;
}

static int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
		       " found in table, skipping!\n");
		return 1;
	}
	return 0;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;
	int entries;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	mp_ioapics[idx].type = MP_IOAPIC;
	mp_ioapics[idx].flags = MPC_APIC_USABLE;
	mp_ioapics[idx].apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	mp_ioapics[idx].apicid = io_apic_unique_id(id);
	mp_ioapics[idx].apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	entries = io_apic_get_redir_entries(idx);
	mp_gsi_routing[idx].gsi_base = gsi_base;
	mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	nr_ioapic_registers[idx] = entries;

	if (mp_gsi_routing[idx].gsi_end >= gsi_top)
		gsi_top = mp_gsi_routing[idx].gsi_end + 1;

	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
	       mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

	nr_ioapics++;
}

/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
	struct irq_cfg *cfg;

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	physid_set_mask_of_physid(boot_cpu_physical_apicid,
				  &phys_cpu_present_map);
#endif
	/* Make sure the irq descriptor is set up */
	cfg = alloc_irq_and_cfg_at(0, 0);

	setup_local_APIC();

	add_pin_to_irq_node(cfg, 0, 0, 0);
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
}
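/*
 * Illustrative sketch only (an addition to this listing, not original
 * kernel code): how the GSI routing tables built by mp_register_ioapic()
 * above compose to turn a GSI into an (ioapic, pin) pair. The helper
 * name gsi_to_ioapic_pin() is hypothetical.
 */
static inline int __maybe_unused gsi_to_ioapic_pin(u32 gsi, int *ioapic, int *pin)
{
	*ioapic = mp_find_ioapic(gsi);	/* scan the [gsi_base, gsi_end] ranges */
	if (*ioapic < 0)
		return -1;
	*pin = mp_find_ioapic_pin(*ioapic, gsi);	/* pin = gsi - gsi_base */
	return 0;
}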