/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(id)	ioapics[id].mp_config.apicver

int mpc_ioapic_id(int id)
{
	return ioapics[id].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int id)
{
	return ioapics[id].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int id)
{
	return &ioapics[id].gsi_config;
}

int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
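/*
 * An irq may be routed to several IO-APIC pins at once (e.g. shared
 * ISA-space IRQs); irq_2_pin in struct irq_cfg is the head of a
 * singly-linked irq_pin_list walked with for_each_irq_pin().
 */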
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQs, start by assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require that we rewrite the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};
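/*
 * Example (hypothetical values): booting with "pirq=5,11" stores 5 and
 * then 11 at the top of the table (pirq_entries[7], pirq_entries[6]),
 * since PIRQ lines are usually mapped upside down relative to IO-APIC
 * pins 16-23; see ioapic_pirq_setup() below.
 */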
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTEs.
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}
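/*
 * The three helpers above form the suspend/resume (and intr-remap
 * enable) sequence: save_ioapic_entries() snapshots every RTE into
 * ioapics[].saved_registers, mask_ioapic_entries() masks the live
 * entries, and restore_ioapic_entries() writes the snapshot back.
 */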
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(apic) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value. If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR. */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
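/*
 * Summary of the conforming defaults above (trigger 0 = edge,
 * 1 = level; polarity 0 = active high, 1 = active low):
 *   ISA:  edge, active high      EISA: ELCR-chosen trigger, active high
 *   PCI:  level, active low      MCA:  level, active high
 */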
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
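/*
 * GSI-to-IRQ example for the PCI branch of pin_2_irq() above, with
 * made-up numbers: assume gsi_top = 40 and a second ioapic with
 * gsi_base = 24.  Pin 3 there is gsi 27, which is >= NR_IRQS_LEGACY
 * and so becomes irq 27 directly.  A PCI pin with gsi 5 would instead
 * become irq 45 (gsi_top + 5), keeping irq 0-15 reserved for the ISA
 * identity mapping.
 */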
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Held so that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
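/*
 * Vector allocation in __assign_irq_vector() below walks upward in
 * steps of 8 so consecutive allocations land in different priority
 * classes (class = vector >> 4).  For example, assuming
 * VECTOR_OFFSET_START is 1, the probe order is roughly 0x29, 0x31,
 * 0x39, ... and once first_system_vector is reached the offset
 * advances, so the next sweep tries 0x22, 0x2a, 0x32, ...
 */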
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		chip = &ir_ioapic_chip;
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}

static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;

		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
			"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
			"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
			"Avail:%X Vector:%02X Dest:%08X "
			"SID:%04X SQ:%X SVT:%X)\n",
			apic_id, irte.present, irte.fpd, irte.dst_mode,
			irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
			irte.avail, irte.vector, irte.dest_id,
			irte.sid, irte.sq, irte.svt);
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
			     struct io_apic_irq_attr *attr)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
		    cfg->vector, irq, attr->trigger, attr->polarity, dest);

	if (setup_ioapic_entry(mpc_ioapic_id(attr->ioapic), irq, &entry,
			       dest, attr->trigger, attr->polarity, cfg->vector,
			       attr->ioapic_pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, cfg, attr->trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
}

static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mpc_ioapic_id(apic_id), pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int apic_id)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < ioapics[apic_id].nr_registers; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, apic_id, pin))
			continue;

		irq = pin_2_irq(idx, apic_id, pin);

		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(apic_id, irq))
			continue;

		set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}
static void __init setup_IO_APIC_irqs(void)
{
	unsigned int apic_id;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
		__io_apic_setup_irqs(apic_id);
}

/*
 * For GSIs that are not on the first ioapic and could not be set up
 * via acpi_register_gsi(), like the special SCI on the IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}


__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(i), ioapics[i].nr_registers);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(apic));
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %02X\n",
		       reg_01.bits.entries);

		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %02X\n",
		       reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		if (intr_remapping_enabled) {
			printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
				" Pol Stat Indx2 Zero Vect:\n");
		} else {
			printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
				" Stat Dmod Deli Vect:\n");
		}

		for (i = 0; i <= reg_01.bits.entries; i++) {
			if (intr_remapping_enabled) {
				struct IO_APIC_route_entry entry;
				struct IR_IO_APIC_route_entry *ir_entry;

				entry = ioapic_read_entry(apic, i);
				ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
				printk(KERN_DEBUG " %02x %04X ",
					i,
					ir_entry->index
				);
				printk("%1d %1d %1d %1d %1d "
					"%1d %1d %X %02X\n",
					ir_entry->format,
					ir_entry->mask,
					ir_entry->trigger,
					ir_entry->irr,
					ir_entry->polarity,
					ir_entry->delivery_status,
					ir_entry->index2,
					ir_entry->zero,
					ir_entry->vector
				);
			} else {
				struct IO_APIC_route_entry entry;

				entry = ioapic_read_entry(apic, i);
				printk(KERN_DEBUG " %02x %02X ",
					i,
					entry.dest
				);
				printk("%1d %1d %1d %1d %1d "
					"%1d %1d %02X\n",
					entry.mask,
					entry.trigger,
					entry.irr,
					entry.polarity,
					entry.delivery_status,
					entry.dest_mode,
					entry.delivery_mode,
					entry.vector
				);
			}
		}
	}

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
done.\n"); 1637 1638 return; 1639 } 1640 1641 __apicdebuginit(void) print_APIC_field(int base) 1642 { 1643 int i; 1644 1645 printk(KERN_DEBUG); 1646 1647 for (i = 0; i < 8; i++) 1648 printk(KERN_CONT "%08x", apic_read(base + i*0x10)); 1649 1650 printk(KERN_CONT "\n"); 1651 } 1652 1653 __apicdebuginit(void) print_local_APIC(void *dummy) 1654 { 1655 unsigned int i, v, ver, maxlvt; 1656 u64 icr; 1657 1658 printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", 1659 smp_processor_id(), hard_smp_processor_id()); 1660 v = apic_read(APIC_ID); 1661 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); 1662 v = apic_read(APIC_LVR); 1663 printk(KERN_INFO "... APIC VERSION: %08x\n", v); 1664 ver = GET_APIC_VERSION(v); 1665 maxlvt = lapic_get_maxlvt(); 1666 1667 v = apic_read(APIC_TASKPRI); 1668 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); 1669 1670 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1671 if (!APIC_XAPIC(ver)) { 1672 v = apic_read(APIC_ARBPRI); 1673 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, 1674 v & APIC_ARBPRI_MASK); 1675 } 1676 v = apic_read(APIC_PROCPRI); 1677 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); 1678 } 1679 1680 /* 1681 * Remote read supported only in the 82489DX and local APIC for 1682 * Pentium processors. 1683 */ 1684 if (!APIC_INTEGRATED(ver) || maxlvt == 3) { 1685 v = apic_read(APIC_RRR); 1686 printk(KERN_DEBUG "... APIC RRR: %08x\n", v); 1687 } 1688 1689 v = apic_read(APIC_LDR); 1690 printk(KERN_DEBUG "... APIC LDR: %08x\n", v); 1691 if (!x2apic_enabled()) { 1692 v = apic_read(APIC_DFR); 1693 printk(KERN_DEBUG "... APIC DFR: %08x\n", v); 1694 } 1695 v = apic_read(APIC_SPIV); 1696 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); 1697 1698 printk(KERN_DEBUG "... APIC ISR field:\n"); 1699 print_APIC_field(APIC_ISR); 1700 printk(KERN_DEBUG "... APIC TMR field:\n"); 1701 print_APIC_field(APIC_TMR); 1702 printk(KERN_DEBUG "... APIC IRR field:\n"); 1703 print_APIC_field(APIC_IRR); 1704 1705 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1706 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1707 apic_write(APIC_ESR, 0); 1708 1709 v = apic_read(APIC_ESR); 1710 printk(KERN_DEBUG "... APIC ESR: %08x\n", v); 1711 } 1712 1713 icr = apic_icr_read(); 1714 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); 1715 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); 1716 1717 v = apic_read(APIC_LVTT); 1718 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); 1719 1720 if (maxlvt > 3) { /* PC is LVT#4. */ 1721 v = apic_read(APIC_LVTPC); 1722 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); 1723 } 1724 v = apic_read(APIC_LVT0); 1725 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); 1726 v = apic_read(APIC_LVT1); 1727 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); 1728 1729 if (maxlvt > 2) { /* ERR is LVT#3. */ 1730 v = apic_read(APIC_LVTERR); 1731 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); 1732 } 1733 1734 v = apic_read(APIC_TMICT); 1735 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); 1736 v = apic_read(APIC_TMCCT); 1737 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); 1738 v = apic_read(APIC_TDCR); 1739 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); 1740 1741 if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { 1742 v = apic_read(APIC_EFEAT); 1743 maxlvt = (v >> 16) & 0xff; 1744 printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); 1745 v = apic_read(APIC_ECTRL); 1746 printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); 1747 for (i = 0; i < maxlvt; i++) { 1748 v = apic_read(APIC_EILVTn(i)); 1749 printk(KERN_DEBUG "... 
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

late_initcall(print_ICs);


/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
found_i8259:
	/* See if the MP table has reported the ExtINT. */
	/* If we could not find the appropriate pin by looking at the ioapic,
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is little complex (need to configure both
	 * IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(apic_id);

		if (mpc_ioapic_id(apic_id) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[apic_id].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(apic_id))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[apic_id].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(apic_id),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(apic_id));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(apic_id))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(apic_id);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(apic_id) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(apic_id));

		reg_00.bits.ID = mpc_ioapic_id(apic_id);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(apic_id))
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems. They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
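/*
 * Rough timing in timer_irq_works() below: mdelay((10 * 1000) / HZ)
 * busy-waits for about ten tick periods (10ms at HZ=1000, 40ms at
 * HZ=250), and the check then demands at least five elapsed jiffies;
 * the slack tolerates a cached ExtINT interrupt and a lost tick or two.
 */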
*/ 2099 mdelay((10 * 1000) / HZ); 2100 local_irq_restore(flags); 2101 2102 /* 2103 * Expect a few ticks at least, to be sure some possible 2104 * glue logic does not lock up after one or two first 2105 * ticks in a non-ExtINT mode. Also the local APIC 2106 * might have cached one ExtINT interrupt. Finally, at 2107 * least one tick may be lost due to delays. 2108 */ 2109 2110 /* jiffies wrap? */ 2111 if (time_after(jiffies, t1 + 4)) 2112 return 1; 2113 return 0; 2114 } 2115 2116 /* 2117 * In the SMP+IOAPIC case it might happen that an unspecified 2118 * number of pending IRQ events is left unhandled. These cases are very rare, 2119 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much 2120 * better to do it this way, as we then do not have to be aware of 2121 * 'pending' interrupts in the IRQ path, except at this point. 2122 */ 2123 /* 2124 * Edge-triggered interrupts need to resend any interrupt 2125 * that was delayed, but this is now handled in the 2126 * device-independent code. 2127 */ 2128 2129 /* 2130 * Starting up an edge-triggered IO-APIC interrupt is 2131 * nasty - we need to make sure that we get the edge. 2132 * If it is already asserted for some reason, we need to 2133 * return 1 to indicate that it was pending. 2134 * 2135 * This is not complete - we should be able to fake 2136 * an edge even if it isn't on the 8259A... 2137 */ 2138 2139 static unsigned int startup_ioapic_irq(struct irq_data *data) 2140 { 2141 int was_pending = 0, irq = data->irq; 2142 unsigned long flags; 2143 2144 raw_spin_lock_irqsave(&ioapic_lock, flags); 2145 if (irq < legacy_pic->nr_legacy_irqs) { 2146 legacy_pic->mask(irq); 2147 if (legacy_pic->irq_pending(irq)) 2148 was_pending = 1; 2149 } 2150 __unmask_ioapic(data->chip_data); 2151 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2152 2153 return was_pending; 2154 } 2155 2156 static int ioapic_retrigger_irq(struct irq_data *data) 2157 { 2158 struct irq_cfg *cfg = data->chip_data; 2159 unsigned long flags; 2160 2161 raw_spin_lock_irqsave(&vector_lock, flags); 2162 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2163 raw_spin_unlock_irqrestore(&vector_lock, flags); 2164 2165 return 1; 2166 } 2167 2168 /* 2169 * Level and edge triggered IO-APIC interrupts need different handling, 2170 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 2171 * handled with the level-triggered descriptor, but that one has slightly 2172 * more overhead. Level-triggered interrupts cannot be handled with the 2173 * edge-triggered handler, without risking IRQ storms and other ugly 2174 * races.
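 *
 * As an illustration of that split (an editor's sketch of how trigger
 * mode maps to a flow handler in the setup paths, not a verbatim quote
 * of this file):
 *
 *	if (trigger)
 *		irq_set_handler(irq, handle_fasteoi_irq);	(level)
 *	else
 *		irq_set_handler(irq, handle_edge_irq);		(edge)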
2175 */ 2176 2177 #ifdef CONFIG_SMP 2178 void send_cleanup_vector(struct irq_cfg *cfg) 2179 { 2180 cpumask_var_t cleanup_mask; 2181 2182 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2183 unsigned int i; 2184 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2185 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2186 } else { 2187 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2188 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2189 free_cpumask_var(cleanup_mask); 2190 } 2191 cfg->move_in_progress = 0; 2192 } 2193 2194 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2195 { 2196 int apic, pin; 2197 struct irq_pin_list *entry; 2198 u8 vector = cfg->vector; 2199 2200 for_each_irq_pin(entry, cfg->irq_2_pin) { 2201 unsigned int reg; 2202 2203 apic = entry->apic; 2204 pin = entry->pin; 2205 /* 2206 * With interrupt-remapping, destination information comes 2207 * from the interrupt-remapping table entry. 2208 */ 2209 if (!irq_remapped(cfg)) 2210 io_apic_write(apic, 0x11 + pin*2, dest); 2211 reg = io_apic_read(apic, 0x10 + pin*2); 2212 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2213 reg |= vector; 2214 io_apic_modify(apic, 0x10 + pin*2, reg); 2215 } 2216 } 2217 2218 /* 2219 * Either sets data->affinity to a valid value, and returns 2220 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2221 * leaves data->affinity untouched. 2222 */ 2223 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2224 unsigned int *dest_id) 2225 { 2226 struct irq_cfg *cfg = data->chip_data; 2227 2228 if (!cpumask_intersects(mask, cpu_online_mask)) 2229 return -1; 2230 2231 if (assign_irq_vector(data->irq, data->chip_data, mask)) 2232 return -1; 2233 2234 cpumask_copy(data->affinity, mask); 2235 2236 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2237 return 0; 2238 } 2239 2240 static int 2241 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2242 bool force) 2243 { 2244 unsigned int dest, irq = data->irq; 2245 unsigned long flags; 2246 int ret; 2247 2248 raw_spin_lock_irqsave(&ioapic_lock, flags); 2249 ret = __ioapic_set_affinity(data, mask, &dest); 2250 if (!ret) { 2251 /* Only the high 8 bits are valid. */ 2252 dest = SET_APIC_LOGICAL_ID(dest); 2253 __target_IO_APIC_irq(irq, dest, data->chip_data); 2254 } 2255 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2256 return ret; 2257 } 2258 2259 #ifdef CONFIG_INTR_REMAP 2260 2261 /* 2262 * Migrate the IO-APIC irq in the presence of intr-remapping. 2263 * 2264 * For both level and edge triggered, irq migration is a simple atomic 2265 * update (of vector and cpu destination) of the IRTE and a flush of the hardware cache. 2266 * 2267 * For level triggered, we eliminate the io-apic RTE modification (with the 2268 * updated vector information) by using a virtual vector (the io-apic pin number). 2269 * The real vector that is used for interrupting the cpu will come from 2270 * the interrupt-remapping table entry.
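 *
 * A sketch of the effect (the vector numbers are invented for
 * illustration): migrating an irq from vector 0x61 on one cpu to
 * vector 0x71 on another rewrites only the IRTE,
 *
 *	irte.vector  = cfg->vector;		(new vector, e.g. 0x71)
 *	irte.dest_id = IRTE_DEST(dest);		(new destination apic id)
 *	modify_irte(irq, &irte);		(flushes the IRTE cache)
 *
 * while a level-triggered io-apic RTE keeps carrying the pin number.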
2271 */ 2272 static int 2273 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2274 bool force) 2275 { 2276 struct irq_cfg *cfg = data->chip_data; 2277 unsigned int dest, irq = data->irq; 2278 struct irte irte; 2279 2280 if (!cpumask_intersects(mask, cpu_online_mask)) 2281 return -EINVAL; 2282 2283 if (get_irte(irq, &irte)) 2284 return -EBUSY; 2285 2286 if (assign_irq_vector(irq, cfg, mask)) 2287 return -EBUSY; 2288 2289 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2290 2291 irte.vector = cfg->vector; 2292 irte.dest_id = IRTE_DEST(dest); 2293 2294 /* 2295 * Modify the IRTE and flush the interrupt entry cache. 2296 */ 2297 modify_irte(irq, &irte); 2298 2299 if (cfg->move_in_progress) 2300 send_cleanup_vector(cfg); 2301 2302 cpumask_copy(data->affinity, mask); 2303 return 0; 2304 } 2305 2306 #else 2307 static inline int 2308 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2309 bool force) 2310 { 2311 return 0; 2312 } 2313 #endif 2314 2315 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2316 { 2317 unsigned vector, me; 2318 2319 ack_APIC_irq(); 2320 exit_idle(); 2321 irq_enter(); 2322 2323 me = smp_processor_id(); 2324 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2325 unsigned int irq; 2326 unsigned int irr; 2327 struct irq_desc *desc; 2328 struct irq_cfg *cfg; 2329 irq = __this_cpu_read(vector_irq[vector]); 2330 2331 if (irq == -1) 2332 continue; 2333 2334 desc = irq_to_desc(irq); 2335 if (!desc) 2336 continue; 2337 2338 cfg = irq_cfg(irq); 2339 raw_spin_lock(&desc->lock); 2340 2341 /* 2342 * Check if the irq migration is in progress. If so, we 2343 * haven't received the cleanup request yet for this irq. 2344 */ 2345 if (cfg->move_in_progress) 2346 goto unlock; 2347 2348 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2349 goto unlock; 2350 2351 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2352 /* 2353 * Check if the vector that needs to be cleaned up is 2354 * registered at the cpu's IRR. If so, then this is not 2355 * the best time to clean it up. Let's clean it up in the 2356 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2357 * to myself. 2358 */ 2359 if (irr & (1 << (vector % 32))) { 2360 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2361 goto unlock; 2362 } 2363 __this_cpu_write(vector_irq[vector], -1); 2364 unlock: 2365 raw_spin_unlock(&desc->lock); 2366 } 2367 2368 irq_exit(); 2369 } 2370 2371 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2372 { 2373 unsigned me; 2374 2375 if (likely(!cfg->move_in_progress)) 2376 return; 2377 2378 me = smp_processor_id(); 2379 2380 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2381 send_cleanup_vector(cfg); 2382 } 2383 2384 static void irq_complete_move(struct irq_cfg *cfg) 2385 { 2386 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2387 } 2388 2389 void irq_force_complete_move(int irq) 2390 { 2391 struct irq_cfg *cfg = irq_get_chip_data(irq); 2392 2393 if (!cfg) 2394 return; 2395 2396 __irq_complete_move(cfg, cfg->vector); 2397 } 2398 #else 2399 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2400 #endif 2401 2402 static void ack_apic_edge(struct irq_data *data) 2403 { 2404 irq_complete_move(data->chip_data); 2405 irq_move_irq(data); 2406 ack_APIC_irq(); 2407 } 2408 2409 atomic_t irq_mis_count; 2410 2411 /* 2412 * IO-APIC versions below 0x20 don't support the EOI register. 
* For the record, here is the information about various versions: 2414 * 0Xh 82489DX 2415 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant 2416 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant 2417 * 30h-FFh Reserved 2418 * 2419 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic 2420 * version as 0x2. This is a documentation error; these ICH chips 2421 * actually use io-apics of version 0x20. 2422 * 2423 * For IO-APICs with an EOI register, we use it to do an explicit EOI. 2424 * Otherwise, we simulate the EOI message manually by changing the trigger 2425 * mode to edge and then back to level, with the RTE masked during this. 2426 */ 2427 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2428 { 2429 struct irq_pin_list *entry; 2430 unsigned long flags; 2431 2432 raw_spin_lock_irqsave(&ioapic_lock, flags); 2433 for_each_irq_pin(entry, cfg->irq_2_pin) { 2434 if (mpc_ioapic_ver(entry->apic) >= 0x20) { 2435 /* 2436 * Intr-remapping uses pin number as the virtual vector 2437 * in the RTE. Actual vector is programmed in 2438 * intr-remapping table entry. Hence for the io-apic 2439 * EOI we use the pin number. 2440 */ 2441 if (irq_remapped(cfg)) 2442 io_apic_eoi(entry->apic, entry->pin); 2443 else 2444 io_apic_eoi(entry->apic, cfg->vector); 2445 } else { 2446 __mask_and_edge_IO_APIC_irq(entry); 2447 __unmask_and_level_IO_APIC_irq(entry); 2448 } 2449 } 2450 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2451 } 2452 2453 static void ack_apic_level(struct irq_data *data) 2454 { 2455 struct irq_cfg *cfg = data->chip_data; 2456 int i, do_unmask_irq = 0, irq = data->irq; 2457 unsigned long v; 2458 2459 irq_complete_move(cfg); 2460 #ifdef CONFIG_GENERIC_PENDING_IRQ 2461 /* If we are moving the irq we need to mask it */ 2462 if (unlikely(irqd_is_setaffinity_pending(data))) { 2463 do_unmask_irq = 1; 2464 mask_ioapic(cfg); 2465 } 2466 #endif 2467 2468 /* 2469 * It appears there is an erratum which affects at least version 0x11 2470 * of I/O APIC (that's the 82093AA and cores integrated into various 2471 * chipsets). Under certain conditions a level-triggered interrupt is 2472 * erroneously delivered as an edge-triggered one but the respective IRR 2473 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2474 * message but it will never arrive and further interrupts are blocked 2475 * from the source. The exact reason is so far unknown, but the 2476 * phenomenon was observed when two consecutive interrupt requests 2477 * from a given source get delivered to the same CPU and the source is 2478 * temporarily disabled in between. 2479 * 2480 * A workaround is to simulate an EOI message manually. We achieve it 2481 * by setting the trigger mode to edge and then to level when the edge 2482 * trigger mode gets detected in the TMR of a local APIC for a 2483 * level-triggered interrupt. We mask the source for the time of the 2484 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2485 * The idea is from Manfred Spraul. --macro 2486 * 2487 * Also, when a cpu goes offline, fixup_irqs() will forward 2488 * any unhandled interrupt on the offlined cpu to the new cpu 2489 * destination that is handling the corresponding interrupt. This 2490 * interrupt forwarding is done via IPIs. Hence, in this case also a 2491 * level-triggered io-apic interrupt will be seen as an edge 2492 * interrupt in the IRR.
And we can't rely on the cpu's EOI 2493 * being broadcast to the IO-APICs to clear the remote IRR 2494 * corresponding to the level-triggered interrupt. Hence on IO-APICs 2495 * supporting the EOI register, we do an explicit EOI to clear the 2496 * remote IRR, and on IO-APICs which don't have an EOI register, 2497 * we use the above logic (mask+edge followed by unmask+level) from 2498 * Manfred Spraul to clear the remote IRR. 2499 */ 2500 i = cfg->vector; 2501 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2502 2503 /* 2504 * We must acknowledge the irq before we move it or the acknowledge will 2505 * not propagate properly. 2506 */ 2507 ack_APIC_irq(); 2508 2509 /* 2510 * Tail end of clearing the remote IRR bit (either by delivering the EOI 2511 * message via an io-apic EOI register write or simulating it using 2512 * mask+edge followed by unmask+level logic) manually when the 2513 * level-triggered interrupt is seen as an edge-triggered interrupt 2514 * at the cpu. 2515 */ 2516 if (!(v & (1 << (i & 0x1f)))) { 2517 atomic_inc(&irq_mis_count); 2518 2519 eoi_ioapic_irq(irq, cfg); 2520 } 2521 2522 /* Now we can move and re-enable the irq */ 2523 if (unlikely(do_unmask_irq)) { 2524 /* Only migrate the irq if the ack has been received. 2525 * 2526 * On rare occasions the broadcast level triggered ack gets 2527 * delayed going to ioapics, and if we reprogram the 2528 * vector while Remote IRR is still set the irq will never 2529 * fire again. 2530 * 2531 * To prevent this scenario we read the Remote IRR bit 2532 * of the ioapic. This has two effects. 2533 * - On any sane system the read of the ioapic will 2534 * flush writes (and acks) going to the ioapic from 2535 * this cpu. 2536 * - We get to see if the ACK has actually been delivered. 2537 * 2538 * Based on failed experiments of reprogramming the 2539 * ioapic entry from outside of irq context (starting 2540 * with masking the ioapic entry and then polling until 2541 * Remote IRR was clear before reprogramming the 2542 * ioapic), I don't trust the Remote IRR bit to be 2543 * completely accurate. 2544 * 2545 * However there appears to be no other way to plug 2546 * this race, so if the Remote IRR bit is not 2547 * accurate and is causing problems then it is a hardware bug 2548 * and you can go talk to the chipset vendor about it.
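 *
 * To summarize, the sequence implemented below is:
 *
 *	1. mask the RTE if an affinity change is pending
 *	2. latch the TMR bit for cfg->vector
 *	3. ack_APIC_irq()
 *	4. if the TMR bit was clear, the interrupt arrived as an edge:
 *	   count it in irq_mis_count and do the explicit (or simulated)
 *	   io-apic EOI
 *	5. if masked in step 1, migrate the irq once no ack is pending,
 *	   then unmask the RTE again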
2549 */ 2550 if (!io_apic_level_ack_pending(cfg)) 2551 irq_move_masked_irq(data); 2552 unmask_ioapic(cfg); 2553 } 2554 } 2555 2556 #ifdef CONFIG_INTR_REMAP 2557 static void ir_ack_apic_edge(struct irq_data *data) 2558 { 2559 ack_APIC_irq(); 2560 } 2561 2562 static void ir_ack_apic_level(struct irq_data *data) 2563 { 2564 ack_APIC_irq(); 2565 eoi_ioapic_irq(data->irq, data->chip_data); 2566 } 2567 #endif /* CONFIG_INTR_REMAP */ 2568 2569 static struct irq_chip ioapic_chip __read_mostly = { 2570 .name = "IO-APIC", 2571 .irq_startup = startup_ioapic_irq, 2572 .irq_mask = mask_ioapic_irq, 2573 .irq_unmask = unmask_ioapic_irq, 2574 .irq_ack = ack_apic_edge, 2575 .irq_eoi = ack_apic_level, 2576 #ifdef CONFIG_SMP 2577 .irq_set_affinity = ioapic_set_affinity, 2578 #endif 2579 .irq_retrigger = ioapic_retrigger_irq, 2580 }; 2581 2582 static struct irq_chip ir_ioapic_chip __read_mostly = { 2583 .name = "IR-IO-APIC", 2584 .irq_startup = startup_ioapic_irq, 2585 .irq_mask = mask_ioapic_irq, 2586 .irq_unmask = unmask_ioapic_irq, 2587 #ifdef CONFIG_INTR_REMAP 2588 .irq_ack = ir_ack_apic_edge, 2589 .irq_eoi = ir_ack_apic_level, 2590 #ifdef CONFIG_SMP 2591 .irq_set_affinity = ir_ioapic_set_affinity, 2592 #endif 2593 #endif 2594 .irq_retrigger = ioapic_retrigger_irq, 2595 }; 2596 2597 static inline void init_IO_APIC_traps(void) 2598 { 2599 struct irq_cfg *cfg; 2600 unsigned int irq; 2601 2602 /* 2603 * NOTE! The local APIC isn't very good at handling 2604 * multiple interrupts at the same interrupt level. 2605 * As the interrupt level is determined by taking the 2606 * vector number and shifting that right by 4, we 2607 * want to spread these out a bit so that they don't 2608 * all fall in the same interrupt level. 2609 * 2610 * Also, we've got to be careful not to trash gate 2611 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2612 */ 2613 for_each_active_irq(irq) { 2614 cfg = irq_get_chip_data(irq); 2615 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2616 /* 2617 * Hmm.. We don't have an entry for this, 2618 * so default to an old-fashioned 8259 2619 * interrupt if we can.. 2620 */ 2621 if (irq < legacy_pic->nr_legacy_irqs) 2622 legacy_pic->make_irq(irq); 2623 else 2624 /* Strange. Oh, well.. */ 2625 irq_set_chip(irq, &no_irq_chip); 2626 } 2627 } 2628 } 2629 2630 /* 2631 * The local APIC irq-chip implementation: 2632 */ 2633 2634 static void mask_lapic_irq(struct irq_data *data) 2635 { 2636 unsigned long v; 2637 2638 v = apic_read(APIC_LVT0); 2639 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2640 } 2641 2642 static void unmask_lapic_irq(struct irq_data *data) 2643 { 2644 unsigned long v; 2645 2646 v = apic_read(APIC_LVT0); 2647 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2648 } 2649 2650 static void ack_lapic_irq(struct irq_data *data) 2651 { 2652 ack_APIC_irq(); 2653 } 2654 2655 static struct irq_chip lapic_chip __read_mostly = { 2656 .name = "local-APIC", 2657 .irq_mask = mask_lapic_irq, 2658 .irq_unmask = unmask_lapic_irq, 2659 .irq_ack = ack_lapic_irq, 2660 }; 2661 2662 static void lapic_register_intr(int irq) 2663 { 2664 irq_clear_status_flags(irq, IRQ_LEVEL); 2665 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2666 "edge"); 2667 } 2668 2669 /* 2670 * This looks a bit hackish but it's about the only one way of sending 2671 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2672 * not support the ExtINT mode, unfortunately. 
We need to send these 2673 * cycles as some i82489DX-based boards have glue logic that keeps the 2674 * 8259A interrupt line asserted until INTA. --macro 2675 */ 2676 static inline void __init unlock_ExtINT_logic(void) 2677 { 2678 int apic, pin, i; 2679 struct IO_APIC_route_entry entry0, entry1; 2680 unsigned char save_control, save_freq_select; 2681 2682 pin = find_isa_irq_pin(8, mp_INT); 2683 if (pin == -1) { 2684 WARN_ON_ONCE(1); 2685 return; 2686 } 2687 apic = find_isa_irq_apic(8, mp_INT); 2688 if (apic == -1) { 2689 WARN_ON_ONCE(1); 2690 return; 2691 } 2692 2693 entry0 = ioapic_read_entry(apic, pin); 2694 clear_IO_APIC_pin(apic, pin); 2695 2696 memset(&entry1, 0, sizeof(entry1)); 2697 2698 entry1.dest_mode = 0; /* physical delivery */ 2699 entry1.mask = 0; /* unmask IRQ now */ 2700 entry1.dest = hard_smp_processor_id(); 2701 entry1.delivery_mode = dest_ExtINT; 2702 entry1.polarity = entry0.polarity; 2703 entry1.trigger = 0; 2704 entry1.vector = 0; 2705 2706 ioapic_write_entry(apic, pin, entry1); 2707 2708 save_control = CMOS_READ(RTC_CONTROL); 2709 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2710 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2711 RTC_FREQ_SELECT); 2712 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2713 2714 i = 100; 2715 while (i-- > 0) { 2716 mdelay(10); 2717 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2718 i -= 10; 2719 } 2720 2721 CMOS_WRITE(save_control, RTC_CONTROL); 2722 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2723 clear_IO_APIC_pin(apic, pin); 2724 2725 ioapic_write_entry(apic, pin, entry0); 2726 } 2727 2728 static int disable_timer_pin_1 __initdata; 2729 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2730 static int __init disable_timer_pin_setup(char *arg) 2731 { 2732 disable_timer_pin_1 = 1; 2733 return 0; 2734 } 2735 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2736 2737 int timer_through_8259 __initdata; 2738 2739 /* 2740 * This code may look a bit paranoid, but it's supposed to cooperate with 2741 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2742 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2743 * fanatically on his truly buggy board. 2744 * 2745 * FIXME: really need to revamp this for all platforms. 2746 */ 2747 static inline void __init check_timer(void) 2748 { 2749 struct irq_cfg *cfg = irq_get_chip_data(0); 2750 int node = cpu_to_node(0); 2751 int apic1, pin1, apic2, pin2; 2752 unsigned long flags; 2753 int no_pin1 = 0; 2754 2755 local_irq_save(flags); 2756 2757 /* 2758 * get/set the timer IRQ vector: 2759 */ 2760 legacy_pic->mask(0); 2761 assign_irq_vector(0, cfg, apic->target_cpus()); 2762 2763 /* 2764 * As IRQ0 is to be enabled in the 8259A, the virtual 2765 * wire has to be disabled in the local APIC. Also 2766 * timer interrupts need to be acknowledged manually in 2767 * the 8259A for the i82489DX when using the NMI 2768 * watchdog as that APIC treats NMIs as level-triggered. 2769 * The AEOI mode will finish them in the 8259A 2770 * automatically. 
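 *
 * In outline, the probing below tries, in this order:
 *
 *	1. IRQ0 through the IO-APIC pin reported for the timer (pin1)
 *	2. IRQ0 through the 8259A cascade pin (pin2, ExtINTA)
 *	3. the timer as a local APIC Virtual Wire IRQ (LVT0, fixed mode)
 *	4. the timer as an ExtINT IRQ via the 8259A
 *
 * and panics if none of these produces timer ticks.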
2771 */ 2772 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2773 legacy_pic->init(1); 2774 2775 pin1 = find_isa_irq_pin(0, mp_INT); 2776 apic1 = find_isa_irq_apic(0, mp_INT); 2777 pin2 = ioapic_i8259.pin; 2778 apic2 = ioapic_i8259.apic; 2779 2780 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2781 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2782 cfg->vector, apic1, pin1, apic2, pin2); 2783 2784 /* 2785 * Some BIOS writers are clueless and report the ExtINTA 2786 * I/O APIC input from the cascaded 8259A as the timer 2787 * interrupt input. So just in case, if only one pin 2788 * was found above, try it both directly and through the 2789 * 8259A. 2790 */ 2791 if (pin1 == -1) { 2792 if (intr_remapping_enabled) 2793 panic("BIOS bug: timer not connected to IO-APIC"); 2794 pin1 = pin2; 2795 apic1 = apic2; 2796 no_pin1 = 1; 2797 } else if (pin2 == -1) { 2798 pin2 = pin1; 2799 apic2 = apic1; 2800 } 2801 2802 if (pin1 != -1) { 2803 /* 2804 * Ok, does IRQ0 through the IOAPIC work? 2805 */ 2806 if (no_pin1) { 2807 add_pin_to_irq_node(cfg, node, apic1, pin1); 2808 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2809 } else { 2810 /* For edge trigger, setup_ioapic_irq already 2811 * leaves it unmasked, 2812 * so we only need to unmask if it is level-triggered. 2813 * Do we really have a level-triggered timer? 2814 */ 2815 int idx; 2816 idx = find_irq_entry(apic1, pin1, mp_INT); 2817 if (idx != -1 && irq_trigger(idx)) 2818 unmask_ioapic(cfg); 2819 } 2820 if (timer_irq_works()) { 2821 if (disable_timer_pin_1 > 0) 2822 clear_IO_APIC_pin(0, pin1); 2823 goto out; 2824 } 2825 if (intr_remapping_enabled) 2826 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2827 local_irq_disable(); 2828 clear_IO_APIC_pin(apic1, pin1); 2829 if (!no_pin1) 2830 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2831 "8254 timer not connected to IO-APIC\n"); 2832 2833 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2834 "(IRQ0) through the 8259A ...\n"); 2835 apic_printk(APIC_QUIET, KERN_INFO 2836 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2837 /* 2838 * legacy devices should be connected to IO APIC #0 2839 */ 2840 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2841 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2842 legacy_pic->unmask(0); 2843 if (timer_irq_works()) { 2844 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2845 timer_through_8259 = 1; 2846 goto out; 2847 } 2848 /* 2849 * Cleanup, just in case ... 2850 */ 2851 local_irq_disable(); 2852 legacy_pic->mask(0); 2853 clear_IO_APIC_pin(apic2, pin2); 2854 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2855 } 2856 2857 apic_printk(APIC_QUIET, KERN_INFO 2858 "...trying to set up timer as Virtual Wire IRQ...\n"); 2859 2860 lapic_register_intr(0); 2861 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2862 legacy_pic->unmask(0); 2863 2864 if (timer_irq_works()) { 2865 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2866 goto out; 2867 } 2868 local_irq_disable(); 2869 legacy_pic->mask(0); 2870 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2871 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2872 2873 apic_printk(APIC_QUIET, KERN_INFO 2874 "...trying to set up timer as ExtINT IRQ...\n"); 2875 2876 legacy_pic->init(0); 2877 legacy_pic->make_irq(0); 2878 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2879 2880 unlock_ExtINT_logic(); 2881 2882 if (timer_irq_works()) { 2883 apic_printk(APIC_QUIET, KERN_INFO "..... 
works.\n"); 2884 goto out; 2885 } 2886 local_irq_disable(); 2887 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2888 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2889 "report. Then try booting with the 'noapic' option.\n"); 2890 out: 2891 local_irq_restore(flags); 2892 } 2893 2894 /* 2895 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2896 * to devices. However there may be an I/O APIC pin available for 2897 * this interrupt regardless. The pin may be left unconnected, but 2898 * typically it will be reused as an ExtINT cascade interrupt for 2899 * the master 8259A. In the MPS case such a pin will normally be 2900 * reported as an ExtINT interrupt in the MP table. With ACPI 2901 * there is no provision for ExtINT interrupts, and in the absence 2902 * of an override it would be treated as an ordinary ISA I/O APIC 2903 * interrupt, that is edge-triggered and unmasked by default. We 2904 * used to do this, but it caused problems on some systems because 2905 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2906 * the same ExtINT cascade interrupt to drive the local APIC of the 2907 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2908 * the I/O APIC in all cases now. No actual device should request 2909 * it anyway. --macro 2910 */ 2911 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2912 2913 void __init setup_IO_APIC(void) 2914 { 2915 2916 /* 2917 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2918 */ 2919 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2920 2921 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2922 /* 2923 * Set up IO-APIC IRQ routing. 2924 */ 2925 x86_init.mpparse.setup_ioapic_ids(); 2926 2927 sync_Arb_IDs(); 2928 setup_IO_APIC_irqs(); 2929 init_IO_APIC_traps(); 2930 if (legacy_pic->nr_legacy_irqs) 2931 check_timer(); 2932 } 2933 2934 /* 2935 * Called after all the initialization is done. 
If we didn't find any 2936 * APIC bugs then we can allow the modify fast path. 2937 */ 2938 2939 static int __init io_apic_bug_finalize(void) 2940 { 2941 if (sis_apic_bug == -1) 2942 sis_apic_bug = 0; 2943 return 0; 2944 } 2945 2946 late_initcall(io_apic_bug_finalize); 2947 2948 static void resume_ioapic_id(int ioapic_id) 2949 { 2950 unsigned long flags; 2951 union IO_APIC_reg_00 reg_00; 2952 2953 2954 raw_spin_lock_irqsave(&ioapic_lock, flags); 2955 reg_00.raw = io_apic_read(ioapic_id, 0); 2956 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_id)) { 2957 reg_00.bits.ID = mpc_ioapic_id(ioapic_id); 2958 io_apic_write(ioapic_id, 0, reg_00.raw); 2959 } 2960 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2961 } 2962 2963 static void ioapic_resume(void) 2964 { 2965 int ioapic_id; 2966 2967 for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 2968 resume_ioapic_id(ioapic_id); 2969 2970 restore_ioapic_entries(); 2971 } 2972 2973 static struct syscore_ops ioapic_syscore_ops = { 2974 .suspend = save_ioapic_entries, 2975 .resume = ioapic_resume, 2976 }; 2977 2978 static int __init ioapic_init_ops(void) 2979 { 2980 register_syscore_ops(&ioapic_syscore_ops); 2981 2982 return 0; 2983 } 2984 2985 device_initcall(ioapic_init_ops); 2986 2987 /* 2988 * Dynamic irq allocation and deallocation 2989 */ 2990 unsigned int create_irq_nr(unsigned int from, int node) 2991 { 2992 struct irq_cfg *cfg; 2993 unsigned long flags; 2994 unsigned int ret = 0; 2995 int irq; 2996 2997 if (from < nr_irqs_gsi) 2998 from = nr_irqs_gsi; 2999 3000 irq = alloc_irq_from(from, node); 3001 if (irq < 0) 3002 return 0; 3003 cfg = alloc_irq_cfg(irq, node); 3004 if (!cfg) { 3005 free_irq_at(irq, NULL); 3006 return 0; 3007 } 3008 3009 raw_spin_lock_irqsave(&vector_lock, flags); 3010 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3011 ret = irq; 3012 raw_spin_unlock_irqrestore(&vector_lock, flags); 3013 3014 if (ret) { 3015 irq_set_chip_data(irq, cfg); 3016 irq_clear_status_flags(irq, IRQ_NOREQUEST); 3017 } else { 3018 free_irq_at(irq, cfg); 3019 } 3020 return ret; 3021 } 3022 3023 int create_irq(void) 3024 { 3025 int node = cpu_to_node(0); 3026 unsigned int irq_want; 3027 int irq; 3028 3029 irq_want = nr_irqs_gsi; 3030 irq = create_irq_nr(irq_want, node); 3031 3032 if (irq == 0) 3033 irq = -1; 3034 3035 return irq; 3036 } 3037 3038 void destroy_irq(unsigned int irq) 3039 { 3040 struct irq_cfg *cfg = irq_get_chip_data(irq); 3041 unsigned long flags; 3042 3043 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3044 3045 if (irq_remapped(cfg)) 3046 free_irte(irq); 3047 raw_spin_lock_irqsave(&vector_lock, flags); 3048 __clear_irq_vector(irq, cfg); 3049 raw_spin_unlock_irqrestore(&vector_lock, flags); 3050 free_irq_at(irq, cfg); 3051 } 3052 3053 /* 3054 * MSI message composition 3055 */ 3056 #ifdef CONFIG_PCI_MSI 3057 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3058 struct msi_msg *msg, u8 hpet_id) 3059 { 3060 struct irq_cfg *cfg; 3061 int err; 3062 unsigned dest; 3063 3064 if (disable_apic) 3065 return -ENXIO; 3066 3067 cfg = irq_cfg(irq); 3068 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3069 if (err) 3070 return err; 3071 3072 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3073 3074 if (irq_remapped(cfg)) { 3075 struct irte irte; 3076 int ir_index; 3077 u16 sub_handle; 3078 3079 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3080 BUG_ON(ir_index == -1); 3081 3082 prepare_irte(&irte, cfg->vector, dest); 3083 3084 /* Set source-id of interrupt request */ 3085 if 
(pdev) 3086 set_msi_sid(&irte, pdev); 3087 else 3088 set_hpet_sid(&irte, hpet_id); 3089 3090 modify_irte(irq, &irte); 3091 3092 msg->address_hi = MSI_ADDR_BASE_HI; 3093 msg->data = sub_handle; 3094 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3095 MSI_ADDR_IR_SHV | 3096 MSI_ADDR_IR_INDEX1(ir_index) | 3097 MSI_ADDR_IR_INDEX2(ir_index); 3098 } else { 3099 if (x2apic_enabled()) 3100 msg->address_hi = MSI_ADDR_BASE_HI | 3101 MSI_ADDR_EXT_DEST_ID(dest); 3102 else 3103 msg->address_hi = MSI_ADDR_BASE_HI; 3104 3105 msg->address_lo = 3106 MSI_ADDR_BASE_LO | 3107 ((apic->irq_dest_mode == 0) ? 3108 MSI_ADDR_DEST_MODE_PHYSICAL: 3109 MSI_ADDR_DEST_MODE_LOGICAL) | 3110 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3111 MSI_ADDR_REDIRECTION_CPU: 3112 MSI_ADDR_REDIRECTION_LOWPRI) | 3113 MSI_ADDR_DEST_ID(dest); 3114 3115 msg->data = 3116 MSI_DATA_TRIGGER_EDGE | 3117 MSI_DATA_LEVEL_ASSERT | 3118 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3119 MSI_DATA_DELIVERY_FIXED: 3120 MSI_DATA_DELIVERY_LOWPRI) | 3121 MSI_DATA_VECTOR(cfg->vector); 3122 } 3123 return err; 3124 } 3125 3126 #ifdef CONFIG_SMP 3127 static int 3128 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3129 { 3130 struct irq_cfg *cfg = data->chip_data; 3131 struct msi_msg msg; 3132 unsigned int dest; 3133 3134 if (__ioapic_set_affinity(data, mask, &dest)) 3135 return -1; 3136 3137 __get_cached_msi_msg(data->msi_desc, &msg); 3138 3139 msg.data &= ~MSI_DATA_VECTOR_MASK; 3140 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3141 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3142 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3143 3144 __write_msi_msg(data->msi_desc, &msg); 3145 3146 return 0; 3147 } 3148 #ifdef CONFIG_INTR_REMAP 3149 /* 3150 * Migrate the MSI irq to another cpumask. This migration is 3151 * done in the process context using interrupt-remapping hardware. 3152 */ 3153 static int 3154 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3155 bool force) 3156 { 3157 struct irq_cfg *cfg = data->chip_data; 3158 unsigned int dest, irq = data->irq; 3159 struct irte irte; 3160 3161 if (get_irte(irq, &irte)) 3162 return -1; 3163 3164 if (__ioapic_set_affinity(data, mask, &dest)) 3165 return -1; 3166 3167 irte.vector = cfg->vector; 3168 irte.dest_id = IRTE_DEST(dest); 3169 3170 /* 3171 * atomically update the IRTE with the new destination and vector. 3172 */ 3173 modify_irte(irq, &irte); 3174 3175 /* 3176 * After this point, all the interrupts will start arriving 3177 * at the new destination. So, time to cleanup the previous 3178 * vector allocation. 3179 */ 3180 if (cfg->move_in_progress) 3181 send_cleanup_vector(cfg); 3182 3183 return 0; 3184 } 3185 3186 #endif 3187 #endif /* CONFIG_SMP */ 3188 3189 /* 3190 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3191 * which implement the MSI or MSI-X Capability Structure. 
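 *
 * For reference, the non-remapped message composed by msi_compose_msg()
 * above follows the standard x86 MSI encoding (summarized here; see the
 * MSI_ADDR and MSI_DATA masks in <asm/msidef.h> for the exact bits):
 *
 *	address: 0xFEExxxxx base | destination id | redirection hint
 *	         | destination mode
 *	data:    trigger and level bits | delivery mode | vector
 *
 * so retargeting an MSI rewrites only the destination id in address_lo
 * and the vector in data, which is exactly what msi_set_affinity() does.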
3192 */ 3193 static struct irq_chip msi_chip = { 3194 .name = "PCI-MSI", 3195 .irq_unmask = unmask_msi_irq, 3196 .irq_mask = mask_msi_irq, 3197 .irq_ack = ack_apic_edge, 3198 #ifdef CONFIG_SMP 3199 .irq_set_affinity = msi_set_affinity, 3200 #endif 3201 .irq_retrigger = ioapic_retrigger_irq, 3202 }; 3203 3204 static struct irq_chip msi_ir_chip = { 3205 .name = "IR-PCI-MSI", 3206 .irq_unmask = unmask_msi_irq, 3207 .irq_mask = mask_msi_irq, 3208 #ifdef CONFIG_INTR_REMAP 3209 .irq_ack = ir_ack_apic_edge, 3210 #ifdef CONFIG_SMP 3211 .irq_set_affinity = ir_msi_set_affinity, 3212 #endif 3213 #endif 3214 .irq_retrigger = ioapic_retrigger_irq, 3215 }; 3216 3217 /* 3218 * Map the PCI dev to the corresponding remapping hardware unit 3219 * and allocate 'nvec' consecutive interrupt-remapping table entries 3220 * in it. 3221 */ 3222 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3223 { 3224 struct intel_iommu *iommu; 3225 int index; 3226 3227 iommu = map_dev_to_ir(dev); 3228 if (!iommu) { 3229 printk(KERN_ERR 3230 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3231 return -ENOENT; 3232 } 3233 3234 index = alloc_irte(iommu, irq, nvec); 3235 if (index < 0) { 3236 printk(KERN_ERR 3237 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3238 pci_name(dev)); 3239 return -ENOSPC; 3240 } 3241 return index; 3242 } 3243 3244 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3245 { 3246 struct irq_chip *chip = &msi_chip; 3247 struct msi_msg msg; 3248 int ret; 3249 3250 ret = msi_compose_msg(dev, irq, &msg, -1); 3251 if (ret < 0) 3252 return ret; 3253 3254 irq_set_msi_desc(irq, msidesc); 3255 write_msi_msg(irq, &msg); 3256 3257 if (irq_remapped(irq_get_chip_data(irq))) { 3258 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3259 chip = &msi_ir_chip; 3260 } 3261 3262 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3263 3264 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3265 3266 return 0; 3267 } 3268 3269 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3270 { 3271 int node, ret, sub_handle, index = 0; 3272 unsigned int irq, irq_want; 3273 struct msi_desc *msidesc; 3274 struct intel_iommu *iommu = NULL; 3275 3276 /* x86 doesn't support multiple MSI yet */ 3277 if (type == PCI_CAP_ID_MSI && nvec > 1) 3278 return 1; 3279 3280 node = dev_to_node(&dev->dev); 3281 irq_want = nr_irqs_gsi; 3282 sub_handle = 0; 3283 list_for_each_entry(msidesc, &dev->msi_list, list) { 3284 irq = create_irq_nr(irq_want, node); 3285 if (irq == 0) 3286 return -1; 3287 irq_want = irq + 1; 3288 if (!intr_remapping_enabled) 3289 goto no_ir; 3290 3291 if (!sub_handle) { 3292 /* 3293 * allocate the consecutive block of IRTE's 3294 * for 'nvec' 3295 */ 3296 index = msi_alloc_irte(dev, irq, nvec); 3297 if (index < 0) { 3298 ret = index; 3299 goto error; 3300 } 3301 } else { 3302 iommu = map_dev_to_ir(dev); 3303 if (!iommu) { 3304 ret = -ENOENT; 3305 goto error; 3306 } 3307 /* 3308 * setup the mapping between the irq and the IRTE 3309 * base index, the sub_handle pointing to the 3310 * appropriate interrupt remap table entry. 
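 *
 * E.g. (an invented layout): a device granted a block of IRTEs
 * starting at 'index' gets irq, irq+1, ... mapped onto IRTE
 * index+0, index+1, ... with sub_handle as the offset of each
 * vector within the block.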
3311 */ 3312 set_irte_irq(irq, iommu, index, sub_handle); 3313 } 3314 no_ir: 3315 ret = setup_msi_irq(dev, msidesc, irq); 3316 if (ret < 0) 3317 goto error; 3318 sub_handle++; 3319 } 3320 return 0; 3321 3322 error: 3323 destroy_irq(irq); 3324 return ret; 3325 } 3326 3327 void native_teardown_msi_irq(unsigned int irq) 3328 { 3329 destroy_irq(irq); 3330 } 3331 3332 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3333 #ifdef CONFIG_SMP 3334 static int 3335 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3336 bool force) 3337 { 3338 struct irq_cfg *cfg = data->chip_data; 3339 unsigned int dest, irq = data->irq; 3340 struct msi_msg msg; 3341 3342 if (__ioapic_set_affinity(data, mask, &dest)) 3343 return -1; 3344 3345 dmar_msi_read(irq, &msg); 3346 3347 msg.data &= ~MSI_DATA_VECTOR_MASK; 3348 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3349 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3350 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3351 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3352 3353 dmar_msi_write(irq, &msg); 3354 3355 return 0; 3356 } 3357 3358 #endif /* CONFIG_SMP */ 3359 3360 static struct irq_chip dmar_msi_type = { 3361 .name = "DMAR_MSI", 3362 .irq_unmask = dmar_msi_unmask, 3363 .irq_mask = dmar_msi_mask, 3364 .irq_ack = ack_apic_edge, 3365 #ifdef CONFIG_SMP 3366 .irq_set_affinity = dmar_msi_set_affinity, 3367 #endif 3368 .irq_retrigger = ioapic_retrigger_irq, 3369 }; 3370 3371 int arch_setup_dmar_msi(unsigned int irq) 3372 { 3373 int ret; 3374 struct msi_msg msg; 3375 3376 ret = msi_compose_msg(NULL, irq, &msg, -1); 3377 if (ret < 0) 3378 return ret; 3379 dmar_msi_write(irq, &msg); 3380 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3381 "edge"); 3382 return 0; 3383 } 3384 #endif 3385 3386 #ifdef CONFIG_HPET_TIMER 3387 3388 #ifdef CONFIG_SMP 3389 static int hpet_msi_set_affinity(struct irq_data *data, 3390 const struct cpumask *mask, bool force) 3391 { 3392 struct irq_cfg *cfg = data->chip_data; 3393 struct msi_msg msg; 3394 unsigned int dest; 3395 3396 if (__ioapic_set_affinity(data, mask, &dest)) 3397 return -1; 3398 3399 hpet_msi_read(data->handler_data, &msg); 3400 3401 msg.data &= ~MSI_DATA_VECTOR_MASK; 3402 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3403 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3404 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3405 3406 hpet_msi_write(data->handler_data, &msg); 3407 3408 return 0; 3409 } 3410 3411 #endif /* CONFIG_SMP */ 3412 3413 static struct irq_chip ir_hpet_msi_type = { 3414 .name = "IR-HPET_MSI", 3415 .irq_unmask = hpet_msi_unmask, 3416 .irq_mask = hpet_msi_mask, 3417 #ifdef CONFIG_INTR_REMAP 3418 .irq_ack = ir_ack_apic_edge, 3419 #ifdef CONFIG_SMP 3420 .irq_set_affinity = ir_msi_set_affinity, 3421 #endif 3422 #endif 3423 .irq_retrigger = ioapic_retrigger_irq, 3424 }; 3425 3426 static struct irq_chip hpet_msi_type = { 3427 .name = "HPET_MSI", 3428 .irq_unmask = hpet_msi_unmask, 3429 .irq_mask = hpet_msi_mask, 3430 .irq_ack = ack_apic_edge, 3431 #ifdef CONFIG_SMP 3432 .irq_set_affinity = hpet_msi_set_affinity, 3433 #endif 3434 .irq_retrigger = ioapic_retrigger_irq, 3435 }; 3436 3437 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3438 { 3439 struct irq_chip *chip = &hpet_msi_type; 3440 struct msi_msg msg; 3441 int ret; 3442 3443 if (intr_remapping_enabled) { 3444 struct intel_iommu *iommu = map_hpet_to_ir(id); 3445 int index; 3446 3447 if (!iommu) 3448 return -1; 3449 3450 index = alloc_irte(iommu, irq, 1); 3451 if (index < 0) 3452 return -1; 3453 } 3454 3455 ret 
= msi_compose_msg(NULL, irq, &msg, id); 3456 if (ret < 0) 3457 return ret; 3458 3459 hpet_msi_write(irq_get_handler_data(irq), &msg); 3460 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3461 if (irq_remapped(irq_get_chip_data(irq))) 3462 chip = &ir_hpet_msi_type; 3463 3464 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3465 return 0; 3466 } 3467 #endif 3468 3469 #endif /* CONFIG_PCI_MSI */ 3470 /* 3471 * Hypertransport interrupt support 3472 */ 3473 #ifdef CONFIG_HT_IRQ 3474 3475 #ifdef CONFIG_SMP 3476 3477 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3478 { 3479 struct ht_irq_msg msg; 3480 fetch_ht_irq_msg(irq, &msg); 3481 3482 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3483 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3484 3485 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3486 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3487 3488 write_ht_irq_msg(irq, &msg); 3489 } 3490 3491 static int 3492 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3493 { 3494 struct irq_cfg *cfg = data->chip_data; 3495 unsigned int dest; 3496 3497 if (__ioapic_set_affinity(data, mask, &dest)) 3498 return -1; 3499 3500 target_ht_irq(data->irq, dest, cfg->vector); 3501 return 0; 3502 } 3503 3504 #endif 3505 3506 static struct irq_chip ht_irq_chip = { 3507 .name = "PCI-HT", 3508 .irq_mask = mask_ht_irq, 3509 .irq_unmask = unmask_ht_irq, 3510 .irq_ack = ack_apic_edge, 3511 #ifdef CONFIG_SMP 3512 .irq_set_affinity = ht_set_affinity, 3513 #endif 3514 .irq_retrigger = ioapic_retrigger_irq, 3515 }; 3516 3517 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3518 { 3519 struct irq_cfg *cfg; 3520 int err; 3521 3522 if (disable_apic) 3523 return -ENXIO; 3524 3525 cfg = irq_cfg(irq); 3526 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3527 if (!err) { 3528 struct ht_irq_msg msg; 3529 unsigned dest; 3530 3531 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3532 apic->target_cpus()); 3533 3534 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3535 3536 msg.address_lo = 3537 HT_IRQ_LOW_BASE | 3538 HT_IRQ_LOW_DEST_ID(dest) | 3539 HT_IRQ_LOW_VECTOR(cfg->vector) | 3540 ((apic->irq_dest_mode == 0) ? 3541 HT_IRQ_LOW_DM_PHYSICAL : 3542 HT_IRQ_LOW_DM_LOGICAL) | 3543 HT_IRQ_LOW_RQEOI_EDGE | 3544 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3545 HT_IRQ_LOW_MT_FIXED : 3546 HT_IRQ_LOW_MT_ARBITRATED) | 3547 HT_IRQ_LOW_IRQ_MASKED; 3548 3549 write_ht_irq_msg(irq, &msg); 3550 3551 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3552 handle_edge_irq, "edge"); 3553 3554 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3555 } 3556 return err; 3557 } 3558 #endif /* CONFIG_HT_IRQ */ 3559 3560 static int 3561 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3562 { 3563 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); 3564 int ret; 3565 3566 if (!cfg) 3567 return -EINVAL; 3568 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3569 if (!ret) 3570 setup_ioapic_irq(irq, cfg, attr); 3571 return ret; 3572 } 3573 3574 int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3575 struct io_apic_irq_attr *attr) 3576 { 3577 unsigned int id = attr->ioapic, pin = attr->ioapic_pin; 3578 int ret; 3579 3580 /* Avoid redundant programming */ 3581 if (test_bit(pin, ioapics[id].pin_programmed)) { 3582 pr_debug("Pin %d-%d already programmed\n", 3583 mpc_ioapic_id(id), pin); 3584 return 0; 3585 } 3586 ret = io_apic_setup_irq_pin(irq, node, attr); 3587 if (!ret) 3588 set_bit(pin, ioapics[id].pin_programmed); 3589 return ret; 3590 } 3591 3592 static int __init io_apic_get_redir_entries(int ioapic) 3593 { 3594 union IO_APIC_reg_01 reg_01; 3595 unsigned long flags; 3596 3597 raw_spin_lock_irqsave(&ioapic_lock, flags); 3598 reg_01.raw = io_apic_read(ioapic, 1); 3599 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3600 3601 /* The register returns the maximum redirection entry index 3602 * supported, which is one less than the total number of 3603 * redirection entries. 3604 */ 3605 return reg_01.bits.entries + 1; 3606 } 3607 3608 static void __init probe_nr_irqs_gsi(void) 3609 { 3610 int nr; 3611 3612 nr = gsi_top + NR_IRQS_LEGACY; 3613 if (nr > nr_irqs_gsi) 3614 nr_irqs_gsi = nr; 3615 3616 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3617 } 3618 3619 int get_nr_irqs_gsi(void) 3620 { 3621 return nr_irqs_gsi; 3622 } 3623 3624 #ifdef CONFIG_SPARSE_IRQ 3625 int __init arch_probe_nr_irqs(void) 3626 { 3627 int nr; 3628 3629 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3630 nr_irqs = NR_VECTORS * nr_cpu_ids; 3631 3632 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3633 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3634 /* 3635 * for MSI and HT dynamic irqs 3636 */ 3637 nr += nr_irqs_gsi * 16; 3638 #endif 3639 if (nr < nr_irqs) 3640 nr_irqs = nr; 3641 3642 return NR_IRQS_LEGACY; 3643 } 3644 #endif 3645 3646 int io_apic_set_pci_routing(struct device *dev, int irq, 3647 struct io_apic_irq_attr *irq_attr) 3648 { 3649 int node; 3650 3651 if (!IO_APIC_IRQ(irq)) { 3652 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3653 irq_attr->ioapic); 3654 return -EINVAL; 3655 } 3656 3657 node = dev ? dev_to_node(dev) : cpu_to_node(0); 3658 3659 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3660 } 3661 3662 #ifdef CONFIG_X86_32 3663 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3664 { 3665 union IO_APIC_reg_00 reg_00; 3666 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3667 physid_mask_t tmp; 3668 unsigned long flags; 3669 int i = 0; 3670 3671 /* 3672 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3673 * buses (one for LAPICs, one for IOAPICs), where its predecessors only 3674 * supported up to 16 on one shared APIC bus. 3675 * 3676 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3677 * advantage of new APIC bus architecture. 
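 *
 * Note that valid IDs run from 0 to get_physical_broadcast() - 1;
 * the broadcast ID itself (0xf on the legacy APIC bus, 0xff with
 * xAPIC) is reserved, which is what the range checks below enforce.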
3678 */ 3679 3680 if (physids_empty(apic_id_map)) 3681 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3682 3683 raw_spin_lock_irqsave(&ioapic_lock, flags); 3684 reg_00.raw = io_apic_read(ioapic, 0); 3685 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3686 3687 if (apic_id >= get_physical_broadcast()) { 3688 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3689 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3690 apic_id = reg_00.bits.ID; 3691 } 3692 3693 /* 3694 * Every APIC in a system must have a unique ID or we get lots of nice 3695 * 'stuck on smp_invalidate_needed IPI wait' messages. 3696 */ 3697 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3698 3699 for (i = 0; i < get_physical_broadcast(); i++) { 3700 if (!apic->check_apicid_used(&apic_id_map, i)) 3701 break; 3702 } 3703 3704 if (i == get_physical_broadcast()) 3705 panic("Max apic_id exceeded!\n"); 3706 3707 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3708 "trying %d\n", ioapic, apic_id, i); 3709 3710 apic_id = i; 3711 } 3712 3713 apic->apicid_to_cpu_present(apic_id, &tmp); 3714 physids_or(apic_id_map, apic_id_map, tmp); 3715 3716 if (reg_00.bits.ID != apic_id) { 3717 reg_00.bits.ID = apic_id; 3718 3719 raw_spin_lock_irqsave(&ioapic_lock, flags); 3720 io_apic_write(ioapic, 0, reg_00.raw); 3721 reg_00.raw = io_apic_read(ioapic, 0); 3722 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3723 3724 /* Sanity check */ 3725 if (reg_00.bits.ID != apic_id) { 3726 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3727 return -1; 3728 } 3729 } 3730 3731 apic_printk(APIC_VERBOSE, KERN_INFO 3732 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3733 3734 return apic_id; 3735 } 3736 3737 static u8 __init io_apic_unique_id(u8 id) 3738 { 3739 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3740 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3741 return io_apic_get_unique_id(nr_ioapics, id); 3742 else 3743 return id; 3744 } 3745 #else 3746 static u8 __init io_apic_unique_id(u8 id) 3747 { 3748 int i; 3749 DECLARE_BITMAP(used, 256); 3750 3751 bitmap_zero(used, 256); 3752 for (i = 0; i < nr_ioapics; i++) { 3753 __set_bit(mpc_ioapic_id(i), used); 3754 } 3755 if (!test_bit(id, used)) 3756 return id; 3757 return find_first_zero_bit(used, 256); 3758 } 3759 #endif 3760 3761 static int __init io_apic_get_version(int ioapic) 3762 { 3763 union IO_APIC_reg_01 reg_01; 3764 unsigned long flags; 3765 3766 raw_spin_lock_irqsave(&ioapic_lock, flags); 3767 reg_01.raw = io_apic_read(ioapic, 1); 3768 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3769 3770 return reg_01.bits.version; 3771 } 3772 3773 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3774 { 3775 int ioapic, pin, idx; 3776 3777 if (skip_ioapic_setup) 3778 return -1; 3779 3780 ioapic = mp_find_ioapic(gsi); 3781 if (ioapic < 0) 3782 return -1; 3783 3784 pin = mp_find_ioapic_pin(ioapic, gsi); 3785 if (pin < 0) 3786 return -1; 3787 3788 idx = find_irq_entry(ioapic, pin, mp_INT); 3789 if (idx < 0) 3790 return -1; 3791 3792 *trigger = irq_trigger(idx); 3793 *polarity = irq_polarity(idx); 3794 return 0; 3795 } 3796 3797 /* 3798 * This function currently is only a helper for the i386 smp boot process where 3799 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3800 * so mask in all cases should simply be apic->target_cpus() 3801 */ 3802 #ifdef CONFIG_SMP 3803 void __init setup_ioapic_dest(void) 3804 { 3805 int pin, ioapic, irq, irq_entry; 3806 const struct cpumask *mask; 3807 struct 
irq_data *idata; 3808 3809 if (skip_ioapic_setup == 1) 3810 return; 3811 3812 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3813 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3814 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3815 if (irq_entry == -1) 3816 continue; 3817 irq = pin_2_irq(irq_entry, ioapic, pin); 3818 3819 if ((ioapic > 0) && (irq > 16)) 3820 continue; 3821 3822 idata = irq_get_irq_data(irq); 3823 3824 /* 3825 * Honour affinities which have been set in early boot 3826 */ 3827 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3828 mask = idata->affinity; 3829 else 3830 mask = apic->target_cpus(); 3831 3832 if (intr_remapping_enabled) 3833 ir_ioapic_set_affinity(idata, mask, false); 3834 else 3835 ioapic_set_affinity(idata, mask, false); 3836 } 3837 3838 } 3839 #endif 3840 3841 #define IOAPIC_RESOURCE_NAME_SIZE 11 3842 3843 static struct resource *ioapic_resources; 3844 3845 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3846 { 3847 unsigned long n; 3848 struct resource *res; 3849 char *mem; 3850 int i; 3851 3852 if (nr_ioapics <= 0) 3853 return NULL; 3854 3855 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3856 n *= nr_ioapics; 3857 3858 mem = alloc_bootmem(n); 3859 res = (void *)mem; 3860 3861 mem += sizeof(struct resource) * nr_ioapics; 3862 3863 for (i = 0; i < nr_ioapics; i++) { 3864 res[i].name = mem; 3865 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3866 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3867 mem += IOAPIC_RESOURCE_NAME_SIZE; 3868 } 3869 3870 ioapic_resources = res; 3871 3872 return res; 3873 } 3874 3875 void __init ioapic_and_gsi_init(void) 3876 { 3877 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3878 struct resource *ioapic_res; 3879 int i; 3880 3881 ioapic_res = ioapic_setup_resources(nr_ioapics); 3882 for (i = 0; i < nr_ioapics; i++) { 3883 if (smp_found_config) { 3884 ioapic_phys = mpc_ioapic_addr(i); 3885 #ifdef CONFIG_X86_32 3886 if (!ioapic_phys) { 3887 printk(KERN_ERR 3888 "WARNING: bogus zero IO-APIC " 3889 "address found in MPTABLE, " 3890 "disabling IO/APIC support!\n"); 3891 smp_found_config = 0; 3892 skip_ioapic_setup = 1; 3893 goto fake_ioapic_page; 3894 } 3895 #endif 3896 } else { 3897 #ifdef CONFIG_X86_32 3898 fake_ioapic_page: 3899 #endif 3900 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3901 ioapic_phys = __pa(ioapic_phys); 3902 } 3903 set_fixmap_nocache(idx, ioapic_phys); 3904 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3905 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3906 ioapic_phys); 3907 idx++; 3908 3909 ioapic_res->start = ioapic_phys; 3910 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3911 ioapic_res++; 3912 } 3913 3914 probe_nr_irqs_gsi(); 3915 } 3916 3917 void __init ioapic_insert_resources(void) 3918 { 3919 int i; 3920 struct resource *r = ioapic_resources; 3921 3922 if (!r) { 3923 if (nr_ioapics > 0) 3924 printk(KERN_ERR 3925 "IO APIC resources couldn't be allocated.\n"); 3926 return; 3927 } 3928 3929 for (i = 0; i < nr_ioapics; i++) { 3930 insert_resource(&iomem_resource, r); 3931 r++; 3932 } 3933 } 3934 3935 int mp_find_ioapic(u32 gsi) 3936 { 3937 int i = 0; 3938 3939 if (nr_ioapics == 0) 3940 return -1; 3941 3942 /* Find the IOAPIC that manages this GSI. 
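 * Example (a made-up layout): with IOAPIC 0 covering GSI 0-23 and
 * IOAPIC 1 covering GSI 24-47, mp_find_ioapic(30) returns 1 and
 * mp_find_ioapic_pin(1, 30) returns 30 - 24 = 6.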
*/ 3943 for (i = 0; i < nr_ioapics; i++) { 3944 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3945 if ((gsi >= gsi_cfg->gsi_base) 3946 && (gsi <= gsi_cfg->gsi_end)) 3947 return i; 3948 } 3949 3950 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3951 return -1; 3952 } 3953 3954 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3955 { 3956 struct mp_ioapic_gsi *gsi_cfg; 3957 3958 if (WARN_ON(ioapic == -1)) 3959 return -1; 3960 3961 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3962 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3963 return -1; 3964 3965 return gsi - gsi_cfg->gsi_base; 3966 } 3967 3968 static __init int bad_ioapic(unsigned long address) 3969 { 3970 if (nr_ioapics >= MAX_IO_APICS) { 3971 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 3972 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 3973 return 1; 3974 } 3975 if (!address) { 3976 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 3977 " found in table, skipping!\n"); 3978 return 1; 3979 } 3980 return 0; 3981 } 3982 3983 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3984 { 3985 int idx = 0; 3986 int entries; 3987 struct mp_ioapic_gsi *gsi_cfg; 3988 3989 if (bad_ioapic(address)) 3990 return; 3991 3992 idx = nr_ioapics; 3993 3994 ioapics[idx].mp_config.type = MP_IOAPIC; 3995 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3996 ioapics[idx].mp_config.apicaddr = address; 3997 3998 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3999 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 4000 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 4001 4002 /* 4003 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 4004 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 4005 */ 4006 entries = io_apic_get_redir_entries(idx); 4007 gsi_cfg = mp_ioapic_gsi_routing(idx); 4008 gsi_cfg->gsi_base = gsi_base; 4009 gsi_cfg->gsi_end = gsi_base + entries - 1; 4010 4011 /* 4012 * The number of IO-APIC IRQ registers (== #pins): 4013 */ 4014 ioapics[idx].nr_registers = entries; 4015 4016 if (gsi_cfg->gsi_end >= gsi_top) 4017 gsi_top = gsi_cfg->gsi_end + 1; 4018 4019 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 4020 "GSI %d-%d\n", idx, mpc_ioapic_id(idx), 4021 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 4022 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 4023 4024 nr_ioapics++; 4025 } 4026 4027 /* Enable IOAPIC early just for system timer */ 4028 void __init pre_init_apic_IRQ0(void) 4029 { 4030 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 4031 4032 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4033 #ifndef CONFIG_SMP 4034 physid_set_mask_of_physid(boot_cpu_physical_apicid, 4035 &phys_cpu_present_map); 4036 #endif 4037 setup_local_APIC(); 4038 4039 io_apic_setup_irq_pin(0, 0, &attr); 4040 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 4041 "edge"); 4042 } 4043
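/*
 * Usage sketch for the dynamic irq allocation above (a hypothetical
 * caller; my_handler and my_dev are invented names, and error handling
 * is trimmed):
 *
 *	int irq = create_irq();		(irq >= nr_irqs_gsi, vector assigned)
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_handler, 0, "my_dev", my_dev)) {
 *		destroy_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	destroy_irq(irq);		(clears the vector, frees the cfg)
 */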