/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively
 * Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
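	 * One entry per redirection-table register is allocated for this
	 * array at early-init time (see arch_early_irq_init() below).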
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(id)	ioapics[id].mp_config.apicver

int mpc_ioapic_id(int id)
{
	return ioapics[id].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int id)
{
	return ioapics[id].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int id)
{
	return &ioapics[id].gsi_config;
}

int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}


/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs.
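 * With CONFIG_SPARSE_IRQ only the legacy entries are static; everything
 * else is allocated on demand via alloc_irq_cfg().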
 */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ?
		irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require that we rewrite the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set?
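		 * Remote IRR stays set from delivery of a level-triggered
		 * interrupt until the local APIC's EOI reaches the IO-APIC.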
		 */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	return eu.entry;
}

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.entry = __ioapic_read_entry(apic, pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
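 * If the old apic/pin pair is not found, the new one is simply appended.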
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		if (mpc_ioapic_ver(apic) >= 0x20) {
			unsigned long flags;

			raw_spin_lock_irqsave(&ioapic_lock, flags);
			io_apic_eoi(apic, entry.vector);
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		} else {
			/*
			 * Mechanism by which we clear remote-IRR in this
			 * case is by changing the trigger mode to edge and
			 * back to level.
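			 * (IO-APICs older than version 0x20 lack the
			 * explicit EOI register used above.)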
			 */
			entry.trigger = IOAPIC_EDGE;
			ioapic_write_entry(apic, pin, entry);
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTE's
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}

/*
 * Find the IRQ entry number of a certain pin.
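 * Returns an index into mp_irqs[], or -1 if no source entry of the
 * given type routes to this apic/pin.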
756 */ 757 static int find_irq_entry(int apic, int pin, int type) 758 { 759 int i; 760 761 for (i = 0; i < mp_irq_entries; i++) 762 if (mp_irqs[i].irqtype == type && 763 (mp_irqs[i].dstapic == mpc_ioapic_id(apic) || 764 mp_irqs[i].dstapic == MP_APIC_ALL) && 765 mp_irqs[i].dstirq == pin) 766 return i; 767 768 return -1; 769 } 770 771 /* 772 * Find the pin to which IRQ[irq] (ISA) is connected 773 */ 774 static int __init find_isa_irq_pin(int irq, int type) 775 { 776 int i; 777 778 for (i = 0; i < mp_irq_entries; i++) { 779 int lbus = mp_irqs[i].srcbus; 780 781 if (test_bit(lbus, mp_bus_not_pci) && 782 (mp_irqs[i].irqtype == type) && 783 (mp_irqs[i].srcbusirq == irq)) 784 785 return mp_irqs[i].dstirq; 786 } 787 return -1; 788 } 789 790 static int __init find_isa_irq_apic(int irq, int type) 791 { 792 int i; 793 794 for (i = 0; i < mp_irq_entries; i++) { 795 int lbus = mp_irqs[i].srcbus; 796 797 if (test_bit(lbus, mp_bus_not_pci) && 798 (mp_irqs[i].irqtype == type) && 799 (mp_irqs[i].srcbusirq == irq)) 800 break; 801 } 802 if (i < mp_irq_entries) { 803 int apic; 804 for(apic = 0; apic < nr_ioapics; apic++) { 805 if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic) 806 return apic; 807 } 808 } 809 810 return -1; 811 } 812 813 #if defined(CONFIG_EISA) || defined(CONFIG_MCA) 814 /* 815 * EISA Edge/Level control register, ELCR 816 */ 817 static int EISA_ELCR(unsigned int irq) 818 { 819 if (irq < legacy_pic->nr_legacy_irqs) { 820 unsigned int port = 0x4d0 + (irq >> 3); 821 return (inb(port) >> (irq & 7)) & 1; 822 } 823 apic_printk(APIC_VERBOSE, KERN_INFO 824 "Broken MPtable reports ISA irq %d\n", irq); 825 return 0; 826 } 827 828 #endif 829 830 /* ISA interrupts are always polarity zero edge triggered, 831 * when listed as conforming in the MP table. */ 832 833 #define default_ISA_trigger(idx) (0) 834 #define default_ISA_polarity(idx) (0) 835 836 /* EISA interrupts are always polarity zero and can be edge or level 837 * trigger depending on the ELCR value. If an interrupt is listed as 838 * EISA conforming in the MP table, that means its trigger type must 839 * be read in from the ELCR */ 840 841 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) 842 #define default_EISA_polarity(idx) default_ISA_polarity(idx) 843 844 /* PCI interrupts are always polarity one level triggered, 845 * when listed as conforming in the MP table. */ 846 847 #define default_PCI_trigger(idx) (1) 848 #define default_PCI_polarity(idx) (1) 849 850 /* MCA interrupts are always polarity zero level triggered, 851 * when listed as conforming in the MP table. */ 852 853 #define default_MCA_trigger(idx) (1) 854 #define default_MCA_polarity(idx) default_ISA_polarity(idx) 855 856 static int irq_polarity(int idx) 857 { 858 int bus = mp_irqs[idx].srcbus; 859 int polarity; 860 861 /* 862 * Determine IRQ line polarity (high active or low active): 863 */ 864 switch (mp_irqs[idx].irqflag & 3) 865 { 866 case 0: /* conforms, ie. 
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}

/*
 * Find a specific PCI IRQ entry.
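 * Matches the bus/slot/pin triple against the MP-table interrupt
 * sources and fills in *irq_attr on success.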
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them.
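			 * Advance to the next of the eight priority-level
			 * offsets and wrap around to the first vector.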
			 */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
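		 * Mark this cpu in the domain so that the per-cpu
		 * vector_irq entry below gets populated for it as well.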
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		irq_remap_modify_chip_defaults(chip);
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}

static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
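		 * (The pin number serves as that virtual vector; the real
		 * CPU vector comes from the interrupt-remapping table entry.)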
1337 */ 1338 ir_entry->vector = pin; 1339 1340 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: " 1341 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d " 1342 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X " 1343 "Avail:%X Vector:%02X Dest:%08X " 1344 "SID:%04X SQ:%X SVT:%X)\n", 1345 apic_id, irte.present, irte.fpd, irte.dst_mode, 1346 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode, 1347 irte.avail, irte.vector, irte.dest_id, 1348 irte.sid, irte.sq, irte.svt); 1349 } else { 1350 entry->delivery_mode = apic->irq_delivery_mode; 1351 entry->dest_mode = apic->irq_dest_mode; 1352 entry->dest = destination; 1353 entry->vector = vector; 1354 } 1355 1356 entry->mask = 0; /* enable IRQ */ 1357 entry->trigger = trigger; 1358 entry->polarity = polarity; 1359 1360 /* Mask level triggered irqs. 1361 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1362 */ 1363 if (trigger) 1364 entry->mask = 1; 1365 return 0; 1366 } 1367 1368 static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, 1369 struct irq_cfg *cfg, int trigger, int polarity) 1370 { 1371 struct IO_APIC_route_entry entry; 1372 unsigned int dest; 1373 1374 if (!IO_APIC_IRQ(irq)) 1375 return; 1376 /* 1377 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1378 * controllers like 8259. Now that IO-APIC can handle this irq, update 1379 * the cfg->domain. 1380 */ 1381 if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) 1382 apic->vector_allocation_domain(0, cfg->domain); 1383 1384 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1385 return; 1386 1387 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 1388 1389 apic_printk(APIC_VERBOSE,KERN_DEBUG 1390 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1391 "IRQ %d Mode:%i Active:%i Dest:%d)\n", 1392 apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector, 1393 irq, trigger, polarity, dest); 1394 1395 1396 if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry, 1397 dest, trigger, polarity, cfg->vector, pin)) { 1398 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1399 mpc_ioapic_id(apic_id), pin); 1400 __clear_irq_vector(irq, cfg); 1401 return; 1402 } 1403 1404 ioapic_register_intr(irq, cfg, trigger); 1405 if (irq < legacy_pic->nr_legacy_irqs) 1406 legacy_pic->mask(irq); 1407 1408 ioapic_write_entry(apic_id, pin, entry); 1409 } 1410 1411 static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) 1412 { 1413 if (idx != -1) 1414 return false; 1415 1416 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", 1417 mpc_ioapic_id(apic_id), pin); 1418 return true; 1419 } 1420 1421 static void __init __io_apic_setup_irqs(unsigned int apic_id) 1422 { 1423 int idx, node = cpu_to_node(0); 1424 struct io_apic_irq_attr attr; 1425 unsigned int pin, irq; 1426 1427 for (pin = 0; pin < ioapics[apic_id].nr_registers; pin++) { 1428 idx = find_irq_entry(apic_id, pin, mp_INT); 1429 if (io_apic_pin_not_connected(idx, apic_id, pin)) 1430 continue; 1431 1432 irq = pin_2_irq(idx, apic_id, pin); 1433 1434 if ((apic_id > 0) && (irq > 16)) 1435 continue; 1436 1437 /* 1438 * Skip the timer IRQ if there's a quirk handler 1439 * installed and if it returns 1: 1440 */ 1441 if (apic->multi_timer_check && 1442 apic->multi_timer_check(apic_id, irq)) 1443 continue; 1444 1445 set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), 1446 irq_polarity(idx)); 1447 1448 io_apic_setup_irq_pin(irq, node, &attr); 1449 } 1450 } 1451 1452 static void __init setup_IO_APIC_irqs(void) 1453 { 1454 unsigned int apic_id; 1455 1456 
	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
		__io_apic_setup_irqs(apic_id);
}

/*
 * For a GSI that is not on the first IO-APIC and could not be set up
 * via acpi_register_gsi(), such as the special SCI on the IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have an 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}


__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(i), ioapics[i].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(apic));
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %02X\n",
		       reg_01.bits.entries);

		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %02X\n",
		       reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		if (intr_remapping_enabled) {
			printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
				" Pol Stat Indx2 Zero Vect:\n");
		} else {
			printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
				" Stat Dmod Deli Vect:\n");
		}

		for (i = 0; i <= reg_01.bits.entries; i++) {
			if (intr_remapping_enabled) {
				struct IO_APIC_route_entry entry;
				struct IR_IO_APIC_route_entry *ir_entry;

				entry = ioapic_read_entry(apic, i);
				ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
				printk(KERN_DEBUG " %02x %04X ",
					i,
					ir_entry->index
				);
				printk("%1d %1d %1d %1d %1d "
					"%1d %1d %X %02X\n",
					ir_entry->format,
					ir_entry->mask,
					ir_entry->trigger,
					ir_entry->irr,
					ir_entry->polarity,
					ir_entry->delivery_status,
					ir_entry->index2,
					ir_entry->zero,
					ir_entry->vector
				);
			} else {
				struct IO_APIC_route_entry entry;

				entry = ioapic_read_entry(apic, i);
				printk(KERN_DEBUG " %02x %02X  ",
					i,
					entry.dest
				);
				printk("%1d %1d %1d %1d %1d "
					"%1d %1d %02X\n",
					entry.mask,
					entry.trigger,
					entry.irr,
					entry.polarity,
					entry.delivery_status,
					entry.dest_mode,
					entry.delivery_mode,
					entry.vector
				);
			}
		}
	}

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

late_initcall(print_ICs);


/* Where, if anywhere, the i8259 is connected in ExtINT mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
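	 * (The BIOS may also have left the pin masked, in which case the
	 * hardware scan above misses it.)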
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is set up in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little complex (need to configure both
	 * IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(apic_id);

		if (mpc_ioapic_id(apic_id) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[apic_id].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(apic_id))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[apic_id].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;

			apic->apicid_to_cpu_present(mpc_ioapic_id(apic_id),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(apic_id));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(apic_id))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(apic_id);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(apic_id) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(apic_id));

		reg_00.bits.ID = mpc_ioapic_id(apic_id);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(apic_id))
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems. They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards; their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass...
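	 * (i.e. mdelay for ten jiffies' worth of milliseconds, computed
	 * from HZ)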
*/ 2139 mdelay((10 * 1000) / HZ); 2140 local_irq_restore(flags); 2141 2142 /* 2143 * Expect a few ticks at least, to be sure some possible 2144 * glue logic does not lock up after the first one or two 2145 * ticks in a non-ExtINT mode. Also the local APIC 2146 * might have cached one ExtINT interrupt. Finally, at 2147 * least one tick may be lost due to delays. 2148 */ 2149 2150 /* jiffies wrap? */ 2151 if (time_after(jiffies, t1 + 4)) 2152 return 1; 2153 return 0; 2154 } 2155 2156 /* 2157 * In the SMP+IOAPIC case it might happen that there are an unspecified 2158 * number of pending IRQ events unhandled. These cases are very rare, 2159 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much 2160 * better to do it this way as it means we do not have to be aware of 2161 * 'pending' interrupts in the IRQ path, except at this point. 2162 */ 2163 /* 2164 * Edge-triggered interrupts need to resend any interrupt 2165 * that was delayed, but this is now handled in the device- 2166 * independent code. 2167 */ 2168 2169 /* 2170 * Starting up an edge-triggered IO-APIC interrupt is 2171 * nasty - we need to make sure that we get the edge. 2172 * If it is already asserted for some reason, we need 2173 * to return 1 to indicate that it was pending. 2174 * 2175 * This is not complete - we should be able to fake 2176 * an edge even if it isn't on the 8259A... 2177 */ 2178 2179 static unsigned int startup_ioapic_irq(struct irq_data *data) 2180 { 2181 int was_pending = 0, irq = data->irq; 2182 unsigned long flags; 2183 2184 raw_spin_lock_irqsave(&ioapic_lock, flags); 2185 if (irq < legacy_pic->nr_legacy_irqs) { 2186 legacy_pic->mask(irq); 2187 if (legacy_pic->irq_pending(irq)) 2188 was_pending = 1; 2189 } 2190 __unmask_ioapic(data->chip_data); 2191 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2192 2193 return was_pending; 2194 } 2195 2196 static int ioapic_retrigger_irq(struct irq_data *data) 2197 { 2198 struct irq_cfg *cfg = data->chip_data; 2199 unsigned long flags; 2200 2201 raw_spin_lock_irqsave(&vector_lock, flags); 2202 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2203 raw_spin_unlock_irqrestore(&vector_lock, flags); 2204 2205 return 1; 2206 } 2207 2208 /* 2209 * Level- and edge-triggered IO-APIC interrupts need different handling, 2210 * so we use two separate IRQ descriptors. Edge-triggered IRQs can be 2211 * handled with the level-triggered descriptor, but that one has slightly 2212 * more overhead. Level-triggered interrupts cannot be handled with the 2213 * edge-triggered handler without risking IRQ storms and other ugly 2214 * races.
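 * (In this file that split is ack_apic_edge() and ack_apic_level() below, which ioapic_chip installs as its .irq_ack and .irq_eoi callbacks respectively.)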
2215 */ 2216 2217 #ifdef CONFIG_SMP 2218 void send_cleanup_vector(struct irq_cfg *cfg) 2219 { 2220 cpumask_var_t cleanup_mask; 2221 2222 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2223 unsigned int i; 2224 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2225 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2226 } else { 2227 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2228 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2229 free_cpumask_var(cleanup_mask); 2230 } 2231 cfg->move_in_progress = 0; 2232 } 2233 2234 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2235 { 2236 int apic, pin; 2237 struct irq_pin_list *entry; 2238 u8 vector = cfg->vector; 2239 2240 for_each_irq_pin(entry, cfg->irq_2_pin) { 2241 unsigned int reg; 2242 2243 apic = entry->apic; 2244 pin = entry->pin; 2245 /* 2246 * With interrupt-remapping, destination information comes 2247 * from interrupt-remapping table entry. 2248 */ 2249 if (!irq_remapped(cfg)) 2250 io_apic_write(apic, 0x11 + pin*2, dest); 2251 reg = io_apic_read(apic, 0x10 + pin*2); 2252 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2253 reg |= vector; 2254 io_apic_modify(apic, 0x10 + pin*2, reg); 2255 } 2256 } 2257 2258 /* 2259 * Either sets data->affinity to a valid value, and returns 2260 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2261 * leaves data->affinity untouched. 2262 */ 2263 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2264 unsigned int *dest_id) 2265 { 2266 struct irq_cfg *cfg = data->chip_data; 2267 2268 if (!cpumask_intersects(mask, cpu_online_mask)) 2269 return -1; 2270 2271 if (assign_irq_vector(data->irq, data->chip_data, mask)) 2272 return -1; 2273 2274 cpumask_copy(data->affinity, mask); 2275 2276 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2277 return 0; 2278 } 2279 2280 static int 2281 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2282 bool force) 2283 { 2284 unsigned int dest, irq = data->irq; 2285 unsigned long flags; 2286 int ret; 2287 2288 raw_spin_lock_irqsave(&ioapic_lock, flags); 2289 ret = __ioapic_set_affinity(data, mask, &dest); 2290 if (!ret) { 2291 /* Only the high 8 bits are valid. */ 2292 dest = SET_APIC_LOGICAL_ID(dest); 2293 __target_IO_APIC_irq(irq, dest, data->chip_data); 2294 } 2295 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2296 return ret; 2297 } 2298 2299 #ifdef CONFIG_IRQ_REMAP 2300 2301 /* 2302 * Migrate the IO-APIC irq in the presence of intr-remapping. 2303 * 2304 * For both level and edge triggered, irq migration is a simple atomic 2305 * update(of vector and cpu destination) of IRTE and flush the hardware cache. 2306 * 2307 * For level triggered, we eliminate the io-apic RTE modification (with the 2308 * updated vector information), by using a virtual vector (io-apic pin number). 2309 * Real vector that is used for interrupting cpu will be coming from 2310 * the interrupt-remapping table entry. 2311 * 2312 * As the migration is a simple atomic update of IRTE, the same mechanism 2313 * is used to migrate MSI irq's in the presence of interrupt-remapping. 
*/ 2315 static int 2316 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2317 bool force) 2318 { 2319 struct irq_cfg *cfg = data->chip_data; 2320 unsigned int dest, irq = data->irq; 2321 struct irte irte; 2322 2323 if (!cpumask_intersects(mask, cpu_online_mask)) 2324 return -EINVAL; 2325 2326 if (get_irte(irq, &irte)) 2327 return -EBUSY; 2328 2329 if (assign_irq_vector(irq, cfg, mask)) 2330 return -EBUSY; 2331 2332 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2333 2334 irte.vector = cfg->vector; 2335 irte.dest_id = IRTE_DEST(dest); 2336 2337 /* 2338 * Atomically updates the IRTE with the new destination, vector 2339 * and flushes the interrupt entry cache. 2340 */ 2341 modify_irte(irq, &irte); 2342 2343 /* 2344 * After this point, all the interrupts will start arriving 2345 * at the new destination. So, time to clean up the previous 2346 * vector allocation. 2347 */ 2348 if (cfg->move_in_progress) 2349 send_cleanup_vector(cfg); 2350 2351 cpumask_copy(data->affinity, mask); 2352 return 0; 2353 } 2354 2355 #else 2356 static inline int 2357 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2358 bool force) 2359 { 2360 return 0; 2361 } 2362 #endif 2363 2364 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2365 { 2366 unsigned vector, me; 2367 2368 ack_APIC_irq(); 2369 exit_idle(); 2370 irq_enter(); 2371 2372 me = smp_processor_id(); 2373 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2374 unsigned int irq; 2375 unsigned int irr; 2376 struct irq_desc *desc; 2377 struct irq_cfg *cfg; 2378 irq = __this_cpu_read(vector_irq[vector]); 2379 2380 if (irq == -1) 2381 continue; 2382 2383 desc = irq_to_desc(irq); 2384 if (!desc) 2385 continue; 2386 2387 cfg = irq_cfg(irq); 2388 raw_spin_lock(&desc->lock); 2389 2390 /* 2391 * Check if the irq migration is in progress. If so, we 2392 * haven't received the cleanup request yet for this irq. 2393 */ 2394 if (cfg->move_in_progress) 2395 goto unlock; 2396 2397 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2398 goto unlock; 2399 2400 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2401 /* 2402 * Check if the vector that needs to be cleaned up is 2403 * registered in the cpu's IRR. If so, then this is not 2404 * the best time to clean it up. Let's clean it up in the 2405 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2406 * to myself.
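 * (The IRR is 256 bits held in eight 32-bit registers spaced 0x10 apart, which is why the read above uses APIC_IRR + (vector / 32 * 0x10) and the test below checks 1 << (vector % 32).)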
*/ 2408 if (irr & (1 << (vector % 32))) { 2409 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2410 goto unlock; 2411 } 2412 __this_cpu_write(vector_irq[vector], -1); 2413 unlock: 2414 raw_spin_unlock(&desc->lock); 2415 } 2416 2417 irq_exit(); 2418 } 2419 2420 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2421 { 2422 unsigned me; 2423 2424 if (likely(!cfg->move_in_progress)) 2425 return; 2426 2427 me = smp_processor_id(); 2428 2429 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2430 send_cleanup_vector(cfg); 2431 } 2432 2433 static void irq_complete_move(struct irq_cfg *cfg) 2434 { 2435 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2436 } 2437 2438 void irq_force_complete_move(int irq) 2439 { 2440 struct irq_cfg *cfg = irq_get_chip_data(irq); 2441 2442 if (!cfg) 2443 return; 2444 2445 __irq_complete_move(cfg, cfg->vector); 2446 } 2447 #else 2448 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2449 #endif 2450 2451 static void ack_apic_edge(struct irq_data *data) 2452 { 2453 irq_complete_move(data->chip_data); 2454 irq_move_irq(data); 2455 ack_APIC_irq(); 2456 } 2457 2458 atomic_t irq_mis_count; 2459 2460 /* 2461 * IO-APIC versions below 0x20 don't support the EOI register. 2462 * For the record, here is the information about the various versions: 2463 * 0Xh 82489DX 2464 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant 2465 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant 2466 * 30h-FFh Reserved 2467 * 2468 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic 2469 * version as 0x2. This is a documentation error; these ICH chips 2470 * use io-apics of version 0x20. 2471 * 2472 * For IO-APICs with an EOI register, we use that to do an explicit EOI. 2473 * Otherwise, we simulate the EOI message manually by changing the trigger 2474 * mode to edge and then back to level, with the RTE being masked during this. 2475 */ 2476 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2477 { 2478 struct irq_pin_list *entry; 2479 unsigned long flags; 2480 2481 raw_spin_lock_irqsave(&ioapic_lock, flags); 2482 for_each_irq_pin(entry, cfg->irq_2_pin) { 2483 if (mpc_ioapic_ver(entry->apic) >= 0x20) { 2484 /* 2485 * Intr-remapping uses the pin number as the virtual vector 2486 * in the RTE; the actual vector is programmed in the 2487 * intr-remapping table entry. Hence for the io-apic 2488 * EOI we use the pin number. 2489 */ 2490 if (irq_remapped(cfg)) 2491 io_apic_eoi(entry->apic, entry->pin); 2492 else 2493 io_apic_eoi(entry->apic, cfg->vector); 2494 } else { 2495 struct IO_APIC_route_entry rte, rte1; 2496 2497 rte = rte1 = 2498 __ioapic_read_entry(entry->apic, entry->pin); 2499 2500 /* 2501 * Mask the entry and change the trigger mode to edge. 2502 */ 2503 rte1.mask = 1; 2504 rte1.trigger = IOAPIC_EDGE; 2505 2506 __ioapic_write_entry(entry->apic, entry->pin, rte1); 2507 2508 /* 2509 * Restore the previous level-triggered entry.
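 * Writing the saved RTE back restores the level trigger mode and the original mask bit together, so no separate unmask is needed here.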
*/ 2511 __ioapic_write_entry(entry->apic, entry->pin, rte); 2512 } 2513 } 2514 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2515 } 2516 2517 static void ack_apic_level(struct irq_data *data) 2518 { 2519 struct irq_cfg *cfg = data->chip_data; 2520 int i, do_unmask_irq = 0, irq = data->irq; 2521 unsigned long v; 2522 2523 irq_complete_move(cfg); 2524 #ifdef CONFIG_GENERIC_PENDING_IRQ 2525 /* If we are moving the irq we need to mask it */ 2526 if (unlikely(irqd_is_setaffinity_pending(data))) { 2527 do_unmask_irq = 1; 2528 mask_ioapic(cfg); 2529 } 2530 #endif 2531 2532 /* 2533 * It appears there is an erratum which affects at least version 0x11 2534 * of the I/O APIC (that's the 82093AA and cores integrated into various 2535 * chipsets). Under certain conditions a level-triggered interrupt is 2536 * erroneously delivered as an edge-triggered one but the respective IRR 2537 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2538 * message but it will never arrive and further interrupts are blocked 2539 * from the source. The exact reason is so far unknown, but the 2540 * phenomenon was observed when two consecutive interrupt requests 2541 * from a given source get delivered to the same CPU and the source is 2542 * temporarily disabled in between. 2543 * 2544 * A workaround is to simulate an EOI message manually. We achieve it 2545 * by setting the trigger mode to edge and then to level when the edge 2546 * trigger mode gets detected in the TMR of a local APIC for a 2547 * level-triggered interrupt. We mask the source for the time of the 2548 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2549 * The idea is from Manfred Spraul. --macro 2550 * 2551 * Also in the case when a cpu goes offline, fixup_irqs() will forward 2552 * any unhandled interrupt on the offlined cpu to the new cpu 2553 * destination that is handling the corresponding interrupt. This 2554 * interrupt forwarding is done via IPI's. Hence, in this case also 2555 * the level-triggered io-apic interrupt will be seen as an edge 2556 * interrupt in the IRR. And we can't rely on the cpu's EOI 2557 * being broadcast to the IO-APICs, which is what clears the remoteIRR 2558 * corresponding to the level-triggered interrupt. Hence on IO-APICs 2559 * supporting the EOI register, we do an explicit EOI to clear the 2560 * remote IRR, and on IO-APICs which don't have an EOI register, 2561 * we use the above logic (mask+edge followed by unmask+level) from 2562 * Manfred Spraul to clear the remote IRR. 2563 */ 2564 i = cfg->vector; 2565 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2566 2567 /* 2568 * We must acknowledge the irq before we move it or the acknowledge will 2569 * not propagate properly. 2570 */ 2571 ack_APIC_irq(); 2572 2573 /* 2574 * Tail end of clearing the remote IRR bit (either by delivering the EOI 2575 * message via the io-apic EOI register write or simulating it using 2576 * mask+edge followed by unmask+level logic) manually when the 2577 * level-triggered interrupt is seen as an edge-triggered interrupt 2578 * at the cpu. 2579 */ 2580 if (!(v & (1 << (i & 0x1f)))) { 2581 atomic_inc(&irq_mis_count); 2582 2583 eoi_ioapic_irq(irq, cfg); 2584 } 2585 2586 /* Now we can move and re-enable the irq */ 2587 if (unlikely(do_unmask_irq)) { 2588 /* Only migrate the irq if the ack has been received. 2589 * 2590 * On rare occasions the broadcast level-triggered ack gets 2591 * delayed going to ioapics, and if we reprogram the 2592 * vector while Remote IRR is still set the irq will never 2593 * fire again.
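 * (Remote IRR is the per-pin bit in the RTE that the IO-APIC sets when it delivers a level-triggered interrupt and clears once the matching EOI arrives.)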
* 2595 * To prevent this scenario we read the Remote IRR bit 2596 * of the ioapic. This has two effects. 2597 * - On any sane system the read of the ioapic will 2598 * flush writes (and acks) going to the ioapic from 2599 * this cpu. 2600 * - We get to see if the ACK has actually been delivered. 2601 * 2602 * Based on failed experiments of reprogramming the 2603 * ioapic entry from outside of irq context (starting 2604 * with masking the ioapic entry and then polling until 2605 * Remote IRR was clear before reprogramming it), 2606 * I don't trust the Remote IRR bit to be 2607 * completely accurate. 2608 * 2609 * However there appears to be no other way to plug 2610 * this race, so if the Remote IRR bit is not 2611 * accurate and is causing problems then it is a hardware bug 2612 * and you can go talk to the chipset vendor about it. 2613 */ 2614 if (!io_apic_level_ack_pending(cfg)) 2615 irq_move_masked_irq(data); 2616 unmask_ioapic(cfg); 2617 } 2618 } 2619 2620 #ifdef CONFIG_IRQ_REMAP 2621 static void ir_ack_apic_edge(struct irq_data *data) 2622 { 2623 ack_APIC_irq(); 2624 } 2625 2626 static void ir_ack_apic_level(struct irq_data *data) 2627 { 2628 ack_APIC_irq(); 2629 eoi_ioapic_irq(data->irq, data->chip_data); 2630 } 2631 2632 static void ir_print_prefix(struct irq_data *data, struct seq_file *p) 2633 { 2634 seq_printf(p, " IR-%s", data->chip->name); 2635 } 2636 2637 static void irq_remap_modify_chip_defaults(struct irq_chip *chip) 2638 { 2639 chip->irq_print_chip = ir_print_prefix; 2640 chip->irq_ack = ir_ack_apic_edge; 2641 chip->irq_eoi = ir_ack_apic_level; 2642 2643 #ifdef CONFIG_SMP 2644 chip->irq_set_affinity = ir_ioapic_set_affinity; 2645 #endif 2646 } 2647 #endif /* CONFIG_IRQ_REMAP */ 2648 2649 static struct irq_chip ioapic_chip __read_mostly = { 2650 .name = "IO-APIC", 2651 .irq_startup = startup_ioapic_irq, 2652 .irq_mask = mask_ioapic_irq, 2653 .irq_unmask = unmask_ioapic_irq, 2654 .irq_ack = ack_apic_edge, 2655 .irq_eoi = ack_apic_level, 2656 #ifdef CONFIG_SMP 2657 .irq_set_affinity = ioapic_set_affinity, 2658 #endif 2659 .irq_retrigger = ioapic_retrigger_irq, 2660 }; 2661 2662 static inline void init_IO_APIC_traps(void) 2663 { 2664 struct irq_cfg *cfg; 2665 unsigned int irq; 2666 2667 /* 2668 * NOTE! The local APIC isn't very good at handling 2669 * multiple interrupts at the same interrupt level. 2670 * As the interrupt level is determined by taking the 2671 * vector number and shifting that right by 4, we 2672 * want to spread these out a bit so that they don't 2673 * all fall in the same interrupt level. 2674 * 2675 * Also, we've got to be careful not to trash gate 2676 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2677 */ 2678 for_each_active_irq(irq) { 2679 cfg = irq_get_chip_data(irq); 2680 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2681 /* 2682 * Hmm.. We don't have an entry for this, 2683 * so default to an old-fashioned 8259 2684 * interrupt if we can.. 2685 */ 2686 if (irq < legacy_pic->nr_legacy_irqs) 2687 legacy_pic->make_irq(irq); 2688 else 2689 /* Strange. Oh, well..
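 * No vector was ever assigned and it is not a legacy pin, so point it at no_irq_chip, which simply does nothing for it.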
*/ 2690 irq_set_chip(irq, &no_irq_chip); 2691 } 2692 } 2693 } 2694 2695 /* 2696 * The local APIC irq-chip implementation: 2697 */ 2698 2699 static void mask_lapic_irq(struct irq_data *data) 2700 { 2701 unsigned long v; 2702 2703 v = apic_read(APIC_LVT0); 2704 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2705 } 2706 2707 static void unmask_lapic_irq(struct irq_data *data) 2708 { 2709 unsigned long v; 2710 2711 v = apic_read(APIC_LVT0); 2712 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2713 } 2714 2715 static void ack_lapic_irq(struct irq_data *data) 2716 { 2717 ack_APIC_irq(); 2718 } 2719 2720 static struct irq_chip lapic_chip __read_mostly = { 2721 .name = "local-APIC", 2722 .irq_mask = mask_lapic_irq, 2723 .irq_unmask = unmask_lapic_irq, 2724 .irq_ack = ack_lapic_irq, 2725 }; 2726 2727 static void lapic_register_intr(int irq) 2728 { 2729 irq_clear_status_flags(irq, IRQ_LEVEL); 2730 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2731 "edge"); 2732 } 2733 2734 /* 2735 * This looks a bit hackish but it's about the only one way of sending 2736 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2737 * not support the ExtINT mode, unfortunately. We need to send these 2738 * cycles as some i82489DX-based boards have glue logic that keeps the 2739 * 8259A interrupt line asserted until INTA. --macro 2740 */ 2741 static inline void __init unlock_ExtINT_logic(void) 2742 { 2743 int apic, pin, i; 2744 struct IO_APIC_route_entry entry0, entry1; 2745 unsigned char save_control, save_freq_select; 2746 2747 pin = find_isa_irq_pin(8, mp_INT); 2748 if (pin == -1) { 2749 WARN_ON_ONCE(1); 2750 return; 2751 } 2752 apic = find_isa_irq_apic(8, mp_INT); 2753 if (apic == -1) { 2754 WARN_ON_ONCE(1); 2755 return; 2756 } 2757 2758 entry0 = ioapic_read_entry(apic, pin); 2759 clear_IO_APIC_pin(apic, pin); 2760 2761 memset(&entry1, 0, sizeof(entry1)); 2762 2763 entry1.dest_mode = 0; /* physical delivery */ 2764 entry1.mask = 0; /* unmask IRQ now */ 2765 entry1.dest = hard_smp_processor_id(); 2766 entry1.delivery_mode = dest_ExtINT; 2767 entry1.polarity = entry0.polarity; 2768 entry1.trigger = 0; 2769 entry1.vector = 0; 2770 2771 ioapic_write_entry(apic, pin, entry1); 2772 2773 save_control = CMOS_READ(RTC_CONTROL); 2774 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2775 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2776 RTC_FREQ_SELECT); 2777 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2778 2779 i = 100; 2780 while (i-- > 0) { 2781 mdelay(10); 2782 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2783 i -= 10; 2784 } 2785 2786 CMOS_WRITE(save_control, RTC_CONTROL); 2787 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2788 clear_IO_APIC_pin(apic, pin); 2789 2790 ioapic_write_entry(apic, pin, entry0); 2791 } 2792 2793 static int disable_timer_pin_1 __initdata; 2794 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2795 static int __init disable_timer_pin_setup(char *arg) 2796 { 2797 disable_timer_pin_1 = 1; 2798 return 0; 2799 } 2800 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2801 2802 int timer_through_8259 __initdata; 2803 2804 /* 2805 * This code may look a bit paranoid, but it's supposed to cooperate with 2806 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2807 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2808 * fanatically on his truly buggy board. 2809 * 2810 * FIXME: really need to revamp this for all platforms. 
*/ 2812 static inline void __init check_timer(void) 2813 { 2814 struct irq_cfg *cfg = irq_get_chip_data(0); 2815 int node = cpu_to_node(0); 2816 int apic1, pin1, apic2, pin2; 2817 unsigned long flags; 2818 int no_pin1 = 0; 2819 2820 local_irq_save(flags); 2821 2822 /* 2823 * get/set the timer IRQ vector: 2824 */ 2825 legacy_pic->mask(0); 2826 assign_irq_vector(0, cfg, apic->target_cpus()); 2827 2828 /* 2829 * As IRQ0 is to be enabled in the 8259A, the virtual 2830 * wire has to be disabled in the local APIC. Also 2831 * timer interrupts need to be acknowledged manually in 2832 * the 8259A for the i82489DX when using the NMI 2833 * watchdog as that APIC treats NMIs as level-triggered. 2834 * The AEOI mode will finish them in the 8259A 2835 * automatically. 2836 */ 2837 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2838 legacy_pic->init(1); 2839 2840 pin1 = find_isa_irq_pin(0, mp_INT); 2841 apic1 = find_isa_irq_apic(0, mp_INT); 2842 pin2 = ioapic_i8259.pin; 2843 apic2 = ioapic_i8259.apic; 2844 2845 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2846 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2847 cfg->vector, apic1, pin1, apic2, pin2); 2848 2849 /* 2850 * Some BIOS writers are clueless and report the ExtINTA 2851 * I/O APIC input from the cascaded 8259A as the timer 2852 * interrupt input. So just in case, if only one pin 2853 * was found above, try it both directly and through the 2854 * 8259A. 2855 */ 2856 if (pin1 == -1) { 2857 if (intr_remapping_enabled) 2858 panic("BIOS bug: timer not connected to IO-APIC"); 2859 pin1 = pin2; 2860 apic1 = apic2; 2861 no_pin1 = 1; 2862 } else if (pin2 == -1) { 2863 pin2 = pin1; 2864 apic2 = apic1; 2865 } 2866 2867 if (pin1 != -1) { 2868 /* 2869 * Ok, does IRQ0 through the IOAPIC work? 2870 */ 2871 if (no_pin1) { 2872 add_pin_to_irq_node(cfg, node, apic1, pin1); 2873 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2874 } else { 2875 /* For edge trigger, setup_ioapic_irq() already 2876 * leaves it unmasked, so we only need to unmask 2877 * it here if it is level-triggered. 2878 * Do we really have a level-triggered timer? 2879 */ 2880 int idx; 2881 idx = find_irq_entry(apic1, pin1, mp_INT); 2882 if (idx != -1 && irq_trigger(idx)) 2883 unmask_ioapic(cfg); 2884 } 2885 if (timer_irq_works()) { 2886 if (disable_timer_pin_1 > 0) 2887 clear_IO_APIC_pin(0, pin1); 2888 goto out; 2889 } 2890 if (intr_remapping_enabled) 2891 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2892 local_irq_disable(); 2893 clear_IO_APIC_pin(apic1, pin1); 2894 if (!no_pin1) 2895 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2896 "8254 timer not connected to IO-APIC\n"); 2897 2898 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2899 "(IRQ0) through the 8259A ...\n"); 2900 apic_printk(APIC_QUIET, KERN_INFO 2901 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2902 /* 2903 * legacy devices should be connected to IO APIC #0 2904 */ 2905 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2906 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2907 legacy_pic->unmask(0); 2908 if (timer_irq_works()) { 2909 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2910 timer_through_8259 = 1; 2911 goto out; 2912 } 2913 /* 2914 * Cleanup, just in case ... 2915 */ 2916 local_irq_disable(); 2917 legacy_pic->mask(0); 2918 clear_IO_APIC_pin(apic2, pin2); 2919 apic_printk(APIC_QUIET, KERN_INFO ".......
failed.\n"); 2920 } 2921 2922 apic_printk(APIC_QUIET, KERN_INFO 2923 "...trying to set up timer as Virtual Wire IRQ...\n"); 2924 2925 lapic_register_intr(0); 2926 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2927 legacy_pic->unmask(0); 2928 2929 if (timer_irq_works()) { 2930 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2931 goto out; 2932 } 2933 local_irq_disable(); 2934 legacy_pic->mask(0); 2935 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2936 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2937 2938 apic_printk(APIC_QUIET, KERN_INFO 2939 "...trying to set up timer as ExtINT IRQ...\n"); 2940 2941 legacy_pic->init(0); 2942 legacy_pic->make_irq(0); 2943 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2944 2945 unlock_ExtINT_logic(); 2946 2947 if (timer_irq_works()) { 2948 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2949 goto out; 2950 } 2951 local_irq_disable(); 2952 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2953 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2954 "report. Then try booting with the 'noapic' option.\n"); 2955 out: 2956 local_irq_restore(flags); 2957 } 2958 2959 /* 2960 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2961 * to devices. However there may be an I/O APIC pin available for 2962 * this interrupt regardless. The pin may be left unconnected, but 2963 * typically it will be reused as an ExtINT cascade interrupt for 2964 * the master 8259A. In the MPS case such a pin will normally be 2965 * reported as an ExtINT interrupt in the MP table. With ACPI 2966 * there is no provision for ExtINT interrupts, and in the absence 2967 * of an override it would be treated as an ordinary ISA I/O APIC 2968 * interrupt, that is edge-triggered and unmasked by default. We 2969 * used to do this, but it caused problems on some systems because 2970 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2971 * the same ExtINT cascade interrupt to drive the local APIC of the 2972 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2973 * the I/O APIC in all cases now. No actual device should request 2974 * it anyway. --macro 2975 */ 2976 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2977 2978 void __init setup_IO_APIC(void) 2979 { 2980 2981 /* 2982 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2983 */ 2984 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2985 2986 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2987 /* 2988 * Set up IO-APIC IRQ routing. 2989 */ 2990 x86_init.mpparse.setup_ioapic_ids(); 2991 2992 sync_Arb_IDs(); 2993 setup_IO_APIC_irqs(); 2994 init_IO_APIC_traps(); 2995 if (legacy_pic->nr_legacy_irqs) 2996 check_timer(); 2997 } 2998 2999 /* 3000 * Called after all the initialization is done. 
If we didn't find any 3001 * APIC bugs then we can allow the modify fast path 3002 */ 3003 3004 static int __init io_apic_bug_finalize(void) 3005 { 3006 if (sis_apic_bug == -1) 3007 sis_apic_bug = 0; 3008 return 0; 3009 } 3010 3011 late_initcall(io_apic_bug_finalize); 3012 3013 static void resume_ioapic_id(int ioapic_id) 3014 { 3015 unsigned long flags; 3016 union IO_APIC_reg_00 reg_00; 3017 3018 3019 raw_spin_lock_irqsave(&ioapic_lock, flags); 3020 reg_00.raw = io_apic_read(ioapic_id, 0); 3021 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_id)) { 3022 reg_00.bits.ID = mpc_ioapic_id(ioapic_id); 3023 io_apic_write(ioapic_id, 0, reg_00.raw); 3024 } 3025 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3026 } 3027 3028 static void ioapic_resume(void) 3029 { 3030 int ioapic_id; 3031 3032 for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 3033 resume_ioapic_id(ioapic_id); 3034 3035 restore_ioapic_entries(); 3036 } 3037 3038 static struct syscore_ops ioapic_syscore_ops = { 3039 .suspend = save_ioapic_entries, 3040 .resume = ioapic_resume, 3041 }; 3042 3043 static int __init ioapic_init_ops(void) 3044 { 3045 register_syscore_ops(&ioapic_syscore_ops); 3046 3047 return 0; 3048 } 3049 3050 device_initcall(ioapic_init_ops); 3051 3052 /* 3053 * Dynamic irq allocate and deallocation 3054 */ 3055 unsigned int create_irq_nr(unsigned int from, int node) 3056 { 3057 struct irq_cfg *cfg; 3058 unsigned long flags; 3059 unsigned int ret = 0; 3060 int irq; 3061 3062 if (from < nr_irqs_gsi) 3063 from = nr_irqs_gsi; 3064 3065 irq = alloc_irq_from(from, node); 3066 if (irq < 0) 3067 return 0; 3068 cfg = alloc_irq_cfg(irq, node); 3069 if (!cfg) { 3070 free_irq_at(irq, NULL); 3071 return 0; 3072 } 3073 3074 raw_spin_lock_irqsave(&vector_lock, flags); 3075 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3076 ret = irq; 3077 raw_spin_unlock_irqrestore(&vector_lock, flags); 3078 3079 if (ret) { 3080 irq_set_chip_data(irq, cfg); 3081 irq_clear_status_flags(irq, IRQ_NOREQUEST); 3082 } else { 3083 free_irq_at(irq, cfg); 3084 } 3085 return ret; 3086 } 3087 3088 int create_irq(void) 3089 { 3090 int node = cpu_to_node(0); 3091 unsigned int irq_want; 3092 int irq; 3093 3094 irq_want = nr_irqs_gsi; 3095 irq = create_irq_nr(irq_want, node); 3096 3097 if (irq == 0) 3098 irq = -1; 3099 3100 return irq; 3101 } 3102 3103 void destroy_irq(unsigned int irq) 3104 { 3105 struct irq_cfg *cfg = irq_get_chip_data(irq); 3106 unsigned long flags; 3107 3108 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3109 3110 if (irq_remapped(cfg)) 3111 free_irte(irq); 3112 raw_spin_lock_irqsave(&vector_lock, flags); 3113 __clear_irq_vector(irq, cfg); 3114 raw_spin_unlock_irqrestore(&vector_lock, flags); 3115 free_irq_at(irq, cfg); 3116 } 3117 3118 /* 3119 * MSI message composition 3120 */ 3121 #ifdef CONFIG_PCI_MSI 3122 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3123 struct msi_msg *msg, u8 hpet_id) 3124 { 3125 struct irq_cfg *cfg; 3126 int err; 3127 unsigned dest; 3128 3129 if (disable_apic) 3130 return -ENXIO; 3131 3132 cfg = irq_cfg(irq); 3133 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3134 if (err) 3135 return err; 3136 3137 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3138 3139 if (irq_remapped(cfg)) { 3140 struct irte irte; 3141 int ir_index; 3142 u16 sub_handle; 3143 3144 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3145 BUG_ON(ir_index == -1); 3146 3147 prepare_irte(&irte, cfg->vector, dest); 3148 3149 /* Set source-id of interrupt request */ 3150 if 
(pdev) 3151 set_msi_sid(&irte, pdev); 3152 else 3153 set_hpet_sid(&irte, hpet_id); 3154 3155 modify_irte(irq, &irte); 3156 3157 msg->address_hi = MSI_ADDR_BASE_HI; 3158 msg->data = sub_handle; 3159 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3160 MSI_ADDR_IR_SHV | 3161 MSI_ADDR_IR_INDEX1(ir_index) | 3162 MSI_ADDR_IR_INDEX2(ir_index); 3163 } else { 3164 if (x2apic_enabled()) 3165 msg->address_hi = MSI_ADDR_BASE_HI | 3166 MSI_ADDR_EXT_DEST_ID(dest); 3167 else 3168 msg->address_hi = MSI_ADDR_BASE_HI; 3169 3170 msg->address_lo = 3171 MSI_ADDR_BASE_LO | 3172 ((apic->irq_dest_mode == 0) ? 3173 MSI_ADDR_DEST_MODE_PHYSICAL: 3174 MSI_ADDR_DEST_MODE_LOGICAL) | 3175 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3176 MSI_ADDR_REDIRECTION_CPU: 3177 MSI_ADDR_REDIRECTION_LOWPRI) | 3178 MSI_ADDR_DEST_ID(dest); 3179 3180 msg->data = 3181 MSI_DATA_TRIGGER_EDGE | 3182 MSI_DATA_LEVEL_ASSERT | 3183 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3184 MSI_DATA_DELIVERY_FIXED: 3185 MSI_DATA_DELIVERY_LOWPRI) | 3186 MSI_DATA_VECTOR(cfg->vector); 3187 } 3188 return err; 3189 } 3190 3191 #ifdef CONFIG_SMP 3192 static int 3193 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3194 { 3195 struct irq_cfg *cfg = data->chip_data; 3196 struct msi_msg msg; 3197 unsigned int dest; 3198 3199 if (__ioapic_set_affinity(data, mask, &dest)) 3200 return -1; 3201 3202 __get_cached_msi_msg(data->msi_desc, &msg); 3203 3204 msg.data &= ~MSI_DATA_VECTOR_MASK; 3205 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3206 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3207 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3208 3209 __write_msi_msg(data->msi_desc, &msg); 3210 3211 return 0; 3212 } 3213 #endif /* CONFIG_SMP */ 3214 3215 /* 3216 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3217 * which implement the MSI or MSI-X Capability Structure. 3218 */ 3219 static struct irq_chip msi_chip = { 3220 .name = "PCI-MSI", 3221 .irq_unmask = unmask_msi_irq, 3222 .irq_mask = mask_msi_irq, 3223 .irq_ack = ack_apic_edge, 3224 #ifdef CONFIG_SMP 3225 .irq_set_affinity = msi_set_affinity, 3226 #endif 3227 .irq_retrigger = ioapic_retrigger_irq, 3228 }; 3229 3230 /* 3231 * Map the PCI dev to the corresponding remapping hardware unit 3232 * and allocate 'nvec' consecutive interrupt-remapping table entries 3233 * in it. 
3234 */ 3235 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3236 { 3237 struct intel_iommu *iommu; 3238 int index; 3239 3240 iommu = map_dev_to_ir(dev); 3241 if (!iommu) { 3242 printk(KERN_ERR 3243 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3244 return -ENOENT; 3245 } 3246 3247 index = alloc_irte(iommu, irq, nvec); 3248 if (index < 0) { 3249 printk(KERN_ERR 3250 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3251 pci_name(dev)); 3252 return -ENOSPC; 3253 } 3254 return index; 3255 } 3256 3257 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3258 { 3259 struct irq_chip *chip = &msi_chip; 3260 struct msi_msg msg; 3261 int ret; 3262 3263 ret = msi_compose_msg(dev, irq, &msg, -1); 3264 if (ret < 0) 3265 return ret; 3266 3267 irq_set_msi_desc(irq, msidesc); 3268 write_msi_msg(irq, &msg); 3269 3270 if (irq_remapped(irq_get_chip_data(irq))) { 3271 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3272 irq_remap_modify_chip_defaults(chip); 3273 } 3274 3275 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3276 3277 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3278 3279 return 0; 3280 } 3281 3282 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3283 { 3284 int node, ret, sub_handle, index = 0; 3285 unsigned int irq, irq_want; 3286 struct msi_desc *msidesc; 3287 struct intel_iommu *iommu = NULL; 3288 3289 /* x86 doesn't support multiple MSI yet */ 3290 if (type == PCI_CAP_ID_MSI && nvec > 1) 3291 return 1; 3292 3293 node = dev_to_node(&dev->dev); 3294 irq_want = nr_irqs_gsi; 3295 sub_handle = 0; 3296 list_for_each_entry(msidesc, &dev->msi_list, list) { 3297 irq = create_irq_nr(irq_want, node); 3298 if (irq == 0) 3299 return -1; 3300 irq_want = irq + 1; 3301 if (!intr_remapping_enabled) 3302 goto no_ir; 3303 3304 if (!sub_handle) { 3305 /* 3306 * allocate the consecutive block of IRTE's 3307 * for 'nvec' 3308 */ 3309 index = msi_alloc_irte(dev, irq, nvec); 3310 if (index < 0) { 3311 ret = index; 3312 goto error; 3313 } 3314 } else { 3315 iommu = map_dev_to_ir(dev); 3316 if (!iommu) { 3317 ret = -ENOENT; 3318 goto error; 3319 } 3320 /* 3321 * setup the mapping between the irq and the IRTE 3322 * base index, the sub_handle pointing to the 3323 * appropriate interrupt remap table entry. 
3324 */ 3325 set_irte_irq(irq, iommu, index, sub_handle); 3326 } 3327 no_ir: 3328 ret = setup_msi_irq(dev, msidesc, irq); 3329 if (ret < 0) 3330 goto error; 3331 sub_handle++; 3332 } 3333 return 0; 3334 3335 error: 3336 destroy_irq(irq); 3337 return ret; 3338 } 3339 3340 void native_teardown_msi_irq(unsigned int irq) 3341 { 3342 destroy_irq(irq); 3343 } 3344 3345 #ifdef CONFIG_DMAR_TABLE 3346 #ifdef CONFIG_SMP 3347 static int 3348 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3349 bool force) 3350 { 3351 struct irq_cfg *cfg = data->chip_data; 3352 unsigned int dest, irq = data->irq; 3353 struct msi_msg msg; 3354 3355 if (__ioapic_set_affinity(data, mask, &dest)) 3356 return -1; 3357 3358 dmar_msi_read(irq, &msg); 3359 3360 msg.data &= ~MSI_DATA_VECTOR_MASK; 3361 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3362 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3363 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3364 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3365 3366 dmar_msi_write(irq, &msg); 3367 3368 return 0; 3369 } 3370 3371 #endif /* CONFIG_SMP */ 3372 3373 static struct irq_chip dmar_msi_type = { 3374 .name = "DMAR_MSI", 3375 .irq_unmask = dmar_msi_unmask, 3376 .irq_mask = dmar_msi_mask, 3377 .irq_ack = ack_apic_edge, 3378 #ifdef CONFIG_SMP 3379 .irq_set_affinity = dmar_msi_set_affinity, 3380 #endif 3381 .irq_retrigger = ioapic_retrigger_irq, 3382 }; 3383 3384 int arch_setup_dmar_msi(unsigned int irq) 3385 { 3386 int ret; 3387 struct msi_msg msg; 3388 3389 ret = msi_compose_msg(NULL, irq, &msg, -1); 3390 if (ret < 0) 3391 return ret; 3392 dmar_msi_write(irq, &msg); 3393 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3394 "edge"); 3395 return 0; 3396 } 3397 #endif 3398 3399 #ifdef CONFIG_HPET_TIMER 3400 3401 #ifdef CONFIG_SMP 3402 static int hpet_msi_set_affinity(struct irq_data *data, 3403 const struct cpumask *mask, bool force) 3404 { 3405 struct irq_cfg *cfg = data->chip_data; 3406 struct msi_msg msg; 3407 unsigned int dest; 3408 3409 if (__ioapic_set_affinity(data, mask, &dest)) 3410 return -1; 3411 3412 hpet_msi_read(data->handler_data, &msg); 3413 3414 msg.data &= ~MSI_DATA_VECTOR_MASK; 3415 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3416 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3417 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3418 3419 hpet_msi_write(data->handler_data, &msg); 3420 3421 return 0; 3422 } 3423 3424 #endif /* CONFIG_SMP */ 3425 3426 static struct irq_chip hpet_msi_type = { 3427 .name = "HPET_MSI", 3428 .irq_unmask = hpet_msi_unmask, 3429 .irq_mask = hpet_msi_mask, 3430 .irq_ack = ack_apic_edge, 3431 #ifdef CONFIG_SMP 3432 .irq_set_affinity = hpet_msi_set_affinity, 3433 #endif 3434 .irq_retrigger = ioapic_retrigger_irq, 3435 }; 3436 3437 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3438 { 3439 struct irq_chip *chip = &hpet_msi_type; 3440 struct msi_msg msg; 3441 int ret; 3442 3443 if (intr_remapping_enabled) { 3444 struct intel_iommu *iommu = map_hpet_to_ir(id); 3445 int index; 3446 3447 if (!iommu) 3448 return -1; 3449 3450 index = alloc_irte(iommu, irq, 1); 3451 if (index < 0) 3452 return -1; 3453 } 3454 3455 ret = msi_compose_msg(NULL, irq, &msg, id); 3456 if (ret < 0) 3457 return ret; 3458 3459 hpet_msi_write(irq_get_handler_data(irq), &msg); 3460 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3461 if (irq_remapped(irq_get_chip_data(irq))) 3462 irq_remap_modify_chip_defaults(chip); 3463 3464 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3465 return 0; 3466 } 3467 #endif 3468 
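/*
 * Illustrative usage sketch for the MSI-style helpers above (a sketch,
 * not a template -- the exact caller details live outside this file):
 * a subsystem first allocates a dynamic, vector-backed irq, then asks
 * this file to compose and install the message. The DMAR fault-interrupt
 * setup, for instance, does roughly:
 *
 *	irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	ret = arch_setup_dmar_msi(irq);
 *
 * with destroy_irq(irq) as the teardown path (see
 * native_teardown_msi_irq() above for the PCI case).
 */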
3469 #endif /* CONFIG_PCI_MSI */ 3470 /* 3471 * Hypertransport interrupt support 3472 */ 3473 #ifdef CONFIG_HT_IRQ 3474 3475 #ifdef CONFIG_SMP 3476 3477 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3478 { 3479 struct ht_irq_msg msg; 3480 fetch_ht_irq_msg(irq, &msg); 3481 3482 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3483 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3484 3485 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3486 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3487 3488 write_ht_irq_msg(irq, &msg); 3489 } 3490 3491 static int 3492 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3493 { 3494 struct irq_cfg *cfg = data->chip_data; 3495 unsigned int dest; 3496 3497 if (__ioapic_set_affinity(data, mask, &dest)) 3498 return -1; 3499 3500 target_ht_irq(data->irq, dest, cfg->vector); 3501 return 0; 3502 } 3503 3504 #endif 3505 3506 static struct irq_chip ht_irq_chip = { 3507 .name = "PCI-HT", 3508 .irq_mask = mask_ht_irq, 3509 .irq_unmask = unmask_ht_irq, 3510 .irq_ack = ack_apic_edge, 3511 #ifdef CONFIG_SMP 3512 .irq_set_affinity = ht_set_affinity, 3513 #endif 3514 .irq_retrigger = ioapic_retrigger_irq, 3515 }; 3516 3517 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3518 { 3519 struct irq_cfg *cfg; 3520 int err; 3521 3522 if (disable_apic) 3523 return -ENXIO; 3524 3525 cfg = irq_cfg(irq); 3526 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3527 if (!err) { 3528 struct ht_irq_msg msg; 3529 unsigned dest; 3530 3531 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3532 apic->target_cpus()); 3533 3534 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3535 3536 msg.address_lo = 3537 HT_IRQ_LOW_BASE | 3538 HT_IRQ_LOW_DEST_ID(dest) | 3539 HT_IRQ_LOW_VECTOR(cfg->vector) | 3540 ((apic->irq_dest_mode == 0) ? 3541 HT_IRQ_LOW_DM_PHYSICAL : 3542 HT_IRQ_LOW_DM_LOGICAL) | 3543 HT_IRQ_LOW_RQEOI_EDGE | 3544 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3545 HT_IRQ_LOW_MT_FIXED : 3546 HT_IRQ_LOW_MT_ARBITRATED) | 3547 HT_IRQ_LOW_IRQ_MASKED; 3548 3549 write_ht_irq_msg(irq, &msg); 3550 3551 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3552 handle_edge_irq, "edge"); 3553 3554 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3555 } 3556 return err; 3557 } 3558 #endif /* CONFIG_HT_IRQ */ 3559 3560 static int 3561 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3562 { 3563 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); 3564 int ret; 3565 3566 if (!cfg) 3567 return -EINVAL; 3568 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3569 if (!ret) 3570 setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, 3571 attr->trigger, attr->polarity); 3572 return ret; 3573 } 3574 3575 int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3576 struct io_apic_irq_attr *attr) 3577 { 3578 unsigned int id = attr->ioapic, pin = attr->ioapic_pin; 3579 int ret; 3580 3581 /* Avoid redundant programming */ 3582 if (test_bit(pin, ioapics[id].pin_programmed)) { 3583 pr_debug("Pin %d-%d already programmed\n", 3584 mpc_ioapic_id(id), pin); 3585 return 0; 3586 } 3587 ret = io_apic_setup_irq_pin(irq, node, attr); 3588 if (!ret) 3589 set_bit(pin, ioapics[id].pin_programmed); 3590 return ret; 3591 } 3592 3593 static int __init io_apic_get_redir_entries(int ioapic) 3594 { 3595 union IO_APIC_reg_01 reg_01; 3596 unsigned long flags; 3597 3598 raw_spin_lock_irqsave(&ioapic_lock, flags); 3599 reg_01.raw = io_apic_read(ioapic, 1); 3600 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3601 3602 /* The register returns the maximum redirection entry index 3603 * supported, which is one less than the total number of 3604 * redirection entries. 3605 */ 3606 return reg_01.bits.entries + 1; 3607 } 3608 3609 static void __init probe_nr_irqs_gsi(void) 3610 { 3611 int nr; 3612 3613 nr = gsi_top + NR_IRQS_LEGACY; 3614 if (nr > nr_irqs_gsi) 3615 nr_irqs_gsi = nr; 3616 3617 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3618 } 3619 3620 int get_nr_irqs_gsi(void) 3621 { 3622 return nr_irqs_gsi; 3623 } 3624 3625 #ifdef CONFIG_SPARSE_IRQ 3626 int __init arch_probe_nr_irqs(void) 3627 { 3628 int nr; 3629 3630 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3631 nr_irqs = NR_VECTORS * nr_cpu_ids; 3632 3633 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3634 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3635 /* 3636 * for MSI and HT dyn irqs 3637 */ 3638 nr += nr_irqs_gsi * 16; 3639 #endif 3640 if (nr < nr_irqs) 3641 nr_irqs = nr; 3642 3643 return NR_IRQS_LEGACY; 3644 } 3645 #endif 3646 3647 int io_apic_set_pci_routing(struct device *dev, int irq, 3648 struct io_apic_irq_attr *irq_attr) 3649 { 3650 int node; 3651 3652 if (!IO_APIC_IRQ(irq)) { 3653 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3654 irq_attr->ioapic); 3655 return -EINVAL; 3656 } 3657 3658 node = dev ? dev_to_node(dev) : cpu_to_node(0); 3659 3660 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3661 } 3662 3663 #ifdef CONFIG_X86_32 3664 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3665 { 3666 union IO_APIC_reg_00 reg_00; 3667 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3668 physid_mask_t tmp; 3669 unsigned long flags; 3670 int i = 0; 3671 3672 /* 3673 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3674 * buses (one for LAPICs, one for IOAPICs), where predecessors only 3675 * support up to 16 on one shared APIC bus.
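 * (get_physical_broadcast() below returns that limit: an ID is usable only if it stays below the broadcast ID of the APIC bus in use.)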
3676 * 3677 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3678 * advantage of new APIC bus architecture. 3679 */ 3680 3681 if (physids_empty(apic_id_map)) 3682 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3683 3684 raw_spin_lock_irqsave(&ioapic_lock, flags); 3685 reg_00.raw = io_apic_read(ioapic, 0); 3686 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3687 3688 if (apic_id >= get_physical_broadcast()) { 3689 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3690 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3691 apic_id = reg_00.bits.ID; 3692 } 3693 3694 /* 3695 * Every APIC in a system must have a unique ID or we get lots of nice 3696 * 'stuck on smp_invalidate_needed IPI wait' messages. 3697 */ 3698 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3699 3700 for (i = 0; i < get_physical_broadcast(); i++) { 3701 if (!apic->check_apicid_used(&apic_id_map, i)) 3702 break; 3703 } 3704 3705 if (i == get_physical_broadcast()) 3706 panic("Max apic_id exceeded!\n"); 3707 3708 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3709 "trying %d\n", ioapic, apic_id, i); 3710 3711 apic_id = i; 3712 } 3713 3714 apic->apicid_to_cpu_present(apic_id, &tmp); 3715 physids_or(apic_id_map, apic_id_map, tmp); 3716 3717 if (reg_00.bits.ID != apic_id) { 3718 reg_00.bits.ID = apic_id; 3719 3720 raw_spin_lock_irqsave(&ioapic_lock, flags); 3721 io_apic_write(ioapic, 0, reg_00.raw); 3722 reg_00.raw = io_apic_read(ioapic, 0); 3723 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3724 3725 /* Sanity check */ 3726 if (reg_00.bits.ID != apic_id) { 3727 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3728 return -1; 3729 } 3730 } 3731 3732 apic_printk(APIC_VERBOSE, KERN_INFO 3733 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3734 3735 return apic_id; 3736 } 3737 3738 static u8 __init io_apic_unique_id(u8 id) 3739 { 3740 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3741 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3742 return io_apic_get_unique_id(nr_ioapics, id); 3743 else 3744 return id; 3745 } 3746 #else 3747 static u8 __init io_apic_unique_id(u8 id) 3748 { 3749 int i; 3750 DECLARE_BITMAP(used, 256); 3751 3752 bitmap_zero(used, 256); 3753 for (i = 0; i < nr_ioapics; i++) { 3754 __set_bit(mpc_ioapic_id(i), used); 3755 } 3756 if (!test_bit(id, used)) 3757 return id; 3758 return find_first_zero_bit(used, 256); 3759 } 3760 #endif 3761 3762 static int __init io_apic_get_version(int ioapic) 3763 { 3764 union IO_APIC_reg_01 reg_01; 3765 unsigned long flags; 3766 3767 raw_spin_lock_irqsave(&ioapic_lock, flags); 3768 reg_01.raw = io_apic_read(ioapic, 1); 3769 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3770 3771 return reg_01.bits.version; 3772 } 3773 3774 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3775 { 3776 int ioapic, pin, idx; 3777 3778 if (skip_ioapic_setup) 3779 return -1; 3780 3781 ioapic = mp_find_ioapic(gsi); 3782 if (ioapic < 0) 3783 return -1; 3784 3785 pin = mp_find_ioapic_pin(ioapic, gsi); 3786 if (pin < 0) 3787 return -1; 3788 3789 idx = find_irq_entry(ioapic, pin, mp_INT); 3790 if (idx < 0) 3791 return -1; 3792 3793 *trigger = irq_trigger(idx); 3794 *polarity = irq_polarity(idx); 3795 return 0; 3796 } 3797 3798 /* 3799 * This function currently is only a helper for the i386 smp boot process where 3800 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3801 * so mask in all cases should simply be apic->target_cpus() 3802 */ 3803 #ifdef CONFIG_SMP 3804 void 
__init setup_ioapic_dest(void) 3805 { 3806 int pin, ioapic, irq, irq_entry; 3807 const struct cpumask *mask; 3808 struct irq_data *idata; 3809 3810 if (skip_ioapic_setup == 1) 3811 return; 3812 3813 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3814 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3815 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3816 if (irq_entry == -1) 3817 continue; 3818 irq = pin_2_irq(irq_entry, ioapic, pin); 3819 3820 if ((ioapic > 0) && (irq > 16)) 3821 continue; 3822 3823 idata = irq_get_irq_data(irq); 3824 3825 /* 3826 * Honour affinities which have been set in early boot 3827 */ 3828 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3829 mask = idata->affinity; 3830 else 3831 mask = apic->target_cpus(); 3832 3833 if (intr_remapping_enabled) 3834 ir_ioapic_set_affinity(idata, mask, false); 3835 else 3836 ioapic_set_affinity(idata, mask, false); 3837 } 3838 3839 } 3840 #endif 3841 3842 #define IOAPIC_RESOURCE_NAME_SIZE 11 3843 3844 static struct resource *ioapic_resources; 3845 3846 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3847 { 3848 unsigned long n; 3849 struct resource *res; 3850 char *mem; 3851 int i; 3852 3853 if (nr_ioapics <= 0) 3854 return NULL; 3855 3856 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3857 n *= nr_ioapics; 3858 3859 mem = alloc_bootmem(n); 3860 res = (void *)mem; 3861 3862 mem += sizeof(struct resource) * nr_ioapics; 3863 3864 for (i = 0; i < nr_ioapics; i++) { 3865 res[i].name = mem; 3866 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3867 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3868 mem += IOAPIC_RESOURCE_NAME_SIZE; 3869 } 3870 3871 ioapic_resources = res; 3872 3873 return res; 3874 } 3875 3876 void __init ioapic_and_gsi_init(void) 3877 { 3878 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3879 struct resource *ioapic_res; 3880 int i; 3881 3882 ioapic_res = ioapic_setup_resources(nr_ioapics); 3883 for (i = 0; i < nr_ioapics; i++) { 3884 if (smp_found_config) { 3885 ioapic_phys = mpc_ioapic_addr(i); 3886 #ifdef CONFIG_X86_32 3887 if (!ioapic_phys) { 3888 printk(KERN_ERR 3889 "WARNING: bogus zero IO-APIC " 3890 "address found in MPTABLE, " 3891 "disabling IO/APIC support!\n"); 3892 smp_found_config = 0; 3893 skip_ioapic_setup = 1; 3894 goto fake_ioapic_page; 3895 } 3896 #endif 3897 } else { 3898 #ifdef CONFIG_X86_32 3899 fake_ioapic_page: 3900 #endif 3901 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3902 ioapic_phys = __pa(ioapic_phys); 3903 } 3904 set_fixmap_nocache(idx, ioapic_phys); 3905 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3906 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3907 ioapic_phys); 3908 idx++; 3909 3910 ioapic_res->start = ioapic_phys; 3911 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3912 ioapic_res++; 3913 } 3914 3915 probe_nr_irqs_gsi(); 3916 } 3917 3918 void __init ioapic_insert_resources(void) 3919 { 3920 int i; 3921 struct resource *r = ioapic_resources; 3922 3923 if (!r) { 3924 if (nr_ioapics > 0) 3925 printk(KERN_ERR 3926 "IO APIC resources couldn't be allocated.\n"); 3927 return; 3928 } 3929 3930 for (i = 0; i < nr_ioapics; i++) { 3931 insert_resource(&iomem_resource, r); 3932 r++; 3933 } 3934 } 3935 3936 int mp_find_ioapic(u32 gsi) 3937 { 3938 int i = 0; 3939 3940 if (nr_ioapics == 0) 3941 return -1; 3942 3943 /* Find the IOAPIC that manages this GSI. 
*/ 3944 for (i = 0; i < nr_ioapics; i++) { 3945 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3946 if ((gsi >= gsi_cfg->gsi_base) 3947 && (gsi <= gsi_cfg->gsi_end)) 3948 return i; 3949 } 3950 3951 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3952 return -1; 3953 } 3954 3955 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3956 { 3957 struct mp_ioapic_gsi *gsi_cfg; 3958 3959 if (WARN_ON(ioapic == -1)) 3960 return -1; 3961 3962 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3963 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3964 return -1; 3965 3966 return gsi - gsi_cfg->gsi_base; 3967 } 3968 3969 static __init int bad_ioapic(unsigned long address) 3970 { 3971 if (nr_ioapics >= MAX_IO_APICS) { 3972 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 3973 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 3974 return 1; 3975 } 3976 if (!address) { 3977 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 3978 " found in table, skipping!\n"); 3979 return 1; 3980 } 3981 return 0; 3982 } 3983 3984 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3985 { 3986 int idx = 0; 3987 int entries; 3988 struct mp_ioapic_gsi *gsi_cfg; 3989 3990 if (bad_ioapic(address)) 3991 return; 3992 3993 idx = nr_ioapics; 3994 3995 ioapics[idx].mp_config.type = MP_IOAPIC; 3996 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3997 ioapics[idx].mp_config.apicaddr = address; 3998 3999 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 4000 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 4001 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 4002 4003 /* 4004 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 4005 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 4006 */ 4007 entries = io_apic_get_redir_entries(idx); 4008 gsi_cfg = mp_ioapic_gsi_routing(idx); 4009 gsi_cfg->gsi_base = gsi_base; 4010 gsi_cfg->gsi_end = gsi_base + entries - 1; 4011 4012 /* 4013 * The number of IO-APIC IRQ registers (== #pins): 4014 */ 4015 ioapics[idx].nr_registers = entries; 4016 4017 if (gsi_cfg->gsi_end >= gsi_top) 4018 gsi_top = gsi_cfg->gsi_end + 1; 4019 4020 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 4021 "GSI %d-%d\n", idx, mpc_ioapic_id(idx), 4022 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 4023 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 4024 4025 nr_ioapics++; 4026 } 4027 4028 /* Enable IOAPIC early just for system timer */ 4029 void __init pre_init_apic_IRQ0(void) 4030 { 4031 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 4032 4033 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4034 #ifndef CONFIG_SMP 4035 physid_set_mask_of_physid(boot_cpu_physical_apicid, 4036 &phys_cpu_present_map); 4037 #endif 4038 setup_local_APIC(); 4039 4040 io_apic_setup_irq_pin(0, 0, &attr); 4041 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 4042 "edge"); 4043 } 4044