/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

#ifdef CONFIG_IRQ_REMAP
static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
static inline bool irq_remapped(struct irq_cfg *cfg)
{
	return cfg->irq_2_iommu.iommu != NULL;
}
#else
static inline bool irq_remapped(struct irq_cfg *cfg)
{
	return false;
}
static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
}
#endif

/*
 * Is the SiS APIC rmw bug present ?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);
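/*
 * Locking note: ioapic_lock serializes every access through an
 * IO-APIC's index/data register window (and the RTE read/modify/write
 * helpers below), while vector_lock protects the per-cpu vector_irq
 * tables during vector assignment and while new cpus are brought up.
 */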
static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver

int mpc_ioapic_id(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
{
	return &ioapics[ioapic_idx].gsi_config;
}

int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#ifdef CONFIG_EISA
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs)
		io_apic_irqs = ~0UL;

	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_setall(cfg[i].domain);
		}
	}

	return 0;
}

static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}
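/*
 * Register layout of the IO-APIC's memory-mapped window below: a
 * register is selected by writing its number to 'index' (offset 0x00)
 * and is then accessed through 'data' (offset 0x10). The 'eoi'
 * register at offset 0x40 only exists on version >= 0x20 parts (see
 * the EOI handling further down).
 */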
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);

	return eu.entry;
}

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.entry = __ioapic_read_entry(apic, pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
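/*
 * The helpers below apply "reg = (reg & mask_and) | mask_or" to the
 * low word of the RTE of every pin serviced by an IRQ; the optional
 * 'final' callback runs after each write (it is used to flush posted
 * writes when masking, see io_apic_sync()).
 */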
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}
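/*
 * Note that masking flushes the write with a dummy read (io_apic_sync()
 * above) so the pin really is masked before the lock is dropped;
 * unmasking passes no 'final' hook, since a slightly delayed unmask is
 * harmless.
 */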
static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic
 * version as 0x2. This is an error with documentation and these ICH chips
 * use io-apic's of version 0x20.
 *
 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during this.
 */
static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		/*
		 * Intr-remapping uses pin number as the virtual vector
		 * in the RTE. Actual vector is programmed in
		 * intr-remapping table entry. Hence for the io-apic
		 * EOI we use the pin number.
		 */
		if (cfg && irq_remapped(cfg))
			io_apic_eoi(apic, pin);
		else
			io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.mask = 1;
		entry1.trigger = IOAPIC_EDGE;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous level triggered entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}

static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin)
		__eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		__eoi_ioapic_pin(apic, pin, entry.vector, NULL);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			clear_IO_APIC_pin(apic, pin);
}
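/*
 * Example of the "pirq=" option handled below: booting with pirq=15,11
 * stores 15 in pirq_entries[7] and 11 in pirq_entries[6] (the table is
 * filled top-down), and pin_2_irq() then redirects IO-APIC pins 16-23
 * through pirq_entries[pin - 16].
 */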
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTEs.
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int ioapic_idx, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}

#ifdef CONFIG_EISA
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif
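/*
 * The ELCR read above uses a pair of legacy I/O ports (0x4d0 for
 * IRQ0-7, 0x4d1 for IRQ8-15) with one bit per IRQ: a set bit means the
 * line is level triggered, a clear bit means edge.
 */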
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)
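/*
 * MP table irqflag encoding used below: bits 0-1 give the polarity
 * (00 conforms to the bus, 01 active high, 11 active low) and bits 2-3
 * the trigger mode (00 conforms, 01 edge, 11 level). E.g. irqflag 0x0d
 * describes an active-high, level-triggered line.
 */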
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			pr_warn("broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			pr_warn("broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#ifdef CONFIG_EISA
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				default:
				{
					pr_warn("broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			pr_warn("broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			pr_warn("broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int ioapic_idx, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq);

			if (!(ioapic_idx || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
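/*
 * __assign_irq_vector() below hands out vectors in steps of 16 so that
 * consecutive allocations land on different priority levels (the level
 * is vector >> 4); only when a whole sweep fails is the offset bumped
 * and the search restarted, e.g. (assuming FIRST_EXTERNAL_VECTOR is
 * 0x20): 0x31, 0x41, ... then 0x22, 0x32, ...
 */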
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
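/*
 * While a vector move is in progress the old vector remains installed
 * in vector_irq on the cpus in old_domain; it is only torn down once
 * the cleanup IPI handler (smp_irq_move_cleanup_interrupt() below) has
 * run on those cpus.
 */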
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
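/*
 * Level-triggered IRQs get the fasteoi flow handler (a single ack at
 * end-of-interrupt), edge-triggered ones the edge handler, which can
 * deal with an interrupt that fires again while it is being serviced.
 */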
static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		irq_remap_modify_chip_defaults(chip);
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}

static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	if (irq_remapping_enabled)
		return setup_ioapic_remapped_entry(irq, entry, destination,
						   vector, attr);

	memset(entry, 0, sizeof(*entry));

	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode     = apic->irq_dest_mode;
	entry->dest          = destination;
	entry->vector        = vector;
	entry->mask          = 0;	/* enable IRQ */
	entry->trigger       = attr->trigger;
	entry->polarity      = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
			     struct io_apic_irq_attr *attr)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
					 &dest)) {
		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
		    cfg->vector, irq, attr->trigger, attr->polarity, dest);

	if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	ioapic_register_intr(irq, cfg, attr->trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
}

static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mpc_ioapic_id(ioapic_idx), pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int ioapic_idx)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) {
		idx = find_irq_entry(ioapic_idx, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, ioapic_idx, pin))
			continue;

		irq = pin_2_irq(idx, ioapic_idx, pin);

		if ((ioapic_idx > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(ioapic_idx, irq))
			continue;

		set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

static void __init setup_IO_APIC_irqs(void)
{
	unsigned int ioapic_idx;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		__io_apic_setup_irqs(ioapic_idx);
}
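/*
 * Note the ordering in setup_ioapic_irq() above: the vector is
 * allocated and the flow handler registered before the RTE is finally
 * written, and level-triggered entries are created in the masked
 * state, so a pin can never fire through a half-initialized route.
 */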
/*
 * For a gsi that is not in the first ioapic
 * but could not use acpi_register_gsi(),
 * like some special sci in IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic_idx = mp_find_ioapic(gsi);
	if (ioapic_idx < 0)
		return;

	pin = mp_find_ioapic_pin(ioapic_idx, gsi);
	idx = find_irq_entry(ioapic_idx, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, ioapic_idx, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (irq_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
						  apic->target_cpus(), &dest)))
		dest = BAD_APICID;

	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = dest;
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(ioapic_idx, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
{
	int i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	reg_01.raw = io_apic_read(ioapic_idx, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(ioapic_idx, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(ioapic_idx, 3);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
	       reg_01.bits.entries);

	printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
	       reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	if (irq_remapping_enabled) {
		printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
			" Pol Stat Indx2 Zero Vect:\n");
	} else {
		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			" Stat Dmod Deli Vect:\n");
	}

	for (i = 0; i <= reg_01.bits.entries; i++) {
		if (irq_remapping_enabled) {
			struct IO_APIC_route_entry entry;
			struct IR_IO_APIC_route_entry *ir_entry;

			entry = ioapic_read_entry(ioapic_idx, i);
			ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
			printk(KERN_DEBUG " %02x %04X ",
				i,
				ir_entry->index
			);
			pr_cont("%1d %1d %1d %1d %1d "
				"%1d %1d %X %02X\n",
				ir_entry->format,
				ir_entry->mask,
				ir_entry->trigger,
				ir_entry->irr,
				ir_entry->polarity,
				ir_entry->delivery_status,
				ir_entry->index2,
				ir_entry->zero,
				ir_entry->vector
			);
		} else {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(ioapic_idx, i);
			printk(KERN_DEBUG " %02x %02X ",
				i,
				entry.dest
			);
			pr_cont("%1d %1d %1d %1d %1d "
				"%1d %1d %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
}

__apicdebuginit(void) print_IO_APICs(void)
{
	int ioapic_idx;
	struct irq_cfg *cfg;
	unsigned int irq;
	struct irq_chip *chip;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip)
			continue;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}
__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);
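/*
 * The i8259 may be wired to an IO-APIC pin in ExtINT mode, in which
 * case the IO-APIC just passes the PIC's vector through.
 * enable_IO_APIC() records that pin below so that disable_IO_APIC()
 * can restore a virtual-wire setup before a reboot or crash kernel.
 */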
/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic,
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little more complex (we need to configure
	 * both the IOAPIC RTE and the interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = 0;		/* Enabled */
		entry.trigger = 0;	/* Edge */
		entry.irr = 0;
		entry.polarity = 0;	/* High */
		entry.delivery_status = 0;
		entry.dest_mode = 0;	/* Physical */
		entry.delivery_mode = dest_ExtINT;	/* ExtInt */
		entry.vector = 0;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!irq_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
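/*
 * timer_irq_works() below lets roughly ten timer ticks pass (the
 * mdelay of (10 * 1000) / HZ milliseconds is ten tick periods, e.g.
 * 10ms at HZ=1000) and then requires jiffies to have advanced by at
 * least five before the timer is believed to work.
 */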
 */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
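 *
 * (A sketch of what this means in practice - the registration path
 *  elsewhere in this file conceptually does
 *
 *	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "fasteoi");
 *
 *  for level-triggered pins and
 *
 *	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
 *
 *  for edge-triggered ones, pairing them with the .irq_eoi/.irq_ack
 *  callbacks of ioapic_chip below.)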
 */

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], -1);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_cfg *cfg) { }
#endif

static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry.
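		 *
		 * (Each pin's 64-bit route entry occupies a pair of 32-bit
		 * registers: 0x10 + pin*2 holds the low word with the vector,
		 * 0x11 + pin*2 the high word with the destination; pin 9, for
		 * example, uses registers 0x22 and 0x23.)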
2328 */ 2329 if (!irq_remapped(cfg)) 2330 io_apic_write(apic, 0x11 + pin*2, dest); 2331 reg = io_apic_read(apic, 0x10 + pin*2); 2332 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2333 reg |= vector; 2334 io_apic_modify(apic, 0x10 + pin*2, reg); 2335 } 2336 } 2337 2338 /* 2339 * Either sets data->affinity to a valid value, and returns 2340 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2341 * leaves data->affinity untouched. 2342 */ 2343 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2344 unsigned int *dest_id) 2345 { 2346 struct irq_cfg *cfg = data->chip_data; 2347 unsigned int irq = data->irq; 2348 int err; 2349 2350 if (!config_enabled(CONFIG_SMP)) 2351 return -1; 2352 2353 if (!cpumask_intersects(mask, cpu_online_mask)) 2354 return -EINVAL; 2355 2356 err = assign_irq_vector(irq, cfg, mask); 2357 if (err) 2358 return err; 2359 2360 err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); 2361 if (err) { 2362 if (assign_irq_vector(irq, cfg, data->affinity)) 2363 pr_err("Failed to recover vector for irq %d\n", irq); 2364 return err; 2365 } 2366 2367 cpumask_copy(data->affinity, mask); 2368 2369 return 0; 2370 } 2371 2372 static int 2373 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2374 bool force) 2375 { 2376 unsigned int dest, irq = data->irq; 2377 unsigned long flags; 2378 int ret; 2379 2380 if (!config_enabled(CONFIG_SMP)) 2381 return -1; 2382 2383 raw_spin_lock_irqsave(&ioapic_lock, flags); 2384 ret = __ioapic_set_affinity(data, mask, &dest); 2385 if (!ret) { 2386 /* Only the high 8 bits are valid. */ 2387 dest = SET_APIC_LOGICAL_ID(dest); 2388 __target_IO_APIC_irq(irq, dest, data->chip_data); 2389 ret = IRQ_SET_MASK_OK_NOCOPY; 2390 } 2391 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2392 return ret; 2393 } 2394 2395 static void ack_apic_edge(struct irq_data *data) 2396 { 2397 irq_complete_move(data->chip_data); 2398 irq_move_irq(data); 2399 ack_APIC_irq(); 2400 } 2401 2402 atomic_t irq_mis_count; 2403 2404 #ifdef CONFIG_GENERIC_PENDING_IRQ 2405 static bool io_apic_level_ack_pending(struct irq_cfg *cfg) 2406 { 2407 struct irq_pin_list *entry; 2408 unsigned long flags; 2409 2410 raw_spin_lock_irqsave(&ioapic_lock, flags); 2411 for_each_irq_pin(entry, cfg->irq_2_pin) { 2412 unsigned int reg; 2413 int pin; 2414 2415 pin = entry->pin; 2416 reg = io_apic_read(entry->apic, 0x10 + pin*2); 2417 /* Is the remote IRR bit set? */ 2418 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 2419 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2420 return true; 2421 } 2422 } 2423 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2424 2425 return false; 2426 } 2427 2428 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) 2429 { 2430 /* If we are moving the irq we need to mask it */ 2431 if (unlikely(irqd_is_setaffinity_pending(data))) { 2432 mask_ioapic(cfg); 2433 return true; 2434 } 2435 return false; 2436 } 2437 2438 static inline void ioapic_irqd_unmask(struct irq_data *data, 2439 struct irq_cfg *cfg, bool masked) 2440 { 2441 if (unlikely(masked)) { 2442 /* Only migrate the irq if the ack has been received. 2443 * 2444 * On rare occasions the broadcast level triggered ack gets 2445 * delayed going to ioapics, and if we reprogram the 2446 * vector while Remote IRR is still set the irq will never 2447 * fire again. 2448 * 2449 * To prevent this scenario we read the Remote IRR bit 2450 * of the ioapic. This has two effects. 
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic, I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			irq_move_masked_irq(data);
		unmask_ioapic(cfg);
	}
}
#else
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
	return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data,
				      struct irq_cfg *cfg, bool masked)
{
}
#endif

static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, irq = data->irq;
	unsigned long v;
	bool masked;

	irq_complete_move(cfg);
	masked = ioapic_irqd_mask(data, cfg);

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets). Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as an edge-triggered one but the respective
	 * IRR bit gets set nevertheless. As a result the I/O unit expects an
	 * EOI message but it will never arrive and further interrupts are
	 * blocked from the source. The exact reason is so far unknown, but
	 * the phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually. We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt. We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul. --macro
	 *
	 * Also, when a cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case too
	 * a level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcasted to the IO-APICs, which would clear the remote IRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
	 * supporting an EOI register, we do an explicit EOI to clear the
	 * remote IRR, and on IO-APICs which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
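	 *
	 * (On the TMR test that follows: the 256-bit TMR is exposed as
	 * eight 32-bit registers spaced 0x10 apart, so vector i lives at
	 * bit i % 32 of the register at offset (i / 32) * 0x10 - which is
	 * exactly what ((i & ~0x1f) >> 1) computes.)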
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing the remote IRR bit (either by delivering the
	 * EOI message via an io-apic EOI register write or simulating it
	 * using the mask+edge followed by unmask+level logic) manually when
	 * the level-triggered interrupt is seen as an edge-triggered
	 * interrupt at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	ioapic_irqd_unmask(data, cfg, masked);
}

#ifdef CONFIG_IRQ_REMAP
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}

static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
{
	seq_printf(p, " IR-%s", data->chip->name);
}

static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
	chip->irq_print_chip = ir_print_prefix;
	chip->irq_ack = ir_ack_apic_edge;
	chip->irq_eoi = ir_ack_apic_level;

	chip->irq_set_affinity = set_remapped_irq_affinity;
}
#endif /* CONFIG_IRQ_REMAP */

static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
	.irq_set_affinity	= ioapic_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};

static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well..
*/ 2617 irq_set_chip(irq, &no_irq_chip); 2618 } 2619 } 2620 } 2621 2622 /* 2623 * The local APIC irq-chip implementation: 2624 */ 2625 2626 static void mask_lapic_irq(struct irq_data *data) 2627 { 2628 unsigned long v; 2629 2630 v = apic_read(APIC_LVT0); 2631 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2632 } 2633 2634 static void unmask_lapic_irq(struct irq_data *data) 2635 { 2636 unsigned long v; 2637 2638 v = apic_read(APIC_LVT0); 2639 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2640 } 2641 2642 static void ack_lapic_irq(struct irq_data *data) 2643 { 2644 ack_APIC_irq(); 2645 } 2646 2647 static struct irq_chip lapic_chip __read_mostly = { 2648 .name = "local-APIC", 2649 .irq_mask = mask_lapic_irq, 2650 .irq_unmask = unmask_lapic_irq, 2651 .irq_ack = ack_lapic_irq, 2652 }; 2653 2654 static void lapic_register_intr(int irq) 2655 { 2656 irq_clear_status_flags(irq, IRQ_LEVEL); 2657 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2658 "edge"); 2659 } 2660 2661 /* 2662 * This looks a bit hackish but it's about the only one way of sending 2663 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2664 * not support the ExtINT mode, unfortunately. We need to send these 2665 * cycles as some i82489DX-based boards have glue logic that keeps the 2666 * 8259A interrupt line asserted until INTA. --macro 2667 */ 2668 static inline void __init unlock_ExtINT_logic(void) 2669 { 2670 int apic, pin, i; 2671 struct IO_APIC_route_entry entry0, entry1; 2672 unsigned char save_control, save_freq_select; 2673 2674 pin = find_isa_irq_pin(8, mp_INT); 2675 if (pin == -1) { 2676 WARN_ON_ONCE(1); 2677 return; 2678 } 2679 apic = find_isa_irq_apic(8, mp_INT); 2680 if (apic == -1) { 2681 WARN_ON_ONCE(1); 2682 return; 2683 } 2684 2685 entry0 = ioapic_read_entry(apic, pin); 2686 clear_IO_APIC_pin(apic, pin); 2687 2688 memset(&entry1, 0, sizeof(entry1)); 2689 2690 entry1.dest_mode = 0; /* physical delivery */ 2691 entry1.mask = 0; /* unmask IRQ now */ 2692 entry1.dest = hard_smp_processor_id(); 2693 entry1.delivery_mode = dest_ExtINT; 2694 entry1.polarity = entry0.polarity; 2695 entry1.trigger = 0; 2696 entry1.vector = 0; 2697 2698 ioapic_write_entry(apic, pin, entry1); 2699 2700 save_control = CMOS_READ(RTC_CONTROL); 2701 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2702 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2703 RTC_FREQ_SELECT); 2704 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2705 2706 i = 100; 2707 while (i-- > 0) { 2708 mdelay(10); 2709 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2710 i -= 10; 2711 } 2712 2713 CMOS_WRITE(save_control, RTC_CONTROL); 2714 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2715 clear_IO_APIC_pin(apic, pin); 2716 2717 ioapic_write_entry(apic, pin, entry0); 2718 } 2719 2720 static int disable_timer_pin_1 __initdata; 2721 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2722 static int __init disable_timer_pin_setup(char *arg) 2723 { 2724 disable_timer_pin_1 = 1; 2725 return 0; 2726 } 2727 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2728 2729 int timer_through_8259 __initdata; 2730 2731 /* 2732 * This code may look a bit paranoid, but it's supposed to cooperate with 2733 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2734 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2735 * fanatically on his truly buggy board. 2736 * 2737 * FIXME: really need to revamp this for all platforms. 
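 *
 * (Road map of the fallback ladder below: 1) IRQ0 via the IO-APIC pin
 *  reported for the timer in the MP table; 2) the same IRQ routed through
 *  the 8259A cascade pin instead; 3) the local APIC's LVT0 as a 'Virtual
 *  Wire' IRQ; 4) plain 8259A ExtINT delivery. Only if all four fail do we
 *  panic.)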
2738 */ 2739 static inline void __init check_timer(void) 2740 { 2741 struct irq_cfg *cfg = irq_get_chip_data(0); 2742 int node = cpu_to_node(0); 2743 int apic1, pin1, apic2, pin2; 2744 unsigned long flags; 2745 int no_pin1 = 0; 2746 2747 local_irq_save(flags); 2748 2749 /* 2750 * get/set the timer IRQ vector: 2751 */ 2752 legacy_pic->mask(0); 2753 assign_irq_vector(0, cfg, apic->target_cpus()); 2754 2755 /* 2756 * As IRQ0 is to be enabled in the 8259A, the virtual 2757 * wire has to be disabled in the local APIC. Also 2758 * timer interrupts need to be acknowledged manually in 2759 * the 8259A for the i82489DX when using the NMI 2760 * watchdog as that APIC treats NMIs as level-triggered. 2761 * The AEOI mode will finish them in the 8259A 2762 * automatically. 2763 */ 2764 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2765 legacy_pic->init(1); 2766 2767 pin1 = find_isa_irq_pin(0, mp_INT); 2768 apic1 = find_isa_irq_apic(0, mp_INT); 2769 pin2 = ioapic_i8259.pin; 2770 apic2 = ioapic_i8259.apic; 2771 2772 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2773 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2774 cfg->vector, apic1, pin1, apic2, pin2); 2775 2776 /* 2777 * Some BIOS writers are clueless and report the ExtINTA 2778 * I/O APIC input from the cascaded 8259A as the timer 2779 * interrupt input. So just in case, if only one pin 2780 * was found above, try it both directly and through the 2781 * 8259A. 2782 */ 2783 if (pin1 == -1) { 2784 if (irq_remapping_enabled) 2785 panic("BIOS bug: timer not connected to IO-APIC"); 2786 pin1 = pin2; 2787 apic1 = apic2; 2788 no_pin1 = 1; 2789 } else if (pin2 == -1) { 2790 pin2 = pin1; 2791 apic2 = apic1; 2792 } 2793 2794 if (pin1 != -1) { 2795 /* 2796 * Ok, does IRQ0 through the IOAPIC work? 2797 */ 2798 if (no_pin1) { 2799 add_pin_to_irq_node(cfg, node, apic1, pin1); 2800 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2801 } else { 2802 /* for edge trigger, setup_ioapic_irq already 2803 * leave it unmasked. 2804 * so only need to unmask if it is level-trigger 2805 * do we really have level trigger timer? 2806 */ 2807 int idx; 2808 idx = find_irq_entry(apic1, pin1, mp_INT); 2809 if (idx != -1 && irq_trigger(idx)) 2810 unmask_ioapic(cfg); 2811 } 2812 if (timer_irq_works()) { 2813 if (disable_timer_pin_1 > 0) 2814 clear_IO_APIC_pin(0, pin1); 2815 goto out; 2816 } 2817 if (irq_remapping_enabled) 2818 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2819 local_irq_disable(); 2820 clear_IO_APIC_pin(apic1, pin1); 2821 if (!no_pin1) 2822 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2823 "8254 timer not connected to IO-APIC\n"); 2824 2825 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2826 "(IRQ0) through the 8259A ...\n"); 2827 apic_printk(APIC_QUIET, KERN_INFO 2828 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2829 /* 2830 * legacy devices should be connected to IO APIC #0 2831 */ 2832 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2833 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2834 legacy_pic->unmask(0); 2835 if (timer_irq_works()) { 2836 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2837 timer_through_8259 = 1; 2838 goto out; 2839 } 2840 /* 2841 * Cleanup, just in case ... 2842 */ 2843 local_irq_disable(); 2844 legacy_pic->mask(0); 2845 clear_IO_APIC_pin(apic2, pin2); 2846 apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); 2847 } 2848 2849 apic_printk(APIC_QUIET, KERN_INFO 2850 "...trying to set up timer as Virtual Wire IRQ...\n"); 2851 2852 lapic_register_intr(0); 2853 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2854 legacy_pic->unmask(0); 2855 2856 if (timer_irq_works()) { 2857 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2858 goto out; 2859 } 2860 local_irq_disable(); 2861 legacy_pic->mask(0); 2862 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2863 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2864 2865 apic_printk(APIC_QUIET, KERN_INFO 2866 "...trying to set up timer as ExtINT IRQ...\n"); 2867 2868 legacy_pic->init(0); 2869 legacy_pic->make_irq(0); 2870 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2871 2872 unlock_ExtINT_logic(); 2873 2874 if (timer_irq_works()) { 2875 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2876 goto out; 2877 } 2878 local_irq_disable(); 2879 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2880 if (x2apic_preenabled) 2881 apic_printk(APIC_QUIET, KERN_INFO 2882 "Perhaps problem with the pre-enabled x2apic mode\n" 2883 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); 2884 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2885 "report. Then try booting with the 'noapic' option.\n"); 2886 out: 2887 local_irq_restore(flags); 2888 } 2889 2890 /* 2891 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2892 * to devices. However there may be an I/O APIC pin available for 2893 * this interrupt regardless. The pin may be left unconnected, but 2894 * typically it will be reused as an ExtINT cascade interrupt for 2895 * the master 8259A. In the MPS case such a pin will normally be 2896 * reported as an ExtINT interrupt in the MP table. With ACPI 2897 * there is no provision for ExtINT interrupts, and in the absence 2898 * of an override it would be treated as an ordinary ISA I/O APIC 2899 * interrupt, that is edge-triggered and unmasked by default. We 2900 * used to do this, but it caused problems on some systems because 2901 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2902 * the same ExtINT cascade interrupt to drive the local APIC of the 2903 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2904 * the I/O APIC in all cases now. No actual device should request 2905 * it anyway. --macro 2906 */ 2907 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2908 2909 void __init setup_IO_APIC(void) 2910 { 2911 2912 /* 2913 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2914 */ 2915 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2916 2917 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2918 /* 2919 * Set up IO-APIC IRQ routing. 2920 */ 2921 x86_init.mpparse.setup_ioapic_ids(); 2922 2923 sync_Arb_IDs(); 2924 setup_IO_APIC_irqs(); 2925 init_IO_APIC_traps(); 2926 if (legacy_pic->nr_legacy_irqs) 2927 check_timer(); 2928 } 2929 2930 /* 2931 * Called after all the initialization is done. 
If we didn't find any 2932 * APIC bugs then we can allow the modify fast path 2933 */ 2934 2935 static int __init io_apic_bug_finalize(void) 2936 { 2937 if (sis_apic_bug == -1) 2938 sis_apic_bug = 0; 2939 return 0; 2940 } 2941 2942 late_initcall(io_apic_bug_finalize); 2943 2944 static void resume_ioapic_id(int ioapic_idx) 2945 { 2946 unsigned long flags; 2947 union IO_APIC_reg_00 reg_00; 2948 2949 raw_spin_lock_irqsave(&ioapic_lock, flags); 2950 reg_00.raw = io_apic_read(ioapic_idx, 0); 2951 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { 2952 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); 2953 io_apic_write(ioapic_idx, 0, reg_00.raw); 2954 } 2955 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2956 } 2957 2958 static void ioapic_resume(void) 2959 { 2960 int ioapic_idx; 2961 2962 for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--) 2963 resume_ioapic_id(ioapic_idx); 2964 2965 restore_ioapic_entries(); 2966 } 2967 2968 static struct syscore_ops ioapic_syscore_ops = { 2969 .suspend = save_ioapic_entries, 2970 .resume = ioapic_resume, 2971 }; 2972 2973 static int __init ioapic_init_ops(void) 2974 { 2975 register_syscore_ops(&ioapic_syscore_ops); 2976 2977 return 0; 2978 } 2979 2980 device_initcall(ioapic_init_ops); 2981 2982 /* 2983 * Dynamic irq allocate and deallocation 2984 */ 2985 unsigned int create_irq_nr(unsigned int from, int node) 2986 { 2987 struct irq_cfg *cfg; 2988 unsigned long flags; 2989 unsigned int ret = 0; 2990 int irq; 2991 2992 if (from < nr_irqs_gsi) 2993 from = nr_irqs_gsi; 2994 2995 irq = alloc_irq_from(from, node); 2996 if (irq < 0) 2997 return 0; 2998 cfg = alloc_irq_cfg(irq, node); 2999 if (!cfg) { 3000 free_irq_at(irq, NULL); 3001 return 0; 3002 } 3003 3004 raw_spin_lock_irqsave(&vector_lock, flags); 3005 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3006 ret = irq; 3007 raw_spin_unlock_irqrestore(&vector_lock, flags); 3008 3009 if (ret) { 3010 irq_set_chip_data(irq, cfg); 3011 irq_clear_status_flags(irq, IRQ_NOREQUEST); 3012 } else { 3013 free_irq_at(irq, cfg); 3014 } 3015 return ret; 3016 } 3017 3018 int create_irq(void) 3019 { 3020 int node = cpu_to_node(0); 3021 unsigned int irq_want; 3022 int irq; 3023 3024 irq_want = nr_irqs_gsi; 3025 irq = create_irq_nr(irq_want, node); 3026 3027 if (irq == 0) 3028 irq = -1; 3029 3030 return irq; 3031 } 3032 3033 void destroy_irq(unsigned int irq) 3034 { 3035 struct irq_cfg *cfg = irq_get_chip_data(irq); 3036 unsigned long flags; 3037 3038 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3039 3040 if (irq_remapped(cfg)) 3041 free_remapped_irq(irq); 3042 raw_spin_lock_irqsave(&vector_lock, flags); 3043 __clear_irq_vector(irq, cfg); 3044 raw_spin_unlock_irqrestore(&vector_lock, flags); 3045 free_irq_at(irq, cfg); 3046 } 3047 3048 /* 3049 * MSI message composition 3050 */ 3051 #ifdef CONFIG_PCI_MSI 3052 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3053 struct msi_msg *msg, u8 hpet_id) 3054 { 3055 struct irq_cfg *cfg; 3056 int err; 3057 unsigned dest; 3058 3059 if (disable_apic) 3060 return -ENXIO; 3061 3062 cfg = irq_cfg(irq); 3063 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3064 if (err) 3065 return err; 3066 3067 err = apic->cpu_mask_to_apicid_and(cfg->domain, 3068 apic->target_cpus(), &dest); 3069 if (err) 3070 return err; 3071 3072 if (irq_remapped(cfg)) { 3073 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); 3074 return err; 3075 } 3076 3077 if (x2apic_enabled()) 3078 msg->address_hi = MSI_ADDR_BASE_HI | 3079 MSI_ADDR_EXT_DEST_ID(dest); 3080 else 3081 
msg->address_hi = MSI_ADDR_BASE_HI; 3082 3083 msg->address_lo = 3084 MSI_ADDR_BASE_LO | 3085 ((apic->irq_dest_mode == 0) ? 3086 MSI_ADDR_DEST_MODE_PHYSICAL: 3087 MSI_ADDR_DEST_MODE_LOGICAL) | 3088 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3089 MSI_ADDR_REDIRECTION_CPU: 3090 MSI_ADDR_REDIRECTION_LOWPRI) | 3091 MSI_ADDR_DEST_ID(dest); 3092 3093 msg->data = 3094 MSI_DATA_TRIGGER_EDGE | 3095 MSI_DATA_LEVEL_ASSERT | 3096 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3097 MSI_DATA_DELIVERY_FIXED: 3098 MSI_DATA_DELIVERY_LOWPRI) | 3099 MSI_DATA_VECTOR(cfg->vector); 3100 3101 return err; 3102 } 3103 3104 static int 3105 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3106 { 3107 struct irq_cfg *cfg = data->chip_data; 3108 struct msi_msg msg; 3109 unsigned int dest; 3110 3111 if (__ioapic_set_affinity(data, mask, &dest)) 3112 return -1; 3113 3114 __get_cached_msi_msg(data->msi_desc, &msg); 3115 3116 msg.data &= ~MSI_DATA_VECTOR_MASK; 3117 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3118 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3119 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3120 3121 __write_msi_msg(data->msi_desc, &msg); 3122 3123 return IRQ_SET_MASK_OK_NOCOPY; 3124 } 3125 3126 /* 3127 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3128 * which implement the MSI or MSI-X Capability Structure. 3129 */ 3130 static struct irq_chip msi_chip = { 3131 .name = "PCI-MSI", 3132 .irq_unmask = unmask_msi_irq, 3133 .irq_mask = mask_msi_irq, 3134 .irq_ack = ack_apic_edge, 3135 .irq_set_affinity = msi_set_affinity, 3136 .irq_retrigger = ioapic_retrigger_irq, 3137 }; 3138 3139 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3140 { 3141 struct irq_chip *chip = &msi_chip; 3142 struct msi_msg msg; 3143 int ret; 3144 3145 ret = msi_compose_msg(dev, irq, &msg, -1); 3146 if (ret < 0) 3147 return ret; 3148 3149 irq_set_msi_desc(irq, msidesc); 3150 write_msi_msg(irq, &msg); 3151 3152 if (irq_remapped(irq_get_chip_data(irq))) { 3153 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3154 irq_remap_modify_chip_defaults(chip); 3155 } 3156 3157 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3158 3159 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3160 3161 return 0; 3162 } 3163 3164 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3165 { 3166 int node, ret, sub_handle, index = 0; 3167 unsigned int irq, irq_want; 3168 struct msi_desc *msidesc; 3169 3170 /* x86 doesn't support multiple MSI yet */ 3171 if (type == PCI_CAP_ID_MSI && nvec > 1) 3172 return 1; 3173 3174 node = dev_to_node(&dev->dev); 3175 irq_want = nr_irqs_gsi; 3176 sub_handle = 0; 3177 list_for_each_entry(msidesc, &dev->msi_list, list) { 3178 irq = create_irq_nr(irq_want, node); 3179 if (irq == 0) 3180 return -1; 3181 irq_want = irq + 1; 3182 if (!irq_remapping_enabled) 3183 goto no_ir; 3184 3185 if (!sub_handle) { 3186 /* 3187 * allocate the consecutive block of IRTE's 3188 * for 'nvec' 3189 */ 3190 index = msi_alloc_remapped_irq(dev, irq, nvec); 3191 if (index < 0) { 3192 ret = index; 3193 goto error; 3194 } 3195 } else { 3196 ret = msi_setup_remapped_irq(dev, irq, index, 3197 sub_handle); 3198 if (ret < 0) 3199 goto error; 3200 } 3201 no_ir: 3202 ret = setup_msi_irq(dev, msidesc, irq); 3203 if (ret < 0) 3204 goto error; 3205 sub_handle++; 3206 } 3207 return 0; 3208 3209 error: 3210 destroy_irq(irq); 3211 return ret; 3212 } 3213 3214 void native_teardown_msi_irq(unsigned int irq) 3215 { 3216 destroy_irq(irq); 3217 } 3218 3219 #ifdef 
CONFIG_DMAR_TABLE 3220 static int 3221 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3222 bool force) 3223 { 3224 struct irq_cfg *cfg = data->chip_data; 3225 unsigned int dest, irq = data->irq; 3226 struct msi_msg msg; 3227 3228 if (__ioapic_set_affinity(data, mask, &dest)) 3229 return -1; 3230 3231 dmar_msi_read(irq, &msg); 3232 3233 msg.data &= ~MSI_DATA_VECTOR_MASK; 3234 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3235 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3236 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3237 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3238 3239 dmar_msi_write(irq, &msg); 3240 3241 return IRQ_SET_MASK_OK_NOCOPY; 3242 } 3243 3244 static struct irq_chip dmar_msi_type = { 3245 .name = "DMAR_MSI", 3246 .irq_unmask = dmar_msi_unmask, 3247 .irq_mask = dmar_msi_mask, 3248 .irq_ack = ack_apic_edge, 3249 .irq_set_affinity = dmar_msi_set_affinity, 3250 .irq_retrigger = ioapic_retrigger_irq, 3251 }; 3252 3253 int arch_setup_dmar_msi(unsigned int irq) 3254 { 3255 int ret; 3256 struct msi_msg msg; 3257 3258 ret = msi_compose_msg(NULL, irq, &msg, -1); 3259 if (ret < 0) 3260 return ret; 3261 dmar_msi_write(irq, &msg); 3262 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3263 "edge"); 3264 return 0; 3265 } 3266 #endif 3267 3268 #ifdef CONFIG_HPET_TIMER 3269 3270 static int hpet_msi_set_affinity(struct irq_data *data, 3271 const struct cpumask *mask, bool force) 3272 { 3273 struct irq_cfg *cfg = data->chip_data; 3274 struct msi_msg msg; 3275 unsigned int dest; 3276 3277 if (__ioapic_set_affinity(data, mask, &dest)) 3278 return -1; 3279 3280 hpet_msi_read(data->handler_data, &msg); 3281 3282 msg.data &= ~MSI_DATA_VECTOR_MASK; 3283 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3284 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3285 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3286 3287 hpet_msi_write(data->handler_data, &msg); 3288 3289 return IRQ_SET_MASK_OK_NOCOPY; 3290 } 3291 3292 static struct irq_chip hpet_msi_type = { 3293 .name = "HPET_MSI", 3294 .irq_unmask = hpet_msi_unmask, 3295 .irq_mask = hpet_msi_mask, 3296 .irq_ack = ack_apic_edge, 3297 .irq_set_affinity = hpet_msi_set_affinity, 3298 .irq_retrigger = ioapic_retrigger_irq, 3299 }; 3300 3301 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3302 { 3303 struct irq_chip *chip = &hpet_msi_type; 3304 struct msi_msg msg; 3305 int ret; 3306 3307 if (irq_remapping_enabled) { 3308 ret = setup_hpet_msi_remapped(irq, id); 3309 if (ret) 3310 return ret; 3311 } 3312 3313 ret = msi_compose_msg(NULL, irq, &msg, id); 3314 if (ret < 0) 3315 return ret; 3316 3317 hpet_msi_write(irq_get_handler_data(irq), &msg); 3318 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3319 if (irq_remapped(irq_get_chip_data(irq))) 3320 irq_remap_modify_chip_defaults(chip); 3321 3322 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3323 return 0; 3324 } 3325 #endif 3326 3327 #endif /* CONFIG_PCI_MSI */ 3328 /* 3329 * Hypertransport interrupt support 3330 */ 3331 #ifdef CONFIG_HT_IRQ 3332 3333 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3334 { 3335 struct ht_irq_msg msg; 3336 fetch_ht_irq_msg(irq, &msg); 3337 3338 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3339 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3340 3341 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3342 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3343 3344 write_ht_irq_msg(irq, &msg); 3345 } 3346 3347 static int 3348 ht_set_affinity(struct 
irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	target_ht_irq(data->irq, dest, cfg->vector);
	return IRQ_SET_MASK_OK_NOCOPY;
}

static struct irq_chip ht_irq_chip = {
	.name			= "PCI-HT",
	.irq_mask		= mask_ht_irq,
	.irq_unmask		= unmask_ht_irq,
	.irq_ack		= ack_apic_edge,
	.irq_set_affinity	= ht_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	struct ht_irq_msg msg;
	unsigned dest;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

	msg.address_lo =
		HT_IRQ_LOW_BASE |
		HT_IRQ_LOW_DEST_ID(dest) |
		HT_IRQ_LOW_VECTOR(cfg->vector) |
		((apic->irq_dest_mode == 0) ?
			HT_IRQ_LOW_DM_PHYSICAL :
			HT_IRQ_LOW_DM_LOGICAL) |
		HT_IRQ_LOW_RQEOI_EDGE |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			HT_IRQ_LOW_MT_FIXED :
			HT_IRQ_LOW_MT_ARBITRATED) |
		HT_IRQ_LOW_IRQ_MASKED;

	write_ht_irq_msg(irq, &msg);

	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
				      handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);

	return 0;
}
#endif /* CONFIG_HT_IRQ */

static int
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
{
	struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
	int ret;

	if (!cfg)
		return -EINVAL;
	ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
	if (!ret)
		setup_ioapic_irq(irq, cfg, attr);
	return ret;
}

int io_apic_setup_irq_pin_once(unsigned int irq, int node,
			       struct io_apic_irq_attr *attr)
{
	unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
	int ret;

	/* Avoid redundant programming */
	if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mpc_ioapic_id(ioapic_idx), pin);
		return 0;
	}
	ret = io_apic_setup_irq_pin(irq, node, attr);
	if (!ret)
		set_bit(pin, ioapics[ioapic_idx].pin_programmed);
	return ret;
}

static int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* The register returns the maximum redirection entry index
	 * supported, which is one less than the total number of
	 * redirection entries.
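	 * (E.g. a 24-pin IO-APIC reports entries == 23, so the function
	 * returns 24.)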
3459 */ 3460 return reg_01.bits.entries + 1; 3461 } 3462 3463 static void __init probe_nr_irqs_gsi(void) 3464 { 3465 int nr; 3466 3467 nr = gsi_top + NR_IRQS_LEGACY; 3468 if (nr > nr_irqs_gsi) 3469 nr_irqs_gsi = nr; 3470 3471 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3472 } 3473 3474 int get_nr_irqs_gsi(void) 3475 { 3476 return nr_irqs_gsi; 3477 } 3478 3479 int __init arch_probe_nr_irqs(void) 3480 { 3481 int nr; 3482 3483 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3484 nr_irqs = NR_VECTORS * nr_cpu_ids; 3485 3486 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3487 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3488 /* 3489 * for MSI and HT dyn irq 3490 */ 3491 nr += nr_irqs_gsi * 16; 3492 #endif 3493 if (nr < nr_irqs) 3494 nr_irqs = nr; 3495 3496 return NR_IRQS_LEGACY; 3497 } 3498 3499 int io_apic_set_pci_routing(struct device *dev, int irq, 3500 struct io_apic_irq_attr *irq_attr) 3501 { 3502 int node; 3503 3504 if (!IO_APIC_IRQ(irq)) { 3505 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3506 irq_attr->ioapic); 3507 return -EINVAL; 3508 } 3509 3510 node = dev ? dev_to_node(dev) : cpu_to_node(0); 3511 3512 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3513 } 3514 3515 #ifdef CONFIG_X86_32 3516 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3517 { 3518 union IO_APIC_reg_00 reg_00; 3519 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3520 physid_mask_t tmp; 3521 unsigned long flags; 3522 int i = 0; 3523 3524 /* 3525 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3526 * buses (one for LAPICs, one for IOAPICs), where predecessors only 3527 * supports up to 16 on one shared APIC bus. 3528 * 3529 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3530 * advantage of new APIC bus architecture. 3531 */ 3532 3533 if (physids_empty(apic_id_map)) 3534 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3535 3536 raw_spin_lock_irqsave(&ioapic_lock, flags); 3537 reg_00.raw = io_apic_read(ioapic, 0); 3538 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3539 3540 if (apic_id >= get_physical_broadcast()) { 3541 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3542 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3543 apic_id = reg_00.bits.ID; 3544 } 3545 3546 /* 3547 * Every APIC in a system must have a unique ID or we get lots of nice 3548 * 'stuck on smp_invalidate_needed IPI wait' messages. 
3549 */ 3550 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3551 3552 for (i = 0; i < get_physical_broadcast(); i++) { 3553 if (!apic->check_apicid_used(&apic_id_map, i)) 3554 break; 3555 } 3556 3557 if (i == get_physical_broadcast()) 3558 panic("Max apic_id exceeded!\n"); 3559 3560 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3561 "trying %d\n", ioapic, apic_id, i); 3562 3563 apic_id = i; 3564 } 3565 3566 apic->apicid_to_cpu_present(apic_id, &tmp); 3567 physids_or(apic_id_map, apic_id_map, tmp); 3568 3569 if (reg_00.bits.ID != apic_id) { 3570 reg_00.bits.ID = apic_id; 3571 3572 raw_spin_lock_irqsave(&ioapic_lock, flags); 3573 io_apic_write(ioapic, 0, reg_00.raw); 3574 reg_00.raw = io_apic_read(ioapic, 0); 3575 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3576 3577 /* Sanity check */ 3578 if (reg_00.bits.ID != apic_id) { 3579 pr_err("IOAPIC[%d]: Unable to change apic_id!\n", 3580 ioapic); 3581 return -1; 3582 } 3583 } 3584 3585 apic_printk(APIC_VERBOSE, KERN_INFO 3586 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3587 3588 return apic_id; 3589 } 3590 3591 static u8 __init io_apic_unique_id(u8 id) 3592 { 3593 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3594 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3595 return io_apic_get_unique_id(nr_ioapics, id); 3596 else 3597 return id; 3598 } 3599 #else 3600 static u8 __init io_apic_unique_id(u8 id) 3601 { 3602 int i; 3603 DECLARE_BITMAP(used, 256); 3604 3605 bitmap_zero(used, 256); 3606 for (i = 0; i < nr_ioapics; i++) { 3607 __set_bit(mpc_ioapic_id(i), used); 3608 } 3609 if (!test_bit(id, used)) 3610 return id; 3611 return find_first_zero_bit(used, 256); 3612 } 3613 #endif 3614 3615 static int __init io_apic_get_version(int ioapic) 3616 { 3617 union IO_APIC_reg_01 reg_01; 3618 unsigned long flags; 3619 3620 raw_spin_lock_irqsave(&ioapic_lock, flags); 3621 reg_01.raw = io_apic_read(ioapic, 1); 3622 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3623 3624 return reg_01.bits.version; 3625 } 3626 3627 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3628 { 3629 int ioapic, pin, idx; 3630 3631 if (skip_ioapic_setup) 3632 return -1; 3633 3634 ioapic = mp_find_ioapic(gsi); 3635 if (ioapic < 0) 3636 return -1; 3637 3638 pin = mp_find_ioapic_pin(ioapic, gsi); 3639 if (pin < 0) 3640 return -1; 3641 3642 idx = find_irq_entry(ioapic, pin, mp_INT); 3643 if (idx < 0) 3644 return -1; 3645 3646 *trigger = irq_trigger(idx); 3647 *polarity = irq_polarity(idx); 3648 return 0; 3649 } 3650 3651 /* 3652 * This function currently is only a helper for the i386 smp boot process where 3653 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3654 * so mask in all cases should simply be apic->target_cpus() 3655 */ 3656 #ifdef CONFIG_SMP 3657 void __init setup_ioapic_dest(void) 3658 { 3659 int pin, ioapic, irq, irq_entry; 3660 const struct cpumask *mask; 3661 struct irq_data *idata; 3662 3663 if (skip_ioapic_setup == 1) 3664 return; 3665 3666 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3667 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3668 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3669 if (irq_entry == -1) 3670 continue; 3671 irq = pin_2_irq(irq_entry, ioapic, pin); 3672 3673 if ((ioapic > 0) && (irq > 16)) 3674 continue; 3675 3676 idata = irq_get_irq_data(irq); 3677 3678 /* 3679 * Honour affinities which have been set in early boot 3680 */ 3681 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3682 mask = idata->affinity; 3683 else 3684 
mask = apic->target_cpus(); 3685 3686 if (irq_remapping_enabled) 3687 set_remapped_irq_affinity(idata, mask, false); 3688 else 3689 ioapic_set_affinity(idata, mask, false); 3690 } 3691 3692 } 3693 #endif 3694 3695 #define IOAPIC_RESOURCE_NAME_SIZE 11 3696 3697 static struct resource *ioapic_resources; 3698 3699 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3700 { 3701 unsigned long n; 3702 struct resource *res; 3703 char *mem; 3704 int i; 3705 3706 if (nr_ioapics <= 0) 3707 return NULL; 3708 3709 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3710 n *= nr_ioapics; 3711 3712 mem = alloc_bootmem(n); 3713 res = (void *)mem; 3714 3715 mem += sizeof(struct resource) * nr_ioapics; 3716 3717 for (i = 0; i < nr_ioapics; i++) { 3718 res[i].name = mem; 3719 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3720 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3721 mem += IOAPIC_RESOURCE_NAME_SIZE; 3722 } 3723 3724 ioapic_resources = res; 3725 3726 return res; 3727 } 3728 3729 void __init native_io_apic_init_mappings(void) 3730 { 3731 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3732 struct resource *ioapic_res; 3733 int i; 3734 3735 ioapic_res = ioapic_setup_resources(nr_ioapics); 3736 for (i = 0; i < nr_ioapics; i++) { 3737 if (smp_found_config) { 3738 ioapic_phys = mpc_ioapic_addr(i); 3739 #ifdef CONFIG_X86_32 3740 if (!ioapic_phys) { 3741 printk(KERN_ERR 3742 "WARNING: bogus zero IO-APIC " 3743 "address found in MPTABLE, " 3744 "disabling IO/APIC support!\n"); 3745 smp_found_config = 0; 3746 skip_ioapic_setup = 1; 3747 goto fake_ioapic_page; 3748 } 3749 #endif 3750 } else { 3751 #ifdef CONFIG_X86_32 3752 fake_ioapic_page: 3753 #endif 3754 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3755 ioapic_phys = __pa(ioapic_phys); 3756 } 3757 set_fixmap_nocache(idx, ioapic_phys); 3758 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3759 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3760 ioapic_phys); 3761 idx++; 3762 3763 ioapic_res->start = ioapic_phys; 3764 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3765 ioapic_res++; 3766 } 3767 3768 probe_nr_irqs_gsi(); 3769 } 3770 3771 void __init ioapic_insert_resources(void) 3772 { 3773 int i; 3774 struct resource *r = ioapic_resources; 3775 3776 if (!r) { 3777 if (nr_ioapics > 0) 3778 printk(KERN_ERR 3779 "IO APIC resources couldn't be allocated.\n"); 3780 return; 3781 } 3782 3783 for (i = 0; i < nr_ioapics; i++) { 3784 insert_resource(&iomem_resource, r); 3785 r++; 3786 } 3787 } 3788 3789 int mp_find_ioapic(u32 gsi) 3790 { 3791 int i = 0; 3792 3793 if (nr_ioapics == 0) 3794 return -1; 3795 3796 /* Find the IOAPIC that manages this GSI. 
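	 * (For example, with IOAPIC[0] serving GSIs 0-23 and IOAPIC[1]
	 * serving GSIs 24-55 - illustrative numbers - GSI 30 resolves to
	 * index 1 here, and mp_find_ioapic_pin() below then yields
	 * pin 30 - 24 = 6.)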
*/ 3797 for (i = 0; i < nr_ioapics; i++) { 3798 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3799 if ((gsi >= gsi_cfg->gsi_base) 3800 && (gsi <= gsi_cfg->gsi_end)) 3801 return i; 3802 } 3803 3804 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3805 return -1; 3806 } 3807 3808 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3809 { 3810 struct mp_ioapic_gsi *gsi_cfg; 3811 3812 if (WARN_ON(ioapic == -1)) 3813 return -1; 3814 3815 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3816 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3817 return -1; 3818 3819 return gsi - gsi_cfg->gsi_base; 3820 } 3821 3822 static __init int bad_ioapic(unsigned long address) 3823 { 3824 if (nr_ioapics >= MAX_IO_APICS) { 3825 pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n", 3826 MAX_IO_APICS, nr_ioapics); 3827 return 1; 3828 } 3829 if (!address) { 3830 pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n"); 3831 return 1; 3832 } 3833 return 0; 3834 } 3835 3836 static __init int bad_ioapic_register(int idx) 3837 { 3838 union IO_APIC_reg_00 reg_00; 3839 union IO_APIC_reg_01 reg_01; 3840 union IO_APIC_reg_02 reg_02; 3841 3842 reg_00.raw = io_apic_read(idx, 0); 3843 reg_01.raw = io_apic_read(idx, 1); 3844 reg_02.raw = io_apic_read(idx, 2); 3845 3846 if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) { 3847 pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n", 3848 mpc_ioapic_addr(idx)); 3849 return 1; 3850 } 3851 3852 return 0; 3853 } 3854 3855 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3856 { 3857 int idx = 0; 3858 int entries; 3859 struct mp_ioapic_gsi *gsi_cfg; 3860 3861 if (bad_ioapic(address)) 3862 return; 3863 3864 idx = nr_ioapics; 3865 3866 ioapics[idx].mp_config.type = MP_IOAPIC; 3867 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3868 ioapics[idx].mp_config.apicaddr = address; 3869 3870 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3871 3872 if (bad_ioapic_register(idx)) { 3873 clear_fixmap(FIX_IO_APIC_BASE_0 + idx); 3874 return; 3875 } 3876 3877 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 3878 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 3879 3880 /* 3881 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 3882 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 3883 */ 3884 entries = io_apic_get_redir_entries(idx); 3885 gsi_cfg = mp_ioapic_gsi_routing(idx); 3886 gsi_cfg->gsi_base = gsi_base; 3887 gsi_cfg->gsi_end = gsi_base + entries - 1; 3888 3889 /* 3890 * The number of IO-APIC IRQ registers (== #pins): 3891 */ 3892 ioapics[idx].nr_registers = entries; 3893 3894 if (gsi_cfg->gsi_end >= gsi_top) 3895 gsi_top = gsi_cfg->gsi_end + 1; 3896 3897 pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", 3898 idx, mpc_ioapic_id(idx), 3899 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 3900 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 3901 3902 nr_ioapics++; 3903 } 3904 3905 /* Enable IOAPIC early just for system timer */ 3906 void __init pre_init_apic_IRQ0(void) 3907 { 3908 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 3909 3910 printk(KERN_INFO "Early APIC setup for system timer0\n"); 3911 #ifndef CONFIG_SMP 3912 physid_set_mask_of_physid(boot_cpu_physical_apicid, 3913 &phys_cpu_present_map); 3914 #endif 3915 setup_local_APIC(); 3916 3917 io_apic_setup_irq_pin(0, 0, &attr); 3918 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 3919 "edge"); 3920 } 3921
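
/*
 * Illustrative sketch only (hence the #if 0): one way a caller might
 * pair the create_irq()/destroy_irq() helpers above with request_irq().
 * The handler, its name string and the error policy are made up for
 * this example and are not part of this file's interfaces.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* handle the (hypothetical) device event */
	return IRQ_HANDLED;
}

static int example_alloc_dynamic_irq(void)
{
	int irq = create_irq();		/* allocates above nr_irqs_gsi */

	if (irq < 0)
		return irq;		/* no free vector/irq available */

	if (request_irq(irq, example_handler, 0, "example", NULL)) {
		destroy_irq(irq);	/* release the vector and irq_cfg */
		return -EBUSY;
	}
	return irq;
}
#endif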