/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
87 */ 88 struct IO_APIC_route_entry *saved_registers; 89 /* I/O APIC config */ 90 struct mpc_ioapic mp_config; 91 /* IO APIC gsi routing info */ 92 struct mp_ioapic_gsi gsi_config; 93 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 94 } ioapics[MAX_IO_APICS]; 95 96 #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver 97 98 int mpc_ioapic_id(int ioapic_idx) 99 { 100 return ioapics[ioapic_idx].mp_config.apicid; 101 } 102 103 unsigned int mpc_ioapic_addr(int ioapic_idx) 104 { 105 return ioapics[ioapic_idx].mp_config.apicaddr; 106 } 107 108 struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx) 109 { 110 return &ioapics[ioapic_idx].gsi_config; 111 } 112 113 int nr_ioapics; 114 115 /* The one past the highest gsi number used */ 116 u32 gsi_top; 117 118 /* MP IRQ source entries */ 119 struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; 120 121 /* # of MP IRQ source entries */ 122 int mp_irq_entries; 123 124 /* GSI interrupts */ 125 static int nr_irqs_gsi = NR_IRQS_LEGACY; 126 127 #ifdef CONFIG_EISA 128 int mp_bus_id_to_type[MAX_MP_BUSSES]; 129 #endif 130 131 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); 132 133 int skip_ioapic_setup; 134 135 /** 136 * disable_ioapic_support() - disables ioapic support at runtime 137 */ 138 void disable_ioapic_support(void) 139 { 140 #ifdef CONFIG_PCI 141 noioapicquirk = 1; 142 noioapicreroute = -1; 143 #endif 144 skip_ioapic_setup = 1; 145 } 146 147 static int __init parse_noapic(char *str) 148 { 149 /* disable IO-APIC */ 150 disable_ioapic_support(); 151 return 0; 152 } 153 early_param("noapic", parse_noapic); 154 155 static int io_apic_setup_irq_pin(unsigned int irq, int node, 156 struct io_apic_irq_attr *attr); 157 158 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ 159 void mp_save_irq(struct mpc_intsrc *m) 160 { 161 int i; 162 163 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," 164 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 165 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, 166 m->srcbusirq, m->dstapic, m->dstirq); 167 168 for (i = 0; i < mp_irq_entries; i++) { 169 if (!memcmp(&mp_irqs[i], m, sizeof(*m))) 170 return; 171 } 172 173 memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m)); 174 if (++mp_irq_entries == MAX_IRQ_SOURCES) 175 panic("Max # of irq sources exceeded!!\n"); 176 } 177 178 struct irq_pin_list { 179 int apic, pin; 180 struct irq_pin_list *next; 181 }; 182 183 static struct irq_pin_list *alloc_irq_pin_list(int node) 184 { 185 return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); 186 } 187 188 189 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. 
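
/*
 * Illustrative sketch, not part of the original file: one way the accessors
 * above (nr_ioapics, mpc_ioapic_id(), mpc_ioapic_addr() and
 * mp_ioapic_gsi_routing()) might be used to dump the parsed IO-APIC
 * configuration.  The helper name and the APIC_VERBOSE printout are
 * assumptions made for this example only.
 */
static void __init ioapic_dump_config_example(void)
{
	int idx;

	for (idx = 0; idx < nr_ioapics; idx++) {
		struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(idx);

		/* Example output only; all values come from the parsed MP/ACPI data. */
		apic_printk(APIC_VERBOSE,
			    "IOAPIC[%d]: id %d at 0x%x, GSI base %u\n",
			    idx, mpc_ioapic_id(idx), mpc_ioapic_addr(idx),
			    gsi_cfg->gsi_base);
	}
}
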
*/ 190 static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; 191 192 int __init arch_early_irq_init(void) 193 { 194 struct irq_cfg *cfg; 195 int count, node, i; 196 197 if (!legacy_pic->nr_legacy_irqs) 198 io_apic_irqs = ~0UL; 199 200 for (i = 0; i < nr_ioapics; i++) { 201 ioapics[i].saved_registers = 202 kzalloc(sizeof(struct IO_APIC_route_entry) * 203 ioapics[i].nr_registers, GFP_KERNEL); 204 if (!ioapics[i].saved_registers) 205 pr_err("IOAPIC %d: suspend/resume impossible!\n", i); 206 } 207 208 cfg = irq_cfgx; 209 count = ARRAY_SIZE(irq_cfgx); 210 node = cpu_to_node(0); 211 212 /* Make sure the legacy interrupts are marked in the bitmap */ 213 irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); 214 215 for (i = 0; i < count; i++) { 216 irq_set_chip_data(i, &cfg[i]); 217 zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); 218 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); 219 /* 220 * For legacy IRQ's, start with assigning irq0 to irq15 to 221 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's. 222 */ 223 if (i < legacy_pic->nr_legacy_irqs) { 224 cfg[i].vector = IRQ0_VECTOR + i; 225 cpumask_setall(cfg[i].domain); 226 } 227 } 228 229 return 0; 230 } 231 232 static struct irq_cfg *irq_cfg(unsigned int irq) 233 { 234 return irq_get_chip_data(irq); 235 } 236 237 static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 238 { 239 struct irq_cfg *cfg; 240 241 cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); 242 if (!cfg) 243 return NULL; 244 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) 245 goto out_cfg; 246 if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) 247 goto out_domain; 248 return cfg; 249 out_domain: 250 free_cpumask_var(cfg->domain); 251 out_cfg: 252 kfree(cfg); 253 return NULL; 254 } 255 256 static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) 257 { 258 if (!cfg) 259 return; 260 irq_set_chip_data(at, NULL); 261 free_cpumask_var(cfg->domain); 262 free_cpumask_var(cfg->old_domain); 263 kfree(cfg); 264 } 265 266 static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) 267 { 268 int res = irq_alloc_desc_at(at, node); 269 struct irq_cfg *cfg; 270 271 if (res < 0) { 272 if (res != -EEXIST) 273 return NULL; 274 cfg = irq_get_chip_data(at); 275 if (cfg) 276 return cfg; 277 } 278 279 cfg = alloc_irq_cfg(at, node); 280 if (cfg) 281 irq_set_chip_data(at, cfg); 282 else 283 irq_free_desc(at); 284 return cfg; 285 } 286 287 static int alloc_irqs_from(unsigned int from, unsigned int count, int node) 288 { 289 return irq_alloc_descs_from(from, count, node); 290 } 291 292 static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 293 { 294 free_irq_cfg(at, cfg); 295 irq_free_desc(at); 296 } 297 298 299 struct io_apic { 300 unsigned int index; 301 unsigned int unused[3]; 302 unsigned int data; 303 unsigned int unused2[11]; 304 unsigned int eoi; 305 }; 306 307 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) 308 { 309 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) 310 + (mpc_ioapic_addr(idx) & ~PAGE_MASK); 311 } 312 313 static inline void io_apic_eoi(unsigned int apic, unsigned int vector) 314 { 315 struct io_apic __iomem *io_apic = io_apic_base(apic); 316 writel(vector, &io_apic->eoi); 317 } 318 319 unsigned int native_io_apic_read(unsigned int apic, unsigned int reg) 320 { 321 struct io_apic __iomem *io_apic = io_apic_base(apic); 322 writel(reg, &io_apic->index); 323 return readl(&io_apic->data); 324 } 325 326 void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int 
value) 327 { 328 struct io_apic __iomem *io_apic = io_apic_base(apic); 329 330 writel(reg, &io_apic->index); 331 writel(value, &io_apic->data); 332 } 333 334 /* 335 * Re-write a value: to be used for read-modify-write 336 * cycles where the read already set up the index register. 337 * 338 * Older SiS APIC requires we rewrite the index register 339 */ 340 void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) 341 { 342 struct io_apic __iomem *io_apic = io_apic_base(apic); 343 344 if (sis_apic_bug) 345 writel(reg, &io_apic->index); 346 writel(value, &io_apic->data); 347 } 348 349 union entry_union { 350 struct { u32 w1, w2; }; 351 struct IO_APIC_route_entry entry; 352 }; 353 354 static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin) 355 { 356 union entry_union eu; 357 358 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 359 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 360 361 return eu.entry; 362 } 363 364 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) 365 { 366 union entry_union eu; 367 unsigned long flags; 368 369 raw_spin_lock_irqsave(&ioapic_lock, flags); 370 eu.entry = __ioapic_read_entry(apic, pin); 371 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 372 373 return eu.entry; 374 } 375 376 /* 377 * When we write a new IO APIC routing entry, we need to write the high 378 * word first! If the mask bit in the low word is clear, we will enable 379 * the interrupt, and we need to make sure the entry is fully populated 380 * before that happens. 381 */ 382 static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 383 { 384 union entry_union eu = {{0, 0}}; 385 386 eu.entry = e; 387 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 388 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 389 } 390 391 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 392 { 393 unsigned long flags; 394 395 raw_spin_lock_irqsave(&ioapic_lock, flags); 396 __ioapic_write_entry(apic, pin, e); 397 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 398 } 399 400 /* 401 * When we mask an IO APIC routing entry, we need to write the low 402 * word first, in order to set the mask bit before we change the 403 * high bits! 404 */ 405 static void ioapic_mask_entry(int apic, int pin) 406 { 407 unsigned long flags; 408 union entry_union eu = { .entry.mask = 1 }; 409 410 raw_spin_lock_irqsave(&ioapic_lock, flags); 411 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 412 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 413 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 414 } 415 416 /* 417 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 418 * shared ISA-space IRQs, so we have to support them. We are super 419 * fast in the common case, and fast for shared ISA-space IRQs. 
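
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * update one redirection entry while respecting the ordering rules
 * documented above (mask the pin first; ioapic_write_entry() then writes
 * the high word before the low word).  The helper name and the new_vector
 * parameter are assumptions made for this example only.
 */
static void ioapic_update_vector_example(int apic, int pin, u8 new_vector)
{
	struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);

	/* Mask the pin so the device cannot raise the IRQ mid-update. */
	entry.mask = 1;
	ioapic_write_entry(apic, pin, entry);

	/* Edit the field(s) of interest on the local copy. */
	entry.vector = new_vector;

	/* Unmask again; the fully populated entry is written high word first. */
	entry.mask = 0;
	ioapic_write_entry(apic, pin, entry);
}
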
420 */ 421 static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 422 { 423 struct irq_pin_list **last, *entry; 424 425 /* don't allow duplicates */ 426 last = &cfg->irq_2_pin; 427 for_each_irq_pin(entry, cfg->irq_2_pin) { 428 if (entry->apic == apic && entry->pin == pin) 429 return 0; 430 last = &entry->next; 431 } 432 433 entry = alloc_irq_pin_list(node); 434 if (!entry) { 435 pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", 436 node, apic, pin); 437 return -ENOMEM; 438 } 439 entry->apic = apic; 440 entry->pin = pin; 441 442 *last = entry; 443 return 0; 444 } 445 446 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 447 { 448 if (__add_pin_to_irq_node(cfg, node, apic, pin)) 449 panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); 450 } 451 452 /* 453 * Reroute an IRQ to a different pin. 454 */ 455 static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, 456 int oldapic, int oldpin, 457 int newapic, int newpin) 458 { 459 struct irq_pin_list *entry; 460 461 for_each_irq_pin(entry, cfg->irq_2_pin) { 462 if (entry->apic == oldapic && entry->pin == oldpin) { 463 entry->apic = newapic; 464 entry->pin = newpin; 465 /* every one is different, right? */ 466 return; 467 } 468 } 469 470 /* old apic/pin didn't exist, so just add new ones */ 471 add_pin_to_irq_node(cfg, node, newapic, newpin); 472 } 473 474 static void __io_apic_modify_irq(struct irq_pin_list *entry, 475 int mask_and, int mask_or, 476 void (*final)(struct irq_pin_list *entry)) 477 { 478 unsigned int reg, pin; 479 480 pin = entry->pin; 481 reg = io_apic_read(entry->apic, 0x10 + pin * 2); 482 reg &= mask_and; 483 reg |= mask_or; 484 io_apic_modify(entry->apic, 0x10 + pin * 2, reg); 485 if (final) 486 final(entry); 487 } 488 489 static void io_apic_modify_irq(struct irq_cfg *cfg, 490 int mask_and, int mask_or, 491 void (*final)(struct irq_pin_list *entry)) 492 { 493 struct irq_pin_list *entry; 494 495 for_each_irq_pin(entry, cfg->irq_2_pin) 496 __io_apic_modify_irq(entry, mask_and, mask_or, final); 497 } 498 499 static void io_apic_sync(struct irq_pin_list *entry) 500 { 501 /* 502 * Synchronize the IO-APIC and the CPU by doing 503 * a dummy read from the IO-APIC 504 */ 505 struct io_apic __iomem *io_apic; 506 507 io_apic = io_apic_base(entry->apic); 508 readl(&io_apic->data); 509 } 510 511 static void mask_ioapic(struct irq_cfg *cfg) 512 { 513 unsigned long flags; 514 515 raw_spin_lock_irqsave(&ioapic_lock, flags); 516 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 517 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 518 } 519 520 static void mask_ioapic_irq(struct irq_data *data) 521 { 522 mask_ioapic(data->chip_data); 523 } 524 525 static void __unmask_ioapic(struct irq_cfg *cfg) 526 { 527 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 528 } 529 530 static void unmask_ioapic(struct irq_cfg *cfg) 531 { 532 unsigned long flags; 533 534 raw_spin_lock_irqsave(&ioapic_lock, flags); 535 __unmask_ioapic(cfg); 536 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 537 } 538 539 static void unmask_ioapic_irq(struct irq_data *data) 540 { 541 unmask_ioapic(data->chip_data); 542 } 543 544 /* 545 * IO-APIC versions below 0x20 don't support EOI register. 
546 * For the record, here is the information about various versions: 547 * 0Xh 82489DX 548 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant 549 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant 550 * 30h-FFh Reserved 551 * 552 * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic 553 * version as 0x2. This is an error with documentation and these ICH chips 554 * use io-apic's of version 0x20. 555 * 556 * For IO-APIC's with EOI register, we use that to do an explicit EOI. 557 * Otherwise, we simulate the EOI message manually by changing the trigger 558 * mode to edge and then back to level, with RTE being masked during this. 559 */ 560 static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) 561 { 562 if (mpc_ioapic_ver(apic) >= 0x20) { 563 /* 564 * Intr-remapping uses pin number as the virtual vector 565 * in the RTE. Actual vector is programmed in 566 * intr-remapping table entry. Hence for the io-apic 567 * EOI we use the pin number. 568 */ 569 if (cfg && irq_remapped(cfg)) 570 io_apic_eoi(apic, pin); 571 else 572 io_apic_eoi(apic, vector); 573 } else { 574 struct IO_APIC_route_entry entry, entry1; 575 576 entry = entry1 = __ioapic_read_entry(apic, pin); 577 578 /* 579 * Mask the entry and change the trigger mode to edge. 580 */ 581 entry1.mask = 1; 582 entry1.trigger = IOAPIC_EDGE; 583 584 __ioapic_write_entry(apic, pin, entry1); 585 586 /* 587 * Restore the previous level triggered entry. 588 */ 589 __ioapic_write_entry(apic, pin, entry); 590 } 591 } 592 593 void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 594 { 595 struct irq_pin_list *entry; 596 unsigned long flags; 597 598 raw_spin_lock_irqsave(&ioapic_lock, flags); 599 for_each_irq_pin(entry, cfg->irq_2_pin) 600 __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); 601 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 602 } 603 604 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) 605 { 606 struct IO_APIC_route_entry entry; 607 608 /* Check delivery_mode to be sure we're not clearing an SMI pin */ 609 entry = ioapic_read_entry(apic, pin); 610 if (entry.delivery_mode == dest_SMI) 611 return; 612 613 /* 614 * Make sure the entry is masked and re-read the contents to check 615 * if it is a level triggered pin and if the remote-IRR is set. 616 */ 617 if (!entry.mask) { 618 entry.mask = 1; 619 ioapic_write_entry(apic, pin, entry); 620 entry = ioapic_read_entry(apic, pin); 621 } 622 623 if (entry.irr) { 624 unsigned long flags; 625 626 /* 627 * Make sure the trigger mode is set to level. Explicit EOI 628 * doesn't clear the remote-IRR if the trigger mode is not 629 * set to level. 630 */ 631 if (!entry.trigger) { 632 entry.trigger = IOAPIC_LEVEL; 633 ioapic_write_entry(apic, pin, entry); 634 } 635 636 raw_spin_lock_irqsave(&ioapic_lock, flags); 637 __eoi_ioapic_pin(apic, pin, entry.vector, NULL); 638 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 639 } 640 641 /* 642 * Clear the rest of the bits in the IO-APIC RTE except for the mask 643 * bit. 
644 */ 645 ioapic_mask_entry(apic, pin); 646 entry = ioapic_read_entry(apic, pin); 647 if (entry.irr) 648 pr_err("Unable to reset IRR for apic: %d, pin :%d\n", 649 mpc_ioapic_id(apic), pin); 650 } 651 652 static void clear_IO_APIC (void) 653 { 654 int apic, pin; 655 656 for (apic = 0; apic < nr_ioapics; apic++) 657 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) 658 clear_IO_APIC_pin(apic, pin); 659 } 660 661 #ifdef CONFIG_X86_32 662 /* 663 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 664 * specific CPU-side IRQs. 665 */ 666 667 #define MAX_PIRQS 8 668 static int pirq_entries[MAX_PIRQS] = { 669 [0 ... MAX_PIRQS - 1] = -1 670 }; 671 672 static int __init ioapic_pirq_setup(char *str) 673 { 674 int i, max; 675 int ints[MAX_PIRQS+1]; 676 677 get_options(str, ARRAY_SIZE(ints), ints); 678 679 apic_printk(APIC_VERBOSE, KERN_INFO 680 "PIRQ redirection, working around broken MP-BIOS.\n"); 681 max = MAX_PIRQS; 682 if (ints[0] < MAX_PIRQS) 683 max = ints[0]; 684 685 for (i = 0; i < max; i++) { 686 apic_printk(APIC_VERBOSE, KERN_DEBUG 687 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); 688 /* 689 * PIRQs are mapped upside down, usually. 690 */ 691 pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; 692 } 693 return 1; 694 } 695 696 __setup("pirq=", ioapic_pirq_setup); 697 #endif /* CONFIG_X86_32 */ 698 699 /* 700 * Saves all the IO-APIC RTE's 701 */ 702 int save_ioapic_entries(void) 703 { 704 int apic, pin; 705 int err = 0; 706 707 for (apic = 0; apic < nr_ioapics; apic++) { 708 if (!ioapics[apic].saved_registers) { 709 err = -ENOMEM; 710 continue; 711 } 712 713 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) 714 ioapics[apic].saved_registers[pin] = 715 ioapic_read_entry(apic, pin); 716 } 717 718 return err; 719 } 720 721 /* 722 * Mask all IO APIC entries. 723 */ 724 void mask_ioapic_entries(void) 725 { 726 int apic, pin; 727 728 for (apic = 0; apic < nr_ioapics; apic++) { 729 if (!ioapics[apic].saved_registers) 730 continue; 731 732 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { 733 struct IO_APIC_route_entry entry; 734 735 entry = ioapics[apic].saved_registers[pin]; 736 if (!entry.mask) { 737 entry.mask = 1; 738 ioapic_write_entry(apic, pin, entry); 739 } 740 } 741 } 742 } 743 744 /* 745 * Restore IO APIC entries which was saved in the ioapic structure. 746 */ 747 int restore_ioapic_entries(void) 748 { 749 int apic, pin; 750 751 for (apic = 0; apic < nr_ioapics; apic++) { 752 if (!ioapics[apic].saved_registers) 753 continue; 754 755 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) 756 ioapic_write_entry(apic, pin, 757 ioapics[apic].saved_registers[pin]); 758 } 759 return 0; 760 } 761 762 /* 763 * Find the IRQ entry number of a certain pin. 
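
/*
 * Illustrative sketch, not part of the original file: the save/mask/restore
 * sequence a suspend path or interrupt-remapping enable path would drive
 * with the three helpers above.  The function name is an assumption made
 * for this example only.
 */
static int ioapic_suspend_sequence_example(void)
{
	int ret;

	/* Snapshot every RTE into ioapics[].saved_registers. */
	ret = save_ioapic_entries();
	if (ret)
		return ret;

	/* Mask everything so no pin fires while the hardware is reconfigured. */
	mask_ioapic_entries();

	/* ... platform-specific low-level work would happen here ... */

	/* Write the saved RTEs back. */
	return restore_ioapic_entries();
}
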
764 */ 765 static int find_irq_entry(int ioapic_idx, int pin, int type) 766 { 767 int i; 768 769 for (i = 0; i < mp_irq_entries; i++) 770 if (mp_irqs[i].irqtype == type && 771 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) || 772 mp_irqs[i].dstapic == MP_APIC_ALL) && 773 mp_irqs[i].dstirq == pin) 774 return i; 775 776 return -1; 777 } 778 779 /* 780 * Find the pin to which IRQ[irq] (ISA) is connected 781 */ 782 static int __init find_isa_irq_pin(int irq, int type) 783 { 784 int i; 785 786 for (i = 0; i < mp_irq_entries; i++) { 787 int lbus = mp_irqs[i].srcbus; 788 789 if (test_bit(lbus, mp_bus_not_pci) && 790 (mp_irqs[i].irqtype == type) && 791 (mp_irqs[i].srcbusirq == irq)) 792 793 return mp_irqs[i].dstirq; 794 } 795 return -1; 796 } 797 798 static int __init find_isa_irq_apic(int irq, int type) 799 { 800 int i; 801 802 for (i = 0; i < mp_irq_entries; i++) { 803 int lbus = mp_irqs[i].srcbus; 804 805 if (test_bit(lbus, mp_bus_not_pci) && 806 (mp_irqs[i].irqtype == type) && 807 (mp_irqs[i].srcbusirq == irq)) 808 break; 809 } 810 811 if (i < mp_irq_entries) { 812 int ioapic_idx; 813 814 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 815 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic) 816 return ioapic_idx; 817 } 818 819 return -1; 820 } 821 822 #ifdef CONFIG_EISA 823 /* 824 * EISA Edge/Level control register, ELCR 825 */ 826 static int EISA_ELCR(unsigned int irq) 827 { 828 if (irq < legacy_pic->nr_legacy_irqs) { 829 unsigned int port = 0x4d0 + (irq >> 3); 830 return (inb(port) >> (irq & 7)) & 1; 831 } 832 apic_printk(APIC_VERBOSE, KERN_INFO 833 "Broken MPtable reports ISA irq %d\n", irq); 834 return 0; 835 } 836 837 #endif 838 839 /* ISA interrupts are always polarity zero edge triggered, 840 * when listed as conforming in the MP table. */ 841 842 #define default_ISA_trigger(idx) (0) 843 #define default_ISA_polarity(idx) (0) 844 845 /* EISA interrupts are always polarity zero and can be edge or level 846 * trigger depending on the ELCR value. If an interrupt is listed as 847 * EISA conforming in the MP table, that means its trigger type must 848 * be read in from the ELCR */ 849 850 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) 851 #define default_EISA_polarity(idx) default_ISA_polarity(idx) 852 853 /* PCI interrupts are always polarity one level triggered, 854 * when listed as conforming in the MP table. */ 855 856 #define default_PCI_trigger(idx) (1) 857 #define default_PCI_polarity(idx) (1) 858 859 static int irq_polarity(int idx) 860 { 861 int bus = mp_irqs[idx].srcbus; 862 int polarity; 863 864 /* 865 * Determine IRQ line polarity (high active or low active): 866 */ 867 switch (mp_irqs[idx].irqflag & 3) 868 { 869 case 0: /* conforms, ie. bus-type dependent polarity */ 870 if (test_bit(bus, mp_bus_not_pci)) 871 polarity = default_ISA_polarity(idx); 872 else 873 polarity = default_PCI_polarity(idx); 874 break; 875 case 1: /* high active */ 876 { 877 polarity = 0; 878 break; 879 } 880 case 2: /* reserved */ 881 { 882 pr_warn("broken BIOS!!\n"); 883 polarity = 1; 884 break; 885 } 886 case 3: /* low active */ 887 { 888 polarity = 1; 889 break; 890 } 891 default: /* invalid */ 892 { 893 pr_warn("broken BIOS!!\n"); 894 polarity = 1; 895 break; 896 } 897 } 898 return polarity; 899 } 900 901 static int irq_trigger(int idx) 902 { 903 int bus = mp_irqs[idx].srcbus; 904 int trigger; 905 906 /* 907 * Determine IRQ trigger mode (edge or level sensitive): 908 */ 909 switch ((mp_irqs[idx].irqflag>>2) & 3) 910 { 911 case 0: /* conforms, ie. 
bus-type dependent */ 912 if (test_bit(bus, mp_bus_not_pci)) 913 trigger = default_ISA_trigger(idx); 914 else 915 trigger = default_PCI_trigger(idx); 916 #ifdef CONFIG_EISA 917 switch (mp_bus_id_to_type[bus]) { 918 case MP_BUS_ISA: /* ISA pin */ 919 { 920 /* set before the switch */ 921 break; 922 } 923 case MP_BUS_EISA: /* EISA pin */ 924 { 925 trigger = default_EISA_trigger(idx); 926 break; 927 } 928 case MP_BUS_PCI: /* PCI pin */ 929 { 930 /* set before the switch */ 931 break; 932 } 933 default: 934 { 935 pr_warn("broken BIOS!!\n"); 936 trigger = 1; 937 break; 938 } 939 } 940 #endif 941 break; 942 case 1: /* edge */ 943 { 944 trigger = 0; 945 break; 946 } 947 case 2: /* reserved */ 948 { 949 pr_warn("broken BIOS!!\n"); 950 trigger = 1; 951 break; 952 } 953 case 3: /* level */ 954 { 955 trigger = 1; 956 break; 957 } 958 default: /* invalid */ 959 { 960 pr_warn("broken BIOS!!\n"); 961 trigger = 0; 962 break; 963 } 964 } 965 return trigger; 966 } 967 968 static int pin_2_irq(int idx, int apic, int pin) 969 { 970 int irq; 971 int bus = mp_irqs[idx].srcbus; 972 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic); 973 974 /* 975 * Debugging check, we are in big trouble if this message pops up! 976 */ 977 if (mp_irqs[idx].dstirq != pin) 978 pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); 979 980 if (test_bit(bus, mp_bus_not_pci)) { 981 irq = mp_irqs[idx].srcbusirq; 982 } else { 983 u32 gsi = gsi_cfg->gsi_base + pin; 984 985 if (gsi >= NR_IRQS_LEGACY) 986 irq = gsi; 987 else 988 irq = gsi_top + gsi; 989 } 990 991 #ifdef CONFIG_X86_32 992 /* 993 * PCI IRQ command line redirection. Yes, limits are hardcoded. 994 */ 995 if ((pin >= 16) && (pin <= 23)) { 996 if (pirq_entries[pin-16] != -1) { 997 if (!pirq_entries[pin-16]) { 998 apic_printk(APIC_VERBOSE, KERN_DEBUG 999 "disabling PIRQ%d\n", pin-16); 1000 } else { 1001 irq = pirq_entries[pin-16]; 1002 apic_printk(APIC_VERBOSE, KERN_DEBUG 1003 "using PIRQ%d -> IRQ %d\n", 1004 pin-16, irq); 1005 } 1006 } 1007 } 1008 #endif 1009 1010 return irq; 1011 } 1012 1013 /* 1014 * Find a specific PCI IRQ entry. 1015 * Not an __init, possibly needed by modules 1016 */ 1017 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, 1018 struct io_apic_irq_attr *irq_attr) 1019 { 1020 int ioapic_idx, i, best_guess = -1; 1021 1022 apic_printk(APIC_DEBUG, 1023 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", 1024 bus, slot, pin); 1025 if (test_bit(bus, mp_bus_not_pci)) { 1026 apic_printk(APIC_VERBOSE, 1027 "PCI BIOS passed nonexistent PCI bus %d!\n", bus); 1028 return -1; 1029 } 1030 for (i = 0; i < mp_irq_entries; i++) { 1031 int lbus = mp_irqs[i].srcbus; 1032 1033 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1034 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic || 1035 mp_irqs[i].dstapic == MP_APIC_ALL) 1036 break; 1037 1038 if (!test_bit(lbus, mp_bus_not_pci) && 1039 !mp_irqs[i].irqtype && 1040 (bus == lbus) && 1041 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { 1042 int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq); 1043 1044 if (!(ioapic_idx || IO_APIC_IRQ(irq))) 1045 continue; 1046 1047 if (pin == (mp_irqs[i].srcbusirq & 3)) { 1048 set_io_apic_irq_attr(irq_attr, ioapic_idx, 1049 mp_irqs[i].dstirq, 1050 irq_trigger(i), 1051 irq_polarity(i)); 1052 return irq; 1053 } 1054 /* 1055 * Use the first all-but-pin matching entry as a 1056 * best-guess fuzzy result for broken mptables. 
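			 * (i.e. an entry whose bus and slot match but whose INTx
			 * pin does not; trigger and polarity are still filled in
			 * from that entry, so the caller gets a usable, if
			 * approximate, routing).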
1057 */ 1058 if (best_guess < 0) { 1059 set_io_apic_irq_attr(irq_attr, ioapic_idx, 1060 mp_irqs[i].dstirq, 1061 irq_trigger(i), 1062 irq_polarity(i)); 1063 best_guess = irq; 1064 } 1065 } 1066 } 1067 return best_guess; 1068 } 1069 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); 1070 1071 void lock_vector_lock(void) 1072 { 1073 /* Used to the online set of cpus does not change 1074 * during assign_irq_vector. 1075 */ 1076 raw_spin_lock(&vector_lock); 1077 } 1078 1079 void unlock_vector_lock(void) 1080 { 1081 raw_spin_unlock(&vector_lock); 1082 } 1083 1084 static int 1085 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1086 { 1087 /* 1088 * NOTE! The local APIC isn't very good at handling 1089 * multiple interrupts at the same interrupt level. 1090 * As the interrupt level is determined by taking the 1091 * vector number and shifting that right by 4, we 1092 * want to spread these out a bit so that they don't 1093 * all fall in the same interrupt level. 1094 * 1095 * Also, we've got to be careful not to trash gate 1096 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1097 */ 1098 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; 1099 static int current_offset = VECTOR_OFFSET_START % 16; 1100 int cpu, err; 1101 cpumask_var_t tmp_mask; 1102 1103 if (cfg->move_in_progress) 1104 return -EBUSY; 1105 1106 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) 1107 return -ENOMEM; 1108 1109 /* Only try and allocate irqs on cpus that are present */ 1110 err = -ENOSPC; 1111 cpumask_clear(cfg->old_domain); 1112 cpu = cpumask_first_and(mask, cpu_online_mask); 1113 while (cpu < nr_cpu_ids) { 1114 int new_cpu, vector, offset; 1115 1116 apic->vector_allocation_domain(cpu, tmp_mask, mask); 1117 1118 if (cpumask_subset(tmp_mask, cfg->domain)) { 1119 err = 0; 1120 if (cpumask_equal(tmp_mask, cfg->domain)) 1121 break; 1122 /* 1123 * New cpumask using the vector is a proper subset of 1124 * the current in use mask. So cleanup the vector 1125 * allocation for the members that are not used anymore. 1126 */ 1127 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); 1128 cfg->move_in_progress = 1129 cpumask_intersects(cfg->old_domain, cpu_online_mask); 1130 cpumask_and(cfg->domain, cfg->domain, tmp_mask); 1131 break; 1132 } 1133 1134 vector = current_vector; 1135 offset = current_offset; 1136 next: 1137 vector += 16; 1138 if (vector >= first_system_vector) { 1139 offset = (offset + 1) % 16; 1140 vector = FIRST_EXTERNAL_VECTOR + offset; 1141 } 1142 1143 if (unlikely(current_vector == vector)) { 1144 cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); 1145 cpumask_andnot(tmp_mask, mask, cfg->old_domain); 1146 cpu = cpumask_first_and(tmp_mask, cpu_online_mask); 1147 continue; 1148 } 1149 1150 if (test_bit(vector, used_vectors)) 1151 goto next; 1152 1153 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1154 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1155 goto next; 1156 /* Found one! 
*/ 1157 current_vector = vector; 1158 current_offset = offset; 1159 if (cfg->vector) { 1160 cpumask_copy(cfg->old_domain, cfg->domain); 1161 cfg->move_in_progress = 1162 cpumask_intersects(cfg->old_domain, cpu_online_mask); 1163 } 1164 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1165 per_cpu(vector_irq, new_cpu)[vector] = irq; 1166 cfg->vector = vector; 1167 cpumask_copy(cfg->domain, tmp_mask); 1168 err = 0; 1169 break; 1170 } 1171 free_cpumask_var(tmp_mask); 1172 return err; 1173 } 1174 1175 int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1176 { 1177 int err; 1178 unsigned long flags; 1179 1180 raw_spin_lock_irqsave(&vector_lock, flags); 1181 err = __assign_irq_vector(irq, cfg, mask); 1182 raw_spin_unlock_irqrestore(&vector_lock, flags); 1183 return err; 1184 } 1185 1186 static void __clear_irq_vector(int irq, struct irq_cfg *cfg) 1187 { 1188 int cpu, vector; 1189 1190 BUG_ON(!cfg->vector); 1191 1192 vector = cfg->vector; 1193 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1194 per_cpu(vector_irq, cpu)[vector] = -1; 1195 1196 cfg->vector = 0; 1197 cpumask_clear(cfg->domain); 1198 1199 if (likely(!cfg->move_in_progress)) 1200 return; 1201 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1202 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1203 vector++) { 1204 if (per_cpu(vector_irq, cpu)[vector] != irq) 1205 continue; 1206 per_cpu(vector_irq, cpu)[vector] = -1; 1207 break; 1208 } 1209 } 1210 cfg->move_in_progress = 0; 1211 } 1212 1213 void __setup_vector_irq(int cpu) 1214 { 1215 /* Initialize vector_irq on a new cpu */ 1216 int irq, vector; 1217 struct irq_cfg *cfg; 1218 1219 /* 1220 * vector_lock will make sure that we don't run into irq vector 1221 * assignments that might be happening on another cpu in parallel, 1222 * while we setup our initial vector to irq mappings. 
1223 */ 1224 raw_spin_lock(&vector_lock); 1225 /* Mark the inuse vectors */ 1226 for_each_active_irq(irq) { 1227 cfg = irq_get_chip_data(irq); 1228 if (!cfg) 1229 continue; 1230 1231 if (!cpumask_test_cpu(cpu, cfg->domain)) 1232 continue; 1233 vector = cfg->vector; 1234 per_cpu(vector_irq, cpu)[vector] = irq; 1235 } 1236 /* Mark the free vectors */ 1237 for (vector = 0; vector < NR_VECTORS; ++vector) { 1238 irq = per_cpu(vector_irq, cpu)[vector]; 1239 if (irq < 0) 1240 continue; 1241 1242 cfg = irq_cfg(irq); 1243 if (!cpumask_test_cpu(cpu, cfg->domain)) 1244 per_cpu(vector_irq, cpu)[vector] = -1; 1245 } 1246 raw_spin_unlock(&vector_lock); 1247 } 1248 1249 static struct irq_chip ioapic_chip; 1250 1251 #ifdef CONFIG_X86_32 1252 static inline int IO_APIC_irq_trigger(int irq) 1253 { 1254 int apic, idx, pin; 1255 1256 for (apic = 0; apic < nr_ioapics; apic++) { 1257 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { 1258 idx = find_irq_entry(apic, pin, mp_INT); 1259 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) 1260 return irq_trigger(idx); 1261 } 1262 } 1263 /* 1264 * nonexistent IRQs are edge default 1265 */ 1266 return 0; 1267 } 1268 #else 1269 static inline int IO_APIC_irq_trigger(int irq) 1270 { 1271 return 1; 1272 } 1273 #endif 1274 1275 static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, 1276 unsigned long trigger) 1277 { 1278 struct irq_chip *chip = &ioapic_chip; 1279 irq_flow_handler_t hdl; 1280 bool fasteoi; 1281 1282 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1283 trigger == IOAPIC_LEVEL) { 1284 irq_set_status_flags(irq, IRQ_LEVEL); 1285 fasteoi = true; 1286 } else { 1287 irq_clear_status_flags(irq, IRQ_LEVEL); 1288 fasteoi = false; 1289 } 1290 1291 if (irq_remapped(cfg)) { 1292 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 1293 irq_remap_modify_chip_defaults(chip); 1294 fasteoi = trigger != 0; 1295 } 1296 1297 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; 1298 irq_set_chip_and_handler_name(irq, chip, hdl, 1299 fasteoi ? "fasteoi" : "edge"); 1300 } 1301 1302 int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, 1303 unsigned int destination, int vector, 1304 struct io_apic_irq_attr *attr) 1305 { 1306 memset(entry, 0, sizeof(*entry)); 1307 1308 entry->delivery_mode = apic->irq_delivery_mode; 1309 entry->dest_mode = apic->irq_dest_mode; 1310 entry->dest = destination; 1311 entry->vector = vector; 1312 entry->mask = 0; /* enable IRQ */ 1313 entry->trigger = attr->trigger; 1314 entry->polarity = attr->polarity; 1315 1316 /* 1317 * Mask level triggered irqs. 1318 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 
1319 */ 1320 if (attr->trigger) 1321 entry->mask = 1; 1322 1323 return 0; 1324 } 1325 1326 static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, 1327 struct io_apic_irq_attr *attr) 1328 { 1329 struct IO_APIC_route_entry entry; 1330 unsigned int dest; 1331 1332 if (!IO_APIC_IRQ(irq)) 1333 return; 1334 1335 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1336 return; 1337 1338 if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(), 1339 &dest)) { 1340 pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n", 1341 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1342 __clear_irq_vector(irq, cfg); 1343 1344 return; 1345 } 1346 1347 apic_printk(APIC_VERBOSE,KERN_DEBUG 1348 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1349 "IRQ %d Mode:%i Active:%i Dest:%d)\n", 1350 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, 1351 cfg->vector, irq, attr->trigger, attr->polarity, dest); 1352 1353 if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { 1354 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1355 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1356 __clear_irq_vector(irq, cfg); 1357 1358 return; 1359 } 1360 1361 ioapic_register_intr(irq, cfg, attr->trigger); 1362 if (irq < legacy_pic->nr_legacy_irqs) 1363 legacy_pic->mask(irq); 1364 1365 ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry); 1366 } 1367 1368 static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin) 1369 { 1370 if (idx != -1) 1371 return false; 1372 1373 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", 1374 mpc_ioapic_id(ioapic_idx), pin); 1375 return true; 1376 } 1377 1378 static void __init __io_apic_setup_irqs(unsigned int ioapic_idx) 1379 { 1380 int idx, node = cpu_to_node(0); 1381 struct io_apic_irq_attr attr; 1382 unsigned int pin, irq; 1383 1384 for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) { 1385 idx = find_irq_entry(ioapic_idx, pin, mp_INT); 1386 if (io_apic_pin_not_connected(idx, ioapic_idx, pin)) 1387 continue; 1388 1389 irq = pin_2_irq(idx, ioapic_idx, pin); 1390 1391 if ((ioapic_idx > 0) && (irq > 16)) 1392 continue; 1393 1394 /* 1395 * Skip the timer IRQ if there's a quirk handler 1396 * installed and if it returns 1: 1397 */ 1398 if (apic->multi_timer_check && 1399 apic->multi_timer_check(ioapic_idx, irq)) 1400 continue; 1401 1402 set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx), 1403 irq_polarity(idx)); 1404 1405 io_apic_setup_irq_pin(irq, node, &attr); 1406 } 1407 } 1408 1409 static void __init setup_IO_APIC_irqs(void) 1410 { 1411 unsigned int ioapic_idx; 1412 1413 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1414 1415 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1416 __io_apic_setup_irqs(ioapic_idx); 1417 } 1418 1419 /* 1420 * for the gsit that is not in first ioapic 1421 * but could not use acpi_register_gsi() 1422 * like some special sci in IBM x3330 1423 */ 1424 void setup_IO_APIC_irq_extra(u32 gsi) 1425 { 1426 int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0); 1427 struct io_apic_irq_attr attr; 1428 1429 /* 1430 * Convert 'gsi' to 'ioapic.pin'. 
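	 * A GSI is a global interrupt number; mp_find_ioapic() locates the
	 * IO-APIC whose GSI range covers it, and mp_find_ioapic_pin() turns
	 * it into a pin offset relative to that IO-APIC's gsi_base.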
1431 */ 1432 ioapic_idx = mp_find_ioapic(gsi); 1433 if (ioapic_idx < 0) 1434 return; 1435 1436 pin = mp_find_ioapic_pin(ioapic_idx, gsi); 1437 idx = find_irq_entry(ioapic_idx, pin, mp_INT); 1438 if (idx == -1) 1439 return; 1440 1441 irq = pin_2_irq(idx, ioapic_idx, pin); 1442 1443 /* Only handle the non legacy irqs on secondary ioapics */ 1444 if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY) 1445 return; 1446 1447 set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx), 1448 irq_polarity(idx)); 1449 1450 io_apic_setup_irq_pin_once(irq, node, &attr); 1451 } 1452 1453 /* 1454 * Set up the timer pin, possibly with the 8259A-master behind. 1455 */ 1456 static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, 1457 unsigned int pin, int vector) 1458 { 1459 struct IO_APIC_route_entry entry; 1460 unsigned int dest; 1461 1462 memset(&entry, 0, sizeof(entry)); 1463 1464 /* 1465 * We use logical delivery to get the timer IRQ 1466 * to the first CPU. 1467 */ 1468 if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(), 1469 apic->target_cpus(), &dest))) 1470 dest = BAD_APICID; 1471 1472 entry.dest_mode = apic->irq_dest_mode; 1473 entry.mask = 0; /* don't mask IRQ for edge */ 1474 entry.dest = dest; 1475 entry.delivery_mode = apic->irq_delivery_mode; 1476 entry.polarity = 0; 1477 entry.trigger = 0; 1478 entry.vector = vector; 1479 1480 /* 1481 * The timer IRQ doesn't have to know that behind the 1482 * scene we may have a 8259A-master in AEOI mode ... 1483 */ 1484 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 1485 "edge"); 1486 1487 /* 1488 * Add it to the IO-APIC irq-routing table: 1489 */ 1490 ioapic_write_entry(ioapic_idx, pin, entry); 1491 } 1492 1493 void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries) 1494 { 1495 int i; 1496 1497 pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n"); 1498 1499 for (i = 0; i <= nr_entries; i++) { 1500 struct IO_APIC_route_entry entry; 1501 1502 entry = ioapic_read_entry(apic, i); 1503 1504 pr_debug(" %02x %02X ", i, entry.dest); 1505 pr_cont("%1d %1d %1d %1d %1d " 1506 "%1d %1d %02X\n", 1507 entry.mask, 1508 entry.trigger, 1509 entry.irr, 1510 entry.polarity, 1511 entry.delivery_status, 1512 entry.dest_mode, 1513 entry.delivery_mode, 1514 entry.vector); 1515 } 1516 } 1517 1518 void intel_ir_io_apic_print_entries(unsigned int apic, 1519 unsigned int nr_entries) 1520 { 1521 int i; 1522 1523 pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n"); 1524 1525 for (i = 0; i <= nr_entries; i++) { 1526 struct IR_IO_APIC_route_entry *ir_entry; 1527 struct IO_APIC_route_entry entry; 1528 1529 entry = ioapic_read_entry(apic, i); 1530 1531 ir_entry = (struct IR_IO_APIC_route_entry *)&entry; 1532 1533 pr_debug(" %02x %04X ", i, ir_entry->index); 1534 pr_cont("%1d %1d %1d %1d %1d " 1535 "%1d %1d %X %02X\n", 1536 ir_entry->format, 1537 ir_entry->mask, 1538 ir_entry->trigger, 1539 ir_entry->irr, 1540 ir_entry->polarity, 1541 ir_entry->delivery_status, 1542 ir_entry->index2, 1543 ir_entry->zero, 1544 ir_entry->vector); 1545 } 1546 } 1547 1548 __apicdebuginit(void) print_IO_APIC(int ioapic_idx) 1549 { 1550 union IO_APIC_reg_00 reg_00; 1551 union IO_APIC_reg_01 reg_01; 1552 union IO_APIC_reg_02 reg_02; 1553 union IO_APIC_reg_03 reg_03; 1554 unsigned long flags; 1555 1556 raw_spin_lock_irqsave(&ioapic_lock, flags); 1557 reg_00.raw = io_apic_read(ioapic_idx, 0); 1558 reg_01.raw = io_apic_read(ioapic_idx, 1); 1559 if (reg_01.bits.version >= 0x10) 1560 reg_02.raw = io_apic_read(ioapic_idx, 2); 1561 if 
(reg_01.bits.version >= 0x20) 1562 reg_03.raw = io_apic_read(ioapic_idx, 3); 1563 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1564 1565 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); 1566 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1567 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1568 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1569 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); 1570 1571 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); 1572 printk(KERN_DEBUG "....... : max redirection entries: %02X\n", 1573 reg_01.bits.entries); 1574 1575 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); 1576 printk(KERN_DEBUG "....... : IO APIC version: %02X\n", 1577 reg_01.bits.version); 1578 1579 /* 1580 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, 1581 * but the value of reg_02 is read as the previous read register 1582 * value, so ignore it if reg_02 == reg_01. 1583 */ 1584 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { 1585 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); 1586 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); 1587 } 1588 1589 /* 1590 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 1591 * or reg_03, but the value of reg_0[23] is read as the previous read 1592 * register value, so ignore it if reg_03 == reg_0[12]. 1593 */ 1594 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && 1595 reg_03.raw != reg_01.raw) { 1596 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); 1597 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); 1598 } 1599 1600 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1601 1602 x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); 1603 } 1604 1605 __apicdebuginit(void) print_IO_APICs(void) 1606 { 1607 int ioapic_idx; 1608 struct irq_cfg *cfg; 1609 unsigned int irq; 1610 struct irq_chip *chip; 1611 1612 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1613 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1614 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1615 mpc_ioapic_id(ioapic_idx), 1616 ioapics[ioapic_idx].nr_registers); 1617 1618 /* 1619 * We are a bit conservative about what we expect. We have to 1620 * know about every hardware change ASAP. 1621 */ 1622 printk(KERN_INFO "testing the IO APIC.......................\n"); 1623 1624 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) 1625 print_IO_APIC(ioapic_idx); 1626 1627 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1628 for_each_active_irq(irq) { 1629 struct irq_pin_list *entry; 1630 1631 chip = irq_get_chip(irq); 1632 if (chip != &ioapic_chip) 1633 continue; 1634 1635 cfg = irq_get_chip_data(irq); 1636 if (!cfg) 1637 continue; 1638 entry = cfg->irq_2_pin; 1639 if (!entry) 1640 continue; 1641 printk(KERN_DEBUG "IRQ%d ", irq); 1642 for_each_irq_pin(entry, cfg->irq_2_pin) 1643 pr_cont("-> %d:%d", entry->apic, entry->pin); 1644 pr_cont("\n"); 1645 } 1646 1647 printk(KERN_INFO ".................................... 
done.\n"); 1648 } 1649 1650 __apicdebuginit(void) print_APIC_field(int base) 1651 { 1652 int i; 1653 1654 printk(KERN_DEBUG); 1655 1656 for (i = 0; i < 8; i++) 1657 pr_cont("%08x", apic_read(base + i*0x10)); 1658 1659 pr_cont("\n"); 1660 } 1661 1662 __apicdebuginit(void) print_local_APIC(void *dummy) 1663 { 1664 unsigned int i, v, ver, maxlvt; 1665 u64 icr; 1666 1667 printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", 1668 smp_processor_id(), hard_smp_processor_id()); 1669 v = apic_read(APIC_ID); 1670 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); 1671 v = apic_read(APIC_LVR); 1672 printk(KERN_INFO "... APIC VERSION: %08x\n", v); 1673 ver = GET_APIC_VERSION(v); 1674 maxlvt = lapic_get_maxlvt(); 1675 1676 v = apic_read(APIC_TASKPRI); 1677 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); 1678 1679 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1680 if (!APIC_XAPIC(ver)) { 1681 v = apic_read(APIC_ARBPRI); 1682 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, 1683 v & APIC_ARBPRI_MASK); 1684 } 1685 v = apic_read(APIC_PROCPRI); 1686 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); 1687 } 1688 1689 /* 1690 * Remote read supported only in the 82489DX and local APIC for 1691 * Pentium processors. 1692 */ 1693 if (!APIC_INTEGRATED(ver) || maxlvt == 3) { 1694 v = apic_read(APIC_RRR); 1695 printk(KERN_DEBUG "... APIC RRR: %08x\n", v); 1696 } 1697 1698 v = apic_read(APIC_LDR); 1699 printk(KERN_DEBUG "... APIC LDR: %08x\n", v); 1700 if (!x2apic_enabled()) { 1701 v = apic_read(APIC_DFR); 1702 printk(KERN_DEBUG "... APIC DFR: %08x\n", v); 1703 } 1704 v = apic_read(APIC_SPIV); 1705 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); 1706 1707 printk(KERN_DEBUG "... APIC ISR field:\n"); 1708 print_APIC_field(APIC_ISR); 1709 printk(KERN_DEBUG "... APIC TMR field:\n"); 1710 print_APIC_field(APIC_TMR); 1711 printk(KERN_DEBUG "... APIC IRR field:\n"); 1712 print_APIC_field(APIC_IRR); 1713 1714 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1715 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1716 apic_write(APIC_ESR, 0); 1717 1718 v = apic_read(APIC_ESR); 1719 printk(KERN_DEBUG "... APIC ESR: %08x\n", v); 1720 } 1721 1722 icr = apic_icr_read(); 1723 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); 1724 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); 1725 1726 v = apic_read(APIC_LVTT); 1727 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); 1728 1729 if (maxlvt > 3) { /* PC is LVT#4. */ 1730 v = apic_read(APIC_LVTPC); 1731 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); 1732 } 1733 v = apic_read(APIC_LVT0); 1734 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); 1735 v = apic_read(APIC_LVT1); 1736 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); 1737 1738 if (maxlvt > 2) { /* ERR is LVT#3. */ 1739 v = apic_read(APIC_LVTERR); 1740 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); 1741 } 1742 1743 v = apic_read(APIC_TMICT); 1744 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); 1745 v = apic_read(APIC_TMCCT); 1746 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); 1747 v = apic_read(APIC_TDCR); 1748 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); 1749 1750 if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { 1751 v = apic_read(APIC_EFEAT); 1752 maxlvt = (v >> 16) & 0xff; 1753 printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); 1754 v = apic_read(APIC_ECTRL); 1755 printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); 1756 for (i = 0; i < maxlvt; i++) { 1757 v = apic_read(APIC_EILVTn(i)); 1758 printk(KERN_DEBUG "... 
APIC EILVT%d: %08x\n", i, v); 1759 } 1760 } 1761 pr_cont("\n"); 1762 } 1763 1764 __apicdebuginit(void) print_local_APICs(int maxcpu) 1765 { 1766 int cpu; 1767 1768 if (!maxcpu) 1769 return; 1770 1771 preempt_disable(); 1772 for_each_online_cpu(cpu) { 1773 if (cpu >= maxcpu) 1774 break; 1775 smp_call_function_single(cpu, print_local_APIC, NULL, 1); 1776 } 1777 preempt_enable(); 1778 } 1779 1780 __apicdebuginit(void) print_PIC(void) 1781 { 1782 unsigned int v; 1783 unsigned long flags; 1784 1785 if (!legacy_pic->nr_legacy_irqs) 1786 return; 1787 1788 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1789 1790 raw_spin_lock_irqsave(&i8259A_lock, flags); 1791 1792 v = inb(0xa1) << 8 | inb(0x21); 1793 printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 1794 1795 v = inb(0xa0) << 8 | inb(0x20); 1796 printk(KERN_DEBUG "... PIC IRR: %04x\n", v); 1797 1798 outb(0x0b,0xa0); 1799 outb(0x0b,0x20); 1800 v = inb(0xa0) << 8 | inb(0x20); 1801 outb(0x0a,0xa0); 1802 outb(0x0a,0x20); 1803 1804 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 1805 1806 printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 1807 1808 v = inb(0x4d1) << 8 | inb(0x4d0); 1809 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); 1810 } 1811 1812 static int __initdata show_lapic = 1; 1813 static __init int setup_show_lapic(char *arg) 1814 { 1815 int num = -1; 1816 1817 if (strcmp(arg, "all") == 0) { 1818 show_lapic = CONFIG_NR_CPUS; 1819 } else { 1820 get_option(&arg, &num); 1821 if (num >= 0) 1822 show_lapic = num; 1823 } 1824 1825 return 1; 1826 } 1827 __setup("show_lapic=", setup_show_lapic); 1828 1829 __apicdebuginit(int) print_ICs(void) 1830 { 1831 if (apic_verbosity == APIC_QUIET) 1832 return 0; 1833 1834 print_PIC(); 1835 1836 /* don't print out if apic is not there */ 1837 if (!cpu_has_apic && !apic_from_smp_config()) 1838 return 0; 1839 1840 print_local_APICs(show_lapic); 1841 print_IO_APICs(); 1842 1843 return 0; 1844 } 1845 1846 late_initcall(print_ICs); 1847 1848 1849 /* Where if anywhere is the i8259 connect in external int mode */ 1850 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 1851 1852 void __init enable_IO_APIC(void) 1853 { 1854 int i8259_apic, i8259_pin; 1855 int apic; 1856 1857 if (!legacy_pic->nr_legacy_irqs) 1858 return; 1859 1860 for(apic = 0; apic < nr_ioapics; apic++) { 1861 int pin; 1862 /* See if any of the pins is in ExtINT mode */ 1863 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { 1864 struct IO_APIC_route_entry entry; 1865 entry = ioapic_read_entry(apic, pin); 1866 1867 /* If the interrupt line is enabled and in ExtInt mode 1868 * I have found the pin where the i8259 is connected. 1869 */ 1870 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { 1871 ioapic_i8259.apic = apic; 1872 ioapic_i8259.pin = pin; 1873 goto found_i8259; 1874 } 1875 } 1876 } 1877 found_i8259: 1878 /* Look to see what if the MP table has reported the ExtINT */ 1879 /* If we could not find the appropriate pin by looking at the ioapic 1880 * the i8259 probably is not connected the ioapic but give the 1881 * mptable a chance anyway. 
1882 */ 1883 i8259_pin = find_isa_irq_pin(0, mp_ExtINT); 1884 i8259_apic = find_isa_irq_apic(0, mp_ExtINT); 1885 /* Trust the MP table if nothing is setup in the hardware */ 1886 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { 1887 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); 1888 ioapic_i8259.pin = i8259_pin; 1889 ioapic_i8259.apic = i8259_apic; 1890 } 1891 /* Complain if the MP table and the hardware disagree */ 1892 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && 1893 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) 1894 { 1895 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); 1896 } 1897 1898 /* 1899 * Do not trust the IO-APIC being empty at bootup 1900 */ 1901 clear_IO_APIC(); 1902 } 1903 1904 void native_disable_io_apic(void) 1905 { 1906 /* 1907 * If the i8259 is routed through an IOAPIC 1908 * Put that IOAPIC in virtual wire mode 1909 * so legacy interrupts can be delivered. 1910 */ 1911 if (ioapic_i8259.pin != -1) { 1912 struct IO_APIC_route_entry entry; 1913 1914 memset(&entry, 0, sizeof(entry)); 1915 entry.mask = 0; /* Enabled */ 1916 entry.trigger = 0; /* Edge */ 1917 entry.irr = 0; 1918 entry.polarity = 0; /* High */ 1919 entry.delivery_status = 0; 1920 entry.dest_mode = 0; /* Physical */ 1921 entry.delivery_mode = dest_ExtINT; /* ExtInt */ 1922 entry.vector = 0; 1923 entry.dest = read_apic_id(); 1924 1925 /* 1926 * Add it to the IO-APIC irq-routing table: 1927 */ 1928 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); 1929 } 1930 1931 if (cpu_has_apic || apic_from_smp_config()) 1932 disconnect_bsp_APIC(ioapic_i8259.pin != -1); 1933 1934 } 1935 1936 /* 1937 * Not an __init, needed by the reboot code 1938 */ 1939 void disable_IO_APIC(void) 1940 { 1941 /* 1942 * Clear the IO-APIC before rebooting: 1943 */ 1944 clear_IO_APIC(); 1945 1946 if (!legacy_pic->nr_legacy_irqs) 1947 return; 1948 1949 x86_io_apic_ops.disable(); 1950 } 1951 1952 #ifdef CONFIG_X86_32 1953 /* 1954 * function to set the IO-APIC physical IDs based on the 1955 * values stored in the MPC table. 1956 * 1957 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 1958 */ 1959 void __init setup_ioapic_ids_from_mpc_nocheck(void) 1960 { 1961 union IO_APIC_reg_00 reg_00; 1962 physid_mask_t phys_id_present_map; 1963 int ioapic_idx; 1964 int i; 1965 unsigned char old_id; 1966 unsigned long flags; 1967 1968 /* 1969 * This is broken; anything with a real cpu count has to 1970 * circumvent this idiocy regardless. 1971 */ 1972 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map); 1973 1974 /* 1975 * Set the IOAPIC ID to the value stored in the MPC table. 1976 */ 1977 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { 1978 /* Read the register 0 value */ 1979 raw_spin_lock_irqsave(&ioapic_lock, flags); 1980 reg_00.raw = io_apic_read(ioapic_idx, 0); 1981 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1982 1983 old_id = mpc_ioapic_id(ioapic_idx); 1984 1985 if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) { 1986 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 1987 ioapic_idx, mpc_ioapic_id(ioapic_idx)); 1988 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 1989 reg_00.bits.ID); 1990 ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID; 1991 } 1992 1993 /* 1994 * Sanity check, is the ID really free? Every APIC in a 1995 * system must have a unique ID or we get lots of nice 1996 * 'stuck on smp_invalidate_needed IPI wait' messages. 
1997 */ 1998 if (apic->check_apicid_used(&phys_id_present_map, 1999 mpc_ioapic_id(ioapic_idx))) { 2000 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2001 ioapic_idx, mpc_ioapic_id(ioapic_idx)); 2002 for (i = 0; i < get_physical_broadcast(); i++) 2003 if (!physid_isset(i, phys_id_present_map)) 2004 break; 2005 if (i >= get_physical_broadcast()) 2006 panic("Max APIC ID exceeded!\n"); 2007 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2008 i); 2009 physid_set(i, phys_id_present_map); 2010 ioapics[ioapic_idx].mp_config.apicid = i; 2011 } else { 2012 physid_mask_t tmp; 2013 apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx), 2014 &tmp); 2015 apic_printk(APIC_VERBOSE, "Setting %d in the " 2016 "phys_id_present_map\n", 2017 mpc_ioapic_id(ioapic_idx)); 2018 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2019 } 2020 2021 /* 2022 * We need to adjust the IRQ routing table 2023 * if the ID changed. 2024 */ 2025 if (old_id != mpc_ioapic_id(ioapic_idx)) 2026 for (i = 0; i < mp_irq_entries; i++) 2027 if (mp_irqs[i].dstapic == old_id) 2028 mp_irqs[i].dstapic 2029 = mpc_ioapic_id(ioapic_idx); 2030 2031 /* 2032 * Update the ID register according to the right value 2033 * from the MPC table if they are different. 2034 */ 2035 if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID) 2036 continue; 2037 2038 apic_printk(APIC_VERBOSE, KERN_INFO 2039 "...changing IO-APIC physical APIC ID to %d ...", 2040 mpc_ioapic_id(ioapic_idx)); 2041 2042 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); 2043 raw_spin_lock_irqsave(&ioapic_lock, flags); 2044 io_apic_write(ioapic_idx, 0, reg_00.raw); 2045 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2046 2047 /* 2048 * Sanity check 2049 */ 2050 raw_spin_lock_irqsave(&ioapic_lock, flags); 2051 reg_00.raw = io_apic_read(ioapic_idx, 0); 2052 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2053 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) 2054 pr_cont("could not set ID!\n"); 2055 else 2056 apic_printk(APIC_VERBOSE, " ok.\n"); 2057 } 2058 } 2059 2060 void __init setup_ioapic_ids_from_mpc(void) 2061 { 2062 2063 if (acpi_ioapic) 2064 return; 2065 /* 2066 * Don't check I/O APIC IDs for xAPIC systems. They have 2067 * no meaning without the serial APIC bus. 2068 */ 2069 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 2070 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 2071 return; 2072 setup_ioapic_ids_from_mpc_nocheck(); 2073 } 2074 #endif 2075 2076 int no_timer_check __initdata; 2077 2078 static int __init notimercheck(char *s) 2079 { 2080 no_timer_check = 1; 2081 return 1; 2082 } 2083 __setup("no_timer_check", notimercheck); 2084 2085 /* 2086 * There is a nasty bug in some older SMP boards, their mptable lies 2087 * about the timer IRQ. We do the following to work around the situation: 2088 * 2089 * - timer IRQ defaults to IO-APIC IRQ 2090 * - if this function detects that timer IRQs are defunct, then we fall 2091 * back to ISA timer IRQs 2092 */ 2093 static int __init timer_irq_works(void) 2094 { 2095 unsigned long t1 = jiffies; 2096 unsigned long flags; 2097 2098 if (no_timer_check) 2099 return 1; 2100 2101 local_save_flags(flags); 2102 local_irq_enable(); 2103 /* Let ten ticks pass... */ 2104 mdelay((10 * 1000) / HZ); 2105 local_irq_restore(flags); 2106 2107 /* 2108 * Expect a few ticks at least, to be sure some possible 2109 * glue logic does not lock up after one or two first 2110 * ticks in a non-ExtINT mode. Also the local APIC 2111 * might have cached one ExtINT interrupt. 
Finally, at least one tick may be lost due to delays.
	 */

	/* Did more than four ticks pass? (time_after() also copes with a jiffies wrap) */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * An edge-triggered interrupt needs to resend any interrupt
 * that was delayed, but this is now handled in the
 * device-independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
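 *
 * Concretely, for this chip the edge path goes through ->irq_ack
 * (ack_apic_edge below) while the level path goes through ->irq_eoi
 * (ack_apic_level), which is why ioapic_chip installs both callbacks.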
2182 */ 2183 2184 #ifdef CONFIG_SMP 2185 void send_cleanup_vector(struct irq_cfg *cfg) 2186 { 2187 cpumask_var_t cleanup_mask; 2188 2189 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2190 unsigned int i; 2191 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2192 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2193 } else { 2194 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2195 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2196 free_cpumask_var(cleanup_mask); 2197 } 2198 cfg->move_in_progress = 0; 2199 } 2200 2201 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2202 { 2203 unsigned vector, me; 2204 2205 ack_APIC_irq(); 2206 irq_enter(); 2207 exit_idle(); 2208 2209 me = smp_processor_id(); 2210 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2211 unsigned int irq; 2212 unsigned int irr; 2213 struct irq_desc *desc; 2214 struct irq_cfg *cfg; 2215 irq = __this_cpu_read(vector_irq[vector]); 2216 2217 if (irq == -1) 2218 continue; 2219 2220 desc = irq_to_desc(irq); 2221 if (!desc) 2222 continue; 2223 2224 cfg = irq_cfg(irq); 2225 if (!cfg) 2226 continue; 2227 2228 raw_spin_lock(&desc->lock); 2229 2230 /* 2231 * Check if the irq migration is in progress. If so, we 2232 * haven't received the cleanup request yet for this irq. 2233 */ 2234 if (cfg->move_in_progress) 2235 goto unlock; 2236 2237 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2238 goto unlock; 2239 2240 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2241 /* 2242 * Check if the vector that needs to be cleanedup is 2243 * registered at the cpu's IRR. If so, then this is not 2244 * the best time to clean it up. Lets clean it up in the 2245 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2246 * to myself. 2247 */ 2248 if (irr & (1 << (vector % 32))) { 2249 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2250 goto unlock; 2251 } 2252 __this_cpu_write(vector_irq[vector], -1); 2253 unlock: 2254 raw_spin_unlock(&desc->lock); 2255 } 2256 2257 irq_exit(); 2258 } 2259 2260 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2261 { 2262 unsigned me; 2263 2264 if (likely(!cfg->move_in_progress)) 2265 return; 2266 2267 me = smp_processor_id(); 2268 2269 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2270 send_cleanup_vector(cfg); 2271 } 2272 2273 static void irq_complete_move(struct irq_cfg *cfg) 2274 { 2275 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2276 } 2277 2278 void irq_force_complete_move(int irq) 2279 { 2280 struct irq_cfg *cfg = irq_get_chip_data(irq); 2281 2282 if (!cfg) 2283 return; 2284 2285 __irq_complete_move(cfg, cfg->vector); 2286 } 2287 #else 2288 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2289 #endif 2290 2291 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2292 { 2293 int apic, pin; 2294 struct irq_pin_list *entry; 2295 u8 vector = cfg->vector; 2296 2297 for_each_irq_pin(entry, cfg->irq_2_pin) { 2298 unsigned int reg; 2299 2300 apic = entry->apic; 2301 pin = entry->pin; 2302 /* 2303 * With interrupt-remapping, destination information comes 2304 * from interrupt-remapping table entry. 
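 *
 * Each redirection table entry occupies two 32-bit registers: the low
 * word at index 0x10 + pin*2 holds the vector and control bits, and the
 * high word at index 0x11 + pin*2 holds the destination APIC ID, which
 * is why only the low word is rewritten when remapping is in use.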
2305 */ 2306 if (!irq_remapped(cfg)) 2307 io_apic_write(apic, 0x11 + pin*2, dest); 2308 reg = io_apic_read(apic, 0x10 + pin*2); 2309 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2310 reg |= vector; 2311 io_apic_modify(apic, 0x10 + pin*2, reg); 2312 } 2313 } 2314 2315 /* 2316 * Either sets data->affinity to a valid value, and returns 2317 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2318 * leaves data->affinity untouched. 2319 */ 2320 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2321 unsigned int *dest_id) 2322 { 2323 struct irq_cfg *cfg = data->chip_data; 2324 unsigned int irq = data->irq; 2325 int err; 2326 2327 if (!config_enabled(CONFIG_SMP)) 2328 return -1; 2329 2330 if (!cpumask_intersects(mask, cpu_online_mask)) 2331 return -EINVAL; 2332 2333 err = assign_irq_vector(irq, cfg, mask); 2334 if (err) 2335 return err; 2336 2337 err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); 2338 if (err) { 2339 if (assign_irq_vector(irq, cfg, data->affinity)) 2340 pr_err("Failed to recover vector for irq %d\n", irq); 2341 return err; 2342 } 2343 2344 cpumask_copy(data->affinity, mask); 2345 2346 return 0; 2347 } 2348 2349 2350 int native_ioapic_set_affinity(struct irq_data *data, 2351 const struct cpumask *mask, 2352 bool force) 2353 { 2354 unsigned int dest, irq = data->irq; 2355 unsigned long flags; 2356 int ret; 2357 2358 if (!config_enabled(CONFIG_SMP)) 2359 return -1; 2360 2361 raw_spin_lock_irqsave(&ioapic_lock, flags); 2362 ret = __ioapic_set_affinity(data, mask, &dest); 2363 if (!ret) { 2364 /* Only the high 8 bits are valid. */ 2365 dest = SET_APIC_LOGICAL_ID(dest); 2366 __target_IO_APIC_irq(irq, dest, data->chip_data); 2367 ret = IRQ_SET_MASK_OK_NOCOPY; 2368 } 2369 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2370 return ret; 2371 } 2372 2373 static void ack_apic_edge(struct irq_data *data) 2374 { 2375 irq_complete_move(data->chip_data); 2376 irq_move_irq(data); 2377 ack_APIC_irq(); 2378 } 2379 2380 atomic_t irq_mis_count; 2381 2382 #ifdef CONFIG_GENERIC_PENDING_IRQ 2383 static bool io_apic_level_ack_pending(struct irq_cfg *cfg) 2384 { 2385 struct irq_pin_list *entry; 2386 unsigned long flags; 2387 2388 raw_spin_lock_irqsave(&ioapic_lock, flags); 2389 for_each_irq_pin(entry, cfg->irq_2_pin) { 2390 unsigned int reg; 2391 int pin; 2392 2393 pin = entry->pin; 2394 reg = io_apic_read(entry->apic, 0x10 + pin*2); 2395 /* Is the remote IRR bit set? */ 2396 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 2397 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2398 return true; 2399 } 2400 } 2401 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2402 2403 return false; 2404 } 2405 2406 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) 2407 { 2408 /* If we are moving the irq we need to mask it */ 2409 if (unlikely(irqd_is_setaffinity_pending(data))) { 2410 mask_ioapic(cfg); 2411 return true; 2412 } 2413 return false; 2414 } 2415 2416 static inline void ioapic_irqd_unmask(struct irq_data *data, 2417 struct irq_cfg *cfg, bool masked) 2418 { 2419 if (unlikely(masked)) { 2420 /* Only migrate the irq if the ack has been received. 2421 * 2422 * On rare occasions the broadcast level triggered ack gets 2423 * delayed going to ioapics, and if we reprogram the 2424 * vector while Remote IRR is still set the irq will never 2425 * fire again. 2426 * 2427 * To prevent this scenario we read the Remote IRR bit 2428 * of the ioapic. This has two effects. 
2429 * - On any sane system the read of the ioapic will 2430 * flush writes (and acks) going to the ioapic from 2431 * this cpu. 2432 * - We get to see if the ACK has actually been delivered. 2433 * 2434 * Based on failed experiments of reprogramming the 2435 * ioapic entry from outside of irq context starting 2436 * with masking the ioapic entry and then polling until 2437 * Remote IRR was clear before reprogramming the 2438 * ioapic I don't trust the Remote IRR bit to be 2439 * completey accurate. 2440 * 2441 * However there appears to be no other way to plug 2442 * this race, so if the Remote IRR bit is not 2443 * accurate and is causing problems then it is a hardware bug 2444 * and you can go talk to the chipset vendor about it. 2445 */ 2446 if (!io_apic_level_ack_pending(cfg)) 2447 irq_move_masked_irq(data); 2448 unmask_ioapic(cfg); 2449 } 2450 } 2451 #else 2452 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) 2453 { 2454 return false; 2455 } 2456 static inline void ioapic_irqd_unmask(struct irq_data *data, 2457 struct irq_cfg *cfg, bool masked) 2458 { 2459 } 2460 #endif 2461 2462 static void ack_apic_level(struct irq_data *data) 2463 { 2464 struct irq_cfg *cfg = data->chip_data; 2465 int i, irq = data->irq; 2466 unsigned long v; 2467 bool masked; 2468 2469 irq_complete_move(cfg); 2470 masked = ioapic_irqd_mask(data, cfg); 2471 2472 /* 2473 * It appears there is an erratum which affects at least version 0x11 2474 * of I/O APIC (that's the 82093AA and cores integrated into various 2475 * chipsets). Under certain conditions a level-triggered interrupt is 2476 * erroneously delivered as edge-triggered one but the respective IRR 2477 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2478 * message but it will never arrive and further interrupts are blocked 2479 * from the source. The exact reason is so far unknown, but the 2480 * phenomenon was observed when two consecutive interrupt requests 2481 * from a given source get delivered to the same CPU and the source is 2482 * temporarily disabled in between. 2483 * 2484 * A workaround is to simulate an EOI message manually. We achieve it 2485 * by setting the trigger mode to edge and then to level when the edge 2486 * trigger mode gets detected in the TMR of a local APIC for a 2487 * level-triggered interrupt. We mask the source for the time of the 2488 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2489 * The idea is from Manfred Spraul. --macro 2490 * 2491 * Also in the case when cpu goes offline, fixup_irqs() will forward 2492 * any unhandled interrupt on the offlined cpu to the new cpu 2493 * destination that is handling the corresponding interrupt. This 2494 * interrupt forwarding is done via IPI's. Hence, in this case also 2495 * level-triggered io-apic interrupt will be seen as an edge 2496 * interrupt in the IRR. And we can't rely on the cpu's EOI 2497 * to be broadcasted to the IO-APIC's which will clear the remoteIRR 2498 * corresponding to the level-triggered interrupt. Hence on IO-APIC's 2499 * supporting EOI register, we do an explicit EOI to clear the 2500 * remote IRR and on IO-APIC's which don't have an EOI register, 2501 * we use the above logic (mask+edge followed by unmask+level) from 2502 * Manfred Spraul to clear the remote IRR. 2503 */ 2504 i = cfg->vector; 2505 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2506 2507 /* 2508 * We must acknowledge the irq before we move it or the acknowledge will 2509 * not propagate properly. 
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	ioapic_irqd_unmask(data, cfg, masked);
}

static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
	.irq_set_affinity	= native_ioapic_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};

static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}

/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.
--macro 2618 */ 2619 static inline void __init unlock_ExtINT_logic(void) 2620 { 2621 int apic, pin, i; 2622 struct IO_APIC_route_entry entry0, entry1; 2623 unsigned char save_control, save_freq_select; 2624 2625 pin = find_isa_irq_pin(8, mp_INT); 2626 if (pin == -1) { 2627 WARN_ON_ONCE(1); 2628 return; 2629 } 2630 apic = find_isa_irq_apic(8, mp_INT); 2631 if (apic == -1) { 2632 WARN_ON_ONCE(1); 2633 return; 2634 } 2635 2636 entry0 = ioapic_read_entry(apic, pin); 2637 clear_IO_APIC_pin(apic, pin); 2638 2639 memset(&entry1, 0, sizeof(entry1)); 2640 2641 entry1.dest_mode = 0; /* physical delivery */ 2642 entry1.mask = 0; /* unmask IRQ now */ 2643 entry1.dest = hard_smp_processor_id(); 2644 entry1.delivery_mode = dest_ExtINT; 2645 entry1.polarity = entry0.polarity; 2646 entry1.trigger = 0; 2647 entry1.vector = 0; 2648 2649 ioapic_write_entry(apic, pin, entry1); 2650 2651 save_control = CMOS_READ(RTC_CONTROL); 2652 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2653 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2654 RTC_FREQ_SELECT); 2655 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2656 2657 i = 100; 2658 while (i-- > 0) { 2659 mdelay(10); 2660 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2661 i -= 10; 2662 } 2663 2664 CMOS_WRITE(save_control, RTC_CONTROL); 2665 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2666 clear_IO_APIC_pin(apic, pin); 2667 2668 ioapic_write_entry(apic, pin, entry0); 2669 } 2670 2671 static int disable_timer_pin_1 __initdata; 2672 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2673 static int __init disable_timer_pin_setup(char *arg) 2674 { 2675 disable_timer_pin_1 = 1; 2676 return 0; 2677 } 2678 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2679 2680 int timer_through_8259 __initdata; 2681 2682 /* 2683 * This code may look a bit paranoid, but it's supposed to cooperate with 2684 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2685 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2686 * fanatically on his truly buggy board. 2687 * 2688 * FIXME: really need to revamp this for all platforms. 2689 */ 2690 static inline void __init check_timer(void) 2691 { 2692 struct irq_cfg *cfg = irq_get_chip_data(0); 2693 int node = cpu_to_node(0); 2694 int apic1, pin1, apic2, pin2; 2695 unsigned long flags; 2696 int no_pin1 = 0; 2697 2698 local_irq_save(flags); 2699 2700 /* 2701 * get/set the timer IRQ vector: 2702 */ 2703 legacy_pic->mask(0); 2704 assign_irq_vector(0, cfg, apic->target_cpus()); 2705 2706 /* 2707 * As IRQ0 is to be enabled in the 8259A, the virtual 2708 * wire has to be disabled in the local APIC. Also 2709 * timer interrupts need to be acknowledged manually in 2710 * the 8259A for the i82489DX when using the NMI 2711 * watchdog as that APIC treats NMIs as level-triggered. 2712 * The AEOI mode will finish them in the 8259A 2713 * automatically. 2714 */ 2715 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2716 legacy_pic->init(1); 2717 2718 pin1 = find_isa_irq_pin(0, mp_INT); 2719 apic1 = find_isa_irq_apic(0, mp_INT); 2720 pin2 = ioapic_i8259.pin; 2721 apic2 = ioapic_i8259.apic; 2722 2723 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2724 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2725 cfg->vector, apic1, pin1, apic2, pin2); 2726 2727 /* 2728 * Some BIOS writers are clueless and report the ExtINTA 2729 * I/O APIC input from the cascaded 8259A as the timer 2730 * interrupt input. 
So just in case, if only one pin 2731 * was found above, try it both directly and through the 2732 * 8259A. 2733 */ 2734 if (pin1 == -1) { 2735 panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC"); 2736 pin1 = pin2; 2737 apic1 = apic2; 2738 no_pin1 = 1; 2739 } else if (pin2 == -1) { 2740 pin2 = pin1; 2741 apic2 = apic1; 2742 } 2743 2744 if (pin1 != -1) { 2745 /* 2746 * Ok, does IRQ0 through the IOAPIC work? 2747 */ 2748 if (no_pin1) { 2749 add_pin_to_irq_node(cfg, node, apic1, pin1); 2750 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2751 } else { 2752 /* for edge trigger, setup_ioapic_irq already 2753 * leave it unmasked. 2754 * so only need to unmask if it is level-trigger 2755 * do we really have level trigger timer? 2756 */ 2757 int idx; 2758 idx = find_irq_entry(apic1, pin1, mp_INT); 2759 if (idx != -1 && irq_trigger(idx)) 2760 unmask_ioapic(cfg); 2761 } 2762 if (timer_irq_works()) { 2763 if (disable_timer_pin_1 > 0) 2764 clear_IO_APIC_pin(0, pin1); 2765 goto out; 2766 } 2767 panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); 2768 local_irq_disable(); 2769 clear_IO_APIC_pin(apic1, pin1); 2770 if (!no_pin1) 2771 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2772 "8254 timer not connected to IO-APIC\n"); 2773 2774 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2775 "(IRQ0) through the 8259A ...\n"); 2776 apic_printk(APIC_QUIET, KERN_INFO 2777 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2778 /* 2779 * legacy devices should be connected to IO APIC #0 2780 */ 2781 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2782 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2783 legacy_pic->unmask(0); 2784 if (timer_irq_works()) { 2785 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2786 timer_through_8259 = 1; 2787 goto out; 2788 } 2789 /* 2790 * Cleanup, just in case ... 2791 */ 2792 local_irq_disable(); 2793 legacy_pic->mask(0); 2794 clear_IO_APIC_pin(apic2, pin2); 2795 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2796 } 2797 2798 apic_printk(APIC_QUIET, KERN_INFO 2799 "...trying to set up timer as Virtual Wire IRQ...\n"); 2800 2801 lapic_register_intr(0); 2802 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2803 legacy_pic->unmask(0); 2804 2805 if (timer_irq_works()) { 2806 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2807 goto out; 2808 } 2809 local_irq_disable(); 2810 legacy_pic->mask(0); 2811 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2812 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2813 2814 apic_printk(APIC_QUIET, KERN_INFO 2815 "...trying to set up timer as ExtINT IRQ...\n"); 2816 2817 legacy_pic->init(0); 2818 legacy_pic->make_irq(0); 2819 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2820 2821 unlock_ExtINT_logic(); 2822 2823 if (timer_irq_works()) { 2824 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2825 goto out; 2826 } 2827 local_irq_disable(); 2828 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2829 if (x2apic_preenabled) 2830 apic_printk(APIC_QUIET, KERN_INFO 2831 "Perhaps problem with the pre-enabled x2apic mode\n" 2832 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); 2833 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2834 "report. Then try booting with the 'noapic' option.\n"); 2835 out: 2836 local_irq_restore(flags); 2837 } 2838 2839 /* 2840 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2841 * to devices. 
However there may be an I/O APIC pin available for 2842 * this interrupt regardless. The pin may be left unconnected, but 2843 * typically it will be reused as an ExtINT cascade interrupt for 2844 * the master 8259A. In the MPS case such a pin will normally be 2845 * reported as an ExtINT interrupt in the MP table. With ACPI 2846 * there is no provision for ExtINT interrupts, and in the absence 2847 * of an override it would be treated as an ordinary ISA I/O APIC 2848 * interrupt, that is edge-triggered and unmasked by default. We 2849 * used to do this, but it caused problems on some systems because 2850 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2851 * the same ExtINT cascade interrupt to drive the local APIC of the 2852 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2853 * the I/O APIC in all cases now. No actual device should request 2854 * it anyway. --macro 2855 */ 2856 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2857 2858 void __init setup_IO_APIC(void) 2859 { 2860 2861 /* 2862 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2863 */ 2864 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2865 2866 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2867 /* 2868 * Set up IO-APIC IRQ routing. 2869 */ 2870 x86_init.mpparse.setup_ioapic_ids(); 2871 2872 sync_Arb_IDs(); 2873 setup_IO_APIC_irqs(); 2874 init_IO_APIC_traps(); 2875 if (legacy_pic->nr_legacy_irqs) 2876 check_timer(); 2877 } 2878 2879 /* 2880 * Called after all the initialization is done. If we didn't find any 2881 * APIC bugs then we can allow the modify fast path 2882 */ 2883 2884 static int __init io_apic_bug_finalize(void) 2885 { 2886 if (sis_apic_bug == -1) 2887 sis_apic_bug = 0; 2888 return 0; 2889 } 2890 2891 late_initcall(io_apic_bug_finalize); 2892 2893 static void resume_ioapic_id(int ioapic_idx) 2894 { 2895 unsigned long flags; 2896 union IO_APIC_reg_00 reg_00; 2897 2898 raw_spin_lock_irqsave(&ioapic_lock, flags); 2899 reg_00.raw = io_apic_read(ioapic_idx, 0); 2900 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { 2901 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); 2902 io_apic_write(ioapic_idx, 0, reg_00.raw); 2903 } 2904 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2905 } 2906 2907 static void ioapic_resume(void) 2908 { 2909 int ioapic_idx; 2910 2911 for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--) 2912 resume_ioapic_id(ioapic_idx); 2913 2914 restore_ioapic_entries(); 2915 } 2916 2917 static struct syscore_ops ioapic_syscore_ops = { 2918 .suspend = save_ioapic_entries, 2919 .resume = ioapic_resume, 2920 }; 2921 2922 static int __init ioapic_init_ops(void) 2923 { 2924 register_syscore_ops(&ioapic_syscore_ops); 2925 2926 return 0; 2927 } 2928 2929 device_initcall(ioapic_init_ops); 2930 2931 /* 2932 * Dynamic irq allocate and deallocation 2933 */ 2934 unsigned int __create_irqs(unsigned int from, unsigned int count, int node) 2935 { 2936 struct irq_cfg **cfg; 2937 unsigned long flags; 2938 int irq, i; 2939 2940 if (from < nr_irqs_gsi) 2941 from = nr_irqs_gsi; 2942 2943 cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); 2944 if (!cfg) 2945 return 0; 2946 2947 irq = alloc_irqs_from(from, count, node); 2948 if (irq < 0) 2949 goto out_cfgs; 2950 2951 for (i = 0; i < count; i++) { 2952 cfg[i] = alloc_irq_cfg(irq + i, node); 2953 if (!cfg[i]) 2954 goto out_irqs; 2955 } 2956 2957 raw_spin_lock_irqsave(&vector_lock, flags); 2958 for (i = 0; i < count; i++) 2959 if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) 2960 goto 
out_vecs; 2961 raw_spin_unlock_irqrestore(&vector_lock, flags); 2962 2963 for (i = 0; i < count; i++) { 2964 irq_set_chip_data(irq + i, cfg[i]); 2965 irq_clear_status_flags(irq + i, IRQ_NOREQUEST); 2966 } 2967 2968 kfree(cfg); 2969 return irq; 2970 2971 out_vecs: 2972 for (i--; i >= 0; i--) 2973 __clear_irq_vector(irq + i, cfg[i]); 2974 raw_spin_unlock_irqrestore(&vector_lock, flags); 2975 out_irqs: 2976 for (i = 0; i < count; i++) 2977 free_irq_at(irq + i, cfg[i]); 2978 out_cfgs: 2979 kfree(cfg); 2980 return 0; 2981 } 2982 2983 unsigned int create_irq_nr(unsigned int from, int node) 2984 { 2985 return __create_irqs(from, 1, node); 2986 } 2987 2988 int create_irq(void) 2989 { 2990 int node = cpu_to_node(0); 2991 unsigned int irq_want; 2992 int irq; 2993 2994 irq_want = nr_irqs_gsi; 2995 irq = create_irq_nr(irq_want, node); 2996 2997 if (irq == 0) 2998 irq = -1; 2999 3000 return irq; 3001 } 3002 3003 void destroy_irq(unsigned int irq) 3004 { 3005 struct irq_cfg *cfg = irq_get_chip_data(irq); 3006 unsigned long flags; 3007 3008 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3009 3010 if (irq_remapped(cfg)) 3011 free_remapped_irq(irq); 3012 raw_spin_lock_irqsave(&vector_lock, flags); 3013 __clear_irq_vector(irq, cfg); 3014 raw_spin_unlock_irqrestore(&vector_lock, flags); 3015 free_irq_at(irq, cfg); 3016 } 3017 3018 void destroy_irqs(unsigned int irq, unsigned int count) 3019 { 3020 unsigned int i; 3021 3022 for (i = 0; i < count; i++) 3023 destroy_irq(irq + i); 3024 } 3025 3026 /* 3027 * MSI message composition 3028 */ 3029 #ifdef CONFIG_PCI_MSI 3030 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3031 struct msi_msg *msg, u8 hpet_id) 3032 { 3033 struct irq_cfg *cfg; 3034 int err; 3035 unsigned dest; 3036 3037 if (disable_apic) 3038 return -ENXIO; 3039 3040 cfg = irq_cfg(irq); 3041 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3042 if (err) 3043 return err; 3044 3045 err = apic->cpu_mask_to_apicid_and(cfg->domain, 3046 apic->target_cpus(), &dest); 3047 if (err) 3048 return err; 3049 3050 if (irq_remapped(cfg)) { 3051 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); 3052 return 0; 3053 } 3054 3055 if (x2apic_enabled()) 3056 msg->address_hi = MSI_ADDR_BASE_HI | 3057 MSI_ADDR_EXT_DEST_ID(dest); 3058 else 3059 msg->address_hi = MSI_ADDR_BASE_HI; 3060 3061 msg->address_lo = 3062 MSI_ADDR_BASE_LO | 3063 ((apic->irq_dest_mode == 0) ? 3064 MSI_ADDR_DEST_MODE_PHYSICAL: 3065 MSI_ADDR_DEST_MODE_LOGICAL) | 3066 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3067 MSI_ADDR_REDIRECTION_CPU: 3068 MSI_ADDR_REDIRECTION_LOWPRI) | 3069 MSI_ADDR_DEST_ID(dest); 3070 3071 msg->data = 3072 MSI_DATA_TRIGGER_EDGE | 3073 MSI_DATA_LEVEL_ASSERT | 3074 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3075 MSI_DATA_DELIVERY_FIXED: 3076 MSI_DATA_DELIVERY_LOWPRI) | 3077 MSI_DATA_VECTOR(cfg->vector); 3078 3079 return 0; 3080 } 3081 3082 static int 3083 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3084 { 3085 struct irq_cfg *cfg = data->chip_data; 3086 struct msi_msg msg; 3087 unsigned int dest; 3088 3089 if (__ioapic_set_affinity(data, mask, &dest)) 3090 return -1; 3091 3092 __get_cached_msi_msg(data->msi_desc, &msg); 3093 3094 msg.data &= ~MSI_DATA_VECTOR_MASK; 3095 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3096 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3097 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3098 3099 __write_msi_msg(data->msi_desc, &msg); 3100 3101 return IRQ_SET_MASK_OK_NOCOPY; 3102 } 3103 3104 /* 3105 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3106 * which implement the MSI or MSI-X Capability Structure. 3107 */ 3108 static struct irq_chip msi_chip = { 3109 .name = "PCI-MSI", 3110 .irq_unmask = unmask_msi_irq, 3111 .irq_mask = mask_msi_irq, 3112 .irq_ack = ack_apic_edge, 3113 .irq_set_affinity = msi_set_affinity, 3114 .irq_retrigger = ioapic_retrigger_irq, 3115 }; 3116 3117 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, 3118 unsigned int irq_base, unsigned int irq_offset) 3119 { 3120 struct irq_chip *chip = &msi_chip; 3121 struct msi_msg msg; 3122 unsigned int irq = irq_base + irq_offset; 3123 int ret; 3124 3125 ret = msi_compose_msg(dev, irq, &msg, -1); 3126 if (ret < 0) 3127 return ret; 3128 3129 irq_set_msi_desc_off(irq_base, irq_offset, msidesc); 3130 3131 /* 3132 * MSI-X message is written per-IRQ, the offset is always 0. 3133 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. 3134 */ 3135 if (!irq_offset) 3136 write_msi_msg(irq, &msg); 3137 3138 if (irq_remapped(irq_get_chip_data(irq))) { 3139 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3140 irq_remap_modify_chip_defaults(chip); 3141 } 3142 3143 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3144 3145 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3146 3147 return 0; 3148 } 3149 3150 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3151 { 3152 unsigned int irq, irq_want; 3153 struct msi_desc *msidesc; 3154 int node, ret; 3155 3156 /* Multiple MSI vectors only supported with interrupt remapping */ 3157 if (type == PCI_CAP_ID_MSI && nvec > 1) 3158 return 1; 3159 3160 node = dev_to_node(&dev->dev); 3161 irq_want = nr_irqs_gsi; 3162 list_for_each_entry(msidesc, &dev->msi_list, list) { 3163 irq = create_irq_nr(irq_want, node); 3164 if (irq == 0) 3165 return -ENOSPC; 3166 3167 irq_want = irq + 1; 3168 3169 ret = setup_msi_irq(dev, msidesc, irq, 0); 3170 if (ret < 0) 3171 goto error; 3172 } 3173 return 0; 3174 3175 error: 3176 destroy_irq(irq); 3177 return ret; 3178 } 3179 3180 void native_teardown_msi_irq(unsigned int irq) 3181 { 3182 destroy_irq(irq); 3183 } 3184 3185 #ifdef CONFIG_DMAR_TABLE 3186 static int 3187 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3188 bool force) 3189 { 3190 struct irq_cfg *cfg = data->chip_data; 3191 unsigned int dest, irq = data->irq; 3192 struct msi_msg msg; 3193 3194 if (__ioapic_set_affinity(data, mask, &dest)) 3195 return -1; 3196 3197 dmar_msi_read(irq, &msg); 3198 3199 msg.data &= ~MSI_DATA_VECTOR_MASK; 3200 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3201 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3202 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3203 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3204 3205 
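	/* Push the retargeted vector/destination back to the DMAR unit's MSI registers. */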
dmar_msi_write(irq, &msg); 3206 3207 return IRQ_SET_MASK_OK_NOCOPY; 3208 } 3209 3210 static struct irq_chip dmar_msi_type = { 3211 .name = "DMAR_MSI", 3212 .irq_unmask = dmar_msi_unmask, 3213 .irq_mask = dmar_msi_mask, 3214 .irq_ack = ack_apic_edge, 3215 .irq_set_affinity = dmar_msi_set_affinity, 3216 .irq_retrigger = ioapic_retrigger_irq, 3217 }; 3218 3219 int arch_setup_dmar_msi(unsigned int irq) 3220 { 3221 int ret; 3222 struct msi_msg msg; 3223 3224 ret = msi_compose_msg(NULL, irq, &msg, -1); 3225 if (ret < 0) 3226 return ret; 3227 dmar_msi_write(irq, &msg); 3228 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3229 "edge"); 3230 return 0; 3231 } 3232 #endif 3233 3234 #ifdef CONFIG_HPET_TIMER 3235 3236 static int hpet_msi_set_affinity(struct irq_data *data, 3237 const struct cpumask *mask, bool force) 3238 { 3239 struct irq_cfg *cfg = data->chip_data; 3240 struct msi_msg msg; 3241 unsigned int dest; 3242 3243 if (__ioapic_set_affinity(data, mask, &dest)) 3244 return -1; 3245 3246 hpet_msi_read(data->handler_data, &msg); 3247 3248 msg.data &= ~MSI_DATA_VECTOR_MASK; 3249 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3250 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3251 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3252 3253 hpet_msi_write(data->handler_data, &msg); 3254 3255 return IRQ_SET_MASK_OK_NOCOPY; 3256 } 3257 3258 static struct irq_chip hpet_msi_type = { 3259 .name = "HPET_MSI", 3260 .irq_unmask = hpet_msi_unmask, 3261 .irq_mask = hpet_msi_mask, 3262 .irq_ack = ack_apic_edge, 3263 .irq_set_affinity = hpet_msi_set_affinity, 3264 .irq_retrigger = ioapic_retrigger_irq, 3265 }; 3266 3267 int default_setup_hpet_msi(unsigned int irq, unsigned int id) 3268 { 3269 struct irq_chip *chip = &hpet_msi_type; 3270 struct msi_msg msg; 3271 int ret; 3272 3273 ret = msi_compose_msg(NULL, irq, &msg, id); 3274 if (ret < 0) 3275 return ret; 3276 3277 hpet_msi_write(irq_get_handler_data(irq), &msg); 3278 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3279 if (irq_remapped(irq_get_chip_data(irq))) 3280 irq_remap_modify_chip_defaults(chip); 3281 3282 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3283 return 0; 3284 } 3285 #endif 3286 3287 #endif /* CONFIG_PCI_MSI */ 3288 /* 3289 * Hypertransport interrupt support 3290 */ 3291 #ifdef CONFIG_HT_IRQ 3292 3293 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3294 { 3295 struct ht_irq_msg msg; 3296 fetch_ht_irq_msg(irq, &msg); 3297 3298 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3299 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3300 3301 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3302 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3303 3304 write_ht_irq_msg(irq, &msg); 3305 } 3306 3307 static int 3308 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3309 { 3310 struct irq_cfg *cfg = data->chip_data; 3311 unsigned int dest; 3312 3313 if (__ioapic_set_affinity(data, mask, &dest)) 3314 return -1; 3315 3316 target_ht_irq(data->irq, dest, cfg->vector); 3317 return IRQ_SET_MASK_OK_NOCOPY; 3318 } 3319 3320 static struct irq_chip ht_irq_chip = { 3321 .name = "PCI-HT", 3322 .irq_mask = mask_ht_irq, 3323 .irq_unmask = unmask_ht_irq, 3324 .irq_ack = ack_apic_edge, 3325 .irq_set_affinity = ht_set_affinity, 3326 .irq_retrigger = ioapic_retrigger_irq, 3327 }; 3328 3329 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3330 { 3331 struct irq_cfg *cfg; 3332 struct ht_irq_msg msg; 3333 unsigned dest; 3334 int err; 3335 3336 if 
(disable_apic) 3337 return -ENXIO; 3338 3339 cfg = irq_cfg(irq); 3340 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3341 if (err) 3342 return err; 3343 3344 err = apic->cpu_mask_to_apicid_and(cfg->domain, 3345 apic->target_cpus(), &dest); 3346 if (err) 3347 return err; 3348 3349 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3350 3351 msg.address_lo = 3352 HT_IRQ_LOW_BASE | 3353 HT_IRQ_LOW_DEST_ID(dest) | 3354 HT_IRQ_LOW_VECTOR(cfg->vector) | 3355 ((apic->irq_dest_mode == 0) ? 3356 HT_IRQ_LOW_DM_PHYSICAL : 3357 HT_IRQ_LOW_DM_LOGICAL) | 3358 HT_IRQ_LOW_RQEOI_EDGE | 3359 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3360 HT_IRQ_LOW_MT_FIXED : 3361 HT_IRQ_LOW_MT_ARBITRATED) | 3362 HT_IRQ_LOW_IRQ_MASKED; 3363 3364 write_ht_irq_msg(irq, &msg); 3365 3366 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3367 handle_edge_irq, "edge"); 3368 3369 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3370 3371 return 0; 3372 } 3373 #endif /* CONFIG_HT_IRQ */ 3374 3375 static int 3376 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3377 { 3378 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); 3379 int ret; 3380 3381 if (!cfg) 3382 return -EINVAL; 3383 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3384 if (!ret) 3385 setup_ioapic_irq(irq, cfg, attr); 3386 return ret; 3387 } 3388 3389 int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3390 struct io_apic_irq_attr *attr) 3391 { 3392 unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; 3393 int ret; 3394 3395 /* Avoid redundant programming */ 3396 if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) { 3397 pr_debug("Pin %d-%d already programmed\n", 3398 mpc_ioapic_id(ioapic_idx), pin); 3399 return 0; 3400 } 3401 ret = io_apic_setup_irq_pin(irq, node, attr); 3402 if (!ret) 3403 set_bit(pin, ioapics[ioapic_idx].pin_programmed); 3404 return ret; 3405 } 3406 3407 static int __init io_apic_get_redir_entries(int ioapic) 3408 { 3409 union IO_APIC_reg_01 reg_01; 3410 unsigned long flags; 3411 3412 raw_spin_lock_irqsave(&ioapic_lock, flags); 3413 reg_01.raw = io_apic_read(ioapic, 1); 3414 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3415 3416 /* The register returns the maximum index redir index 3417 * supported, which is one less than the total number of redir 3418 * entries. 3419 */ 3420 return reg_01.bits.entries + 1; 3421 } 3422 3423 static void __init probe_nr_irqs_gsi(void) 3424 { 3425 int nr; 3426 3427 nr = gsi_top + NR_IRQS_LEGACY; 3428 if (nr > nr_irqs_gsi) 3429 nr_irqs_gsi = nr; 3430 3431 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3432 } 3433 3434 int get_nr_irqs_gsi(void) 3435 { 3436 return nr_irqs_gsi; 3437 } 3438 3439 int __init arch_probe_nr_irqs(void) 3440 { 3441 int nr; 3442 3443 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3444 nr_irqs = NR_VECTORS * nr_cpu_ids; 3445 3446 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3447 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3448 /* 3449 * for MSI and HT dyn irq 3450 */ 3451 nr += nr_irqs_gsi * 16; 3452 #endif 3453 if (nr < nr_irqs) 3454 nr_irqs = nr; 3455 3456 return NR_IRQS_LEGACY; 3457 } 3458 3459 int io_apic_set_pci_routing(struct device *dev, int irq, 3460 struct io_apic_irq_attr *irq_attr) 3461 { 3462 int node; 3463 3464 if (!IO_APIC_IRQ(irq)) { 3465 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3466 irq_attr->ioapic); 3467 return -EINVAL; 3468 } 3469 3470 node = dev ? 
dev_to_node(dev) : cpu_to_node(0); 3471 3472 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3473 } 3474 3475 #ifdef CONFIG_X86_32 3476 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3477 { 3478 union IO_APIC_reg_00 reg_00; 3479 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3480 physid_mask_t tmp; 3481 unsigned long flags; 3482 int i = 0; 3483 3484 /* 3485 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3486 * buses (one for LAPICs, one for IOAPICs), where predecessors only 3487 * supports up to 16 on one shared APIC bus. 3488 * 3489 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3490 * advantage of new APIC bus architecture. 3491 */ 3492 3493 if (physids_empty(apic_id_map)) 3494 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3495 3496 raw_spin_lock_irqsave(&ioapic_lock, flags); 3497 reg_00.raw = io_apic_read(ioapic, 0); 3498 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3499 3500 if (apic_id >= get_physical_broadcast()) { 3501 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3502 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3503 apic_id = reg_00.bits.ID; 3504 } 3505 3506 /* 3507 * Every APIC in a system must have a unique ID or we get lots of nice 3508 * 'stuck on smp_invalidate_needed IPI wait' messages. 3509 */ 3510 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3511 3512 for (i = 0; i < get_physical_broadcast(); i++) { 3513 if (!apic->check_apicid_used(&apic_id_map, i)) 3514 break; 3515 } 3516 3517 if (i == get_physical_broadcast()) 3518 panic("Max apic_id exceeded!\n"); 3519 3520 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3521 "trying %d\n", ioapic, apic_id, i); 3522 3523 apic_id = i; 3524 } 3525 3526 apic->apicid_to_cpu_present(apic_id, &tmp); 3527 physids_or(apic_id_map, apic_id_map, tmp); 3528 3529 if (reg_00.bits.ID != apic_id) { 3530 reg_00.bits.ID = apic_id; 3531 3532 raw_spin_lock_irqsave(&ioapic_lock, flags); 3533 io_apic_write(ioapic, 0, reg_00.raw); 3534 reg_00.raw = io_apic_read(ioapic, 0); 3535 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3536 3537 /* Sanity check */ 3538 if (reg_00.bits.ID != apic_id) { 3539 pr_err("IOAPIC[%d]: Unable to change apic_id!\n", 3540 ioapic); 3541 return -1; 3542 } 3543 } 3544 3545 apic_printk(APIC_VERBOSE, KERN_INFO 3546 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3547 3548 return apic_id; 3549 } 3550 3551 static u8 __init io_apic_unique_id(u8 id) 3552 { 3553 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3554 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3555 return io_apic_get_unique_id(nr_ioapics, id); 3556 else 3557 return id; 3558 } 3559 #else 3560 static u8 __init io_apic_unique_id(u8 id) 3561 { 3562 int i; 3563 DECLARE_BITMAP(used, 256); 3564 3565 bitmap_zero(used, 256); 3566 for (i = 0; i < nr_ioapics; i++) { 3567 __set_bit(mpc_ioapic_id(i), used); 3568 } 3569 if (!test_bit(id, used)) 3570 return id; 3571 return find_first_zero_bit(used, 256); 3572 } 3573 #endif 3574 3575 static int __init io_apic_get_version(int ioapic) 3576 { 3577 union IO_APIC_reg_01 reg_01; 3578 unsigned long flags; 3579 3580 raw_spin_lock_irqsave(&ioapic_lock, flags); 3581 reg_01.raw = io_apic_read(ioapic, 1); 3582 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3583 3584 return reg_01.bits.version; 3585 } 3586 3587 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3588 { 3589 int ioapic, pin, idx; 3590 3591 if (skip_ioapic_setup) 3592 return -1; 3593 3594 ioapic = mp_find_ioapic(gsi); 3595 if 
(ioapic < 0) 3596 return -1; 3597 3598 pin = mp_find_ioapic_pin(ioapic, gsi); 3599 if (pin < 0) 3600 return -1; 3601 3602 idx = find_irq_entry(ioapic, pin, mp_INT); 3603 if (idx < 0) 3604 return -1; 3605 3606 *trigger = irq_trigger(idx); 3607 *polarity = irq_polarity(idx); 3608 return 0; 3609 } 3610 3611 /* 3612 * This function currently is only a helper for the i386 smp boot process where 3613 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3614 * so mask in all cases should simply be apic->target_cpus() 3615 */ 3616 #ifdef CONFIG_SMP 3617 void __init setup_ioapic_dest(void) 3618 { 3619 int pin, ioapic, irq, irq_entry; 3620 const struct cpumask *mask; 3621 struct irq_data *idata; 3622 3623 if (skip_ioapic_setup == 1) 3624 return; 3625 3626 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3627 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3628 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3629 if (irq_entry == -1) 3630 continue; 3631 irq = pin_2_irq(irq_entry, ioapic, pin); 3632 3633 if ((ioapic > 0) && (irq > 16)) 3634 continue; 3635 3636 idata = irq_get_irq_data(irq); 3637 3638 /* 3639 * Honour affinities which have been set in early boot 3640 */ 3641 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3642 mask = idata->affinity; 3643 else 3644 mask = apic->target_cpus(); 3645 3646 x86_io_apic_ops.set_affinity(idata, mask, false); 3647 } 3648 3649 } 3650 #endif 3651 3652 #define IOAPIC_RESOURCE_NAME_SIZE 11 3653 3654 static struct resource *ioapic_resources; 3655 3656 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3657 { 3658 unsigned long n; 3659 struct resource *res; 3660 char *mem; 3661 int i; 3662 3663 if (nr_ioapics <= 0) 3664 return NULL; 3665 3666 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3667 n *= nr_ioapics; 3668 3669 mem = alloc_bootmem(n); 3670 res = (void *)mem; 3671 3672 mem += sizeof(struct resource) * nr_ioapics; 3673 3674 for (i = 0; i < nr_ioapics; i++) { 3675 res[i].name = mem; 3676 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3677 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3678 mem += IOAPIC_RESOURCE_NAME_SIZE; 3679 } 3680 3681 ioapic_resources = res; 3682 3683 return res; 3684 } 3685 3686 void __init native_io_apic_init_mappings(void) 3687 { 3688 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3689 struct resource *ioapic_res; 3690 int i; 3691 3692 ioapic_res = ioapic_setup_resources(nr_ioapics); 3693 for (i = 0; i < nr_ioapics; i++) { 3694 if (smp_found_config) { 3695 ioapic_phys = mpc_ioapic_addr(i); 3696 #ifdef CONFIG_X86_32 3697 if (!ioapic_phys) { 3698 printk(KERN_ERR 3699 "WARNING: bogus zero IO-APIC " 3700 "address found in MPTABLE, " 3701 "disabling IO/APIC support!\n"); 3702 smp_found_config = 0; 3703 skip_ioapic_setup = 1; 3704 goto fake_ioapic_page; 3705 } 3706 #endif 3707 } else { 3708 #ifdef CONFIG_X86_32 3709 fake_ioapic_page: 3710 #endif 3711 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3712 ioapic_phys = __pa(ioapic_phys); 3713 } 3714 set_fixmap_nocache(idx, ioapic_phys); 3715 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3716 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3717 ioapic_phys); 3718 idx++; 3719 3720 ioapic_res->start = ioapic_phys; 3721 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3722 ioapic_res++; 3723 } 3724 3725 probe_nr_irqs_gsi(); 3726 } 3727 3728 void __init ioapic_insert_resources(void) 3729 { 3730 int i; 3731 struct resource *r = ioapic_resources; 3732 3733 if (!r) 
{ 3734 if (nr_ioapics > 0) 3735 printk(KERN_ERR 3736 "IO APIC resources couldn't be allocated.\n"); 3737 return; 3738 } 3739 3740 for (i = 0; i < nr_ioapics; i++) { 3741 insert_resource(&iomem_resource, r); 3742 r++; 3743 } 3744 } 3745 3746 int mp_find_ioapic(u32 gsi) 3747 { 3748 int i = 0; 3749 3750 if (nr_ioapics == 0) 3751 return -1; 3752 3753 /* Find the IOAPIC that manages this GSI. */ 3754 for (i = 0; i < nr_ioapics; i++) { 3755 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3756 if ((gsi >= gsi_cfg->gsi_base) 3757 && (gsi <= gsi_cfg->gsi_end)) 3758 return i; 3759 } 3760 3761 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3762 return -1; 3763 } 3764 3765 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3766 { 3767 struct mp_ioapic_gsi *gsi_cfg; 3768 3769 if (WARN_ON(ioapic == -1)) 3770 return -1; 3771 3772 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3773 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3774 return -1; 3775 3776 return gsi - gsi_cfg->gsi_base; 3777 } 3778 3779 static __init int bad_ioapic(unsigned long address) 3780 { 3781 if (nr_ioapics >= MAX_IO_APICS) { 3782 pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n", 3783 MAX_IO_APICS, nr_ioapics); 3784 return 1; 3785 } 3786 if (!address) { 3787 pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n"); 3788 return 1; 3789 } 3790 return 0; 3791 } 3792 3793 static __init int bad_ioapic_register(int idx) 3794 { 3795 union IO_APIC_reg_00 reg_00; 3796 union IO_APIC_reg_01 reg_01; 3797 union IO_APIC_reg_02 reg_02; 3798 3799 reg_00.raw = io_apic_read(idx, 0); 3800 reg_01.raw = io_apic_read(idx, 1); 3801 reg_02.raw = io_apic_read(idx, 2); 3802 3803 if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) { 3804 pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n", 3805 mpc_ioapic_addr(idx)); 3806 return 1; 3807 } 3808 3809 return 0; 3810 } 3811 3812 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3813 { 3814 int idx = 0; 3815 int entries; 3816 struct mp_ioapic_gsi *gsi_cfg; 3817 3818 if (bad_ioapic(address)) 3819 return; 3820 3821 idx = nr_ioapics; 3822 3823 ioapics[idx].mp_config.type = MP_IOAPIC; 3824 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3825 ioapics[idx].mp_config.apicaddr = address; 3826 3827 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3828 3829 if (bad_ioapic_register(idx)) { 3830 clear_fixmap(FIX_IO_APIC_BASE_0 + idx); 3831 return; 3832 } 3833 3834 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 3835 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 3836 3837 /* 3838 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 3839 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 
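 *
 * Each IO-APIC covers the GSI range [gsi_base, gsi_base + entries - 1];
 * mp_find_ioapic() walks these ranges to map a GSI to an IO-APIC, and
 * mp_find_ioapic_pin() converts it back to a pin as gsi - gsi_cfg->gsi_base.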
3840 */ 3841 entries = io_apic_get_redir_entries(idx); 3842 gsi_cfg = mp_ioapic_gsi_routing(idx); 3843 gsi_cfg->gsi_base = gsi_base; 3844 gsi_cfg->gsi_end = gsi_base + entries - 1; 3845 3846 /* 3847 * The number of IO-APIC IRQ registers (== #pins): 3848 */ 3849 ioapics[idx].nr_registers = entries; 3850 3851 if (gsi_cfg->gsi_end >= gsi_top) 3852 gsi_top = gsi_cfg->gsi_end + 1; 3853 3854 pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", 3855 idx, mpc_ioapic_id(idx), 3856 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 3857 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 3858 3859 nr_ioapics++; 3860 } 3861 3862 /* Enable IOAPIC early just for system timer */ 3863 void __init pre_init_apic_IRQ0(void) 3864 { 3865 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 3866 3867 printk(KERN_INFO "Early APIC setup for system timer0\n"); 3868 #ifndef CONFIG_SMP 3869 physid_set_mask_of_physid(boot_cpu_physical_apicid, 3870 &phys_cpu_present_map); 3871 #endif 3872 setup_local_APIC(); 3873 3874 io_apic_setup_irq_pin(0, 0, &attr); 3875 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 3876 "edge"); 3877 } 3878
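
/*
 * A minimal usage sketch (not built -- illustrative only) of the dynamic
 * IRQ allocation helpers defined above: create_irq() hands back an IRQ
 * number above nr_irqs_gsi with a vector already assigned, and
 * destroy_irq() releases it again.  The handler, name and dev_id below
 * are hypothetical stand-ins for whatever a real caller would supply.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* Acknowledge/handle the device here. */
	return IRQ_HANDLED;
}

static int example_claim_dynamic_irq(void *dev_id)
{
	int irq = create_irq();		/* picks a free IRQ above nr_irqs_gsi */

	if (irq < 0)
		return -ENOSPC;

	if (request_irq(irq, example_handler, 0, "example", dev_id)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	return irq;	/* caller later pairs free_irq() with destroy_irq() */
}
#endif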