/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 *      Is the SiS APIC rmw bug present?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(id)	ioapics[id].mp_config.apicver

int mpc_ioapic_id(int id)
{
	return ioapics[id].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int id)
{
	return ioapics[id].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int id)
{
	return &ioapics[id].gsi_config;
}

int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQs, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}
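
/*
 * Layout sketch for reference (standard 82093AA-style RTEs, matching the
 * IO_APIC_REDIR_* masks used in this file; not spelled out elsewhere here):
 * each redirection table entry is a 64-bit value exposed as two 32-bit
 * registers, 0x10 + 2*pin (low word, eu.w1) and 0x11 + 2*pin (high word,
 * eu.w2).  The low word carries the vector (bits 0-7), delivery mode
 * (8-10), dest mode (11), delivery status (12), polarity (13), remote IRR
 * (14), trigger (15) and mask (16); the high word carries the destination
 * in its top byte.  union entry_union above converts between this raw
 * w1/w2 view and struct IO_APIC_route_entry.
 */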

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
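
/*
 * Illustrative example (hypothetical topology): if the mptable lists one
 * ISA IRQ both on pin 9 of IO-APIC 0 and on pin 3 of IO-APIC 1, the irq's
 * cfg->irq_2_pin chain ends up as (0,9) -> (1,3), and the mask/unmask
 * helpers below walk both entries.
 */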

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};
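
/*
 * Worked example (hypothetical command line): "pirq=5,11" stores 5 in
 * pirq_entries[7] and 11 in pirq_entries[6]; i.e. the first value on the
 * command line steers IO-APIC pin 23 (PIRQ7) and the second pin 22
 * (PIRQ6), per the "mapped upside down" note in ioapic_pirq_setup()
 * below and the pin 16-23 lookup in pin_2_irq().
 */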

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTEs
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(apic) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int apic;

		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
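
/*
 * Decoding reference for the two functions below (from the MP
 * specification): the low nibble of mp_irqs[].irqflag encodes polarity in
 * bits 0-1 and trigger mode in bits 2-3, with 00 = conforms to the bus,
 * 01 = active high / edge, 10 = reserved, 11 = active low / level.  The
 * switch statements in irq_polarity() and irq_trigger() walk exactly
 * these cases.
 */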
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag >> 2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
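
/*
 * Illustrative mapping for pin_2_irq() (assuming gsi_top is, say, 40 on a
 * given box): a PCI interrupt whose GSI is 19 simply becomes irq 19,
 * while a PCI GSI below NR_IRQS_LEGACY, e.g. 9, is pushed up to
 * irq 40 + 9 = 49 so that it cannot collide with the identity-mapped
 * ISA irq 9.
 */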

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mpc_ioapic_id(apic) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	return err;
}
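
/*
 * Worked example of the spreading above (vector numbers are illustrative):
 * starting from vector 0x31, successive allocations hand out 0x39, 0x41,
 * 0x49, ... in steps of 8; once the search hits first_system_vector it
 * wraps back to FIRST_EXTERNAL_VECTOR with the offset bumped by one.
 * Since the priority level is vector >> 4, at most two consecutively
 * allocated vectors share a level.
 */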

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		chip = &ir_ioapic_chip;
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}

static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n",
			      apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;

	return 0;
}

static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
			     struct irq_cfg *cfg, int trigger, int polarity)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mpc_ioapic_id(apic_id), pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, cfg, trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(apic_id, pin, entry);
}

static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mpc_ioapic_id(apic_id), pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int apic_id)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < ioapics[apic_id].nr_registers; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, apic_id, pin))
			continue;

		irq = pin_2_irq(idx, apic_id, pin);

		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(apic_id, irq))
			continue;

		set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

static void __init setup_IO_APIC_irqs(void)
{
	unsigned int apic_id;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
		__io_apic_setup_irqs(apic_id);
}

/*
 * For a GSI that is not on the first ioapic but could not use
 * acpi_register_gsi(), like some special SCI on IBM x3330:
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}

__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(i), ioapics[i].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(apic));
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			" Stat Dmod Deli Vect:\n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
				i,
				entry.dest
			);

			printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {                       /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
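
/*
 * Background note on the port I/O above: with 8259A OCW3, writing 0x0a to
 * a command port (0x20/0xa0) selects the IRR for subsequent reads and
 * 0x0b selects the ISR.  print_PIC() thus reads the in-service registers
 * while 0x0b is selected and then restores the power-on default of
 * reading the IRR.
 */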

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

late_initcall(print_ICs);


/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is little complex (need to configure both
	 * IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(apic_id);

		if (mpc_ioapic_id(apic_id) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[apic_id].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(apic_id))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mpc_ioapic_id(apic_id));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[apic_id].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(apic_id),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(apic_id));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(apic_id))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(apic_id);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(apic_id) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(apic_id));

		reg_00.bits.ID = mpc_ioapic_id(apic_id);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(apic_id))
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
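
/*
 * Worked example of the check above (illustrative, with HZ = 1000): the
 * mdelay((10 * 1000) / HZ) call busy-waits 10ms, i.e. ten timer ticks,
 * and time_after(jiffies, t1 + 4) then demands that more than four of
 * them were actually delivered before the timer is declared working.
 */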
2092 * 2093 * This is not complete - we should be able to fake 2094 * an edge even if it isn't on the 8259A... 2095 */ 2096 2097 static unsigned int startup_ioapic_irq(struct irq_data *data) 2098 { 2099 int was_pending = 0, irq = data->irq; 2100 unsigned long flags; 2101 2102 raw_spin_lock_irqsave(&ioapic_lock, flags); 2103 if (irq < legacy_pic->nr_legacy_irqs) { 2104 legacy_pic->mask(irq); 2105 if (legacy_pic->irq_pending(irq)) 2106 was_pending = 1; 2107 } 2108 __unmask_ioapic(data->chip_data); 2109 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2110 2111 return was_pending; 2112 } 2113 2114 static int ioapic_retrigger_irq(struct irq_data *data) 2115 { 2116 struct irq_cfg *cfg = data->chip_data; 2117 unsigned long flags; 2118 2119 raw_spin_lock_irqsave(&vector_lock, flags); 2120 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2121 raw_spin_unlock_irqrestore(&vector_lock, flags); 2122 2123 return 1; 2124 } 2125 2126 /* 2127 * Level and edge triggered IO-APIC interrupts need different handling, 2128 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 2129 * handled with the level-triggered descriptor, but that one has slightly 2130 * more overhead. Level-triggered interrupts cannot be handled with the 2131 * edge-triggered handler, without risking IRQ storms and other ugly 2132 * races. 2133 */ 2134 2135 #ifdef CONFIG_SMP 2136 void send_cleanup_vector(struct irq_cfg *cfg) 2137 { 2138 cpumask_var_t cleanup_mask; 2139 2140 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2141 unsigned int i; 2142 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2143 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2144 } else { 2145 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2146 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2147 free_cpumask_var(cleanup_mask); 2148 } 2149 cfg->move_in_progress = 0; 2150 } 2151 2152 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2153 { 2154 int apic, pin; 2155 struct irq_pin_list *entry; 2156 u8 vector = cfg->vector; 2157 2158 for_each_irq_pin(entry, cfg->irq_2_pin) { 2159 unsigned int reg; 2160 2161 apic = entry->apic; 2162 pin = entry->pin; 2163 /* 2164 * With interrupt-remapping, destination information comes 2165 * from interrupt-remapping table entry. 2166 */ 2167 if (!irq_remapped(cfg)) 2168 io_apic_write(apic, 0x11 + pin*2, dest); 2169 reg = io_apic_read(apic, 0x10 + pin*2); 2170 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2171 reg |= vector; 2172 io_apic_modify(apic, 0x10 + pin*2, reg); 2173 } 2174 } 2175 2176 /* 2177 * Either sets data->affinity to a valid value, and returns 2178 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2179 * leaves data->affinity untouched. 
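 *
 * The intended caller pattern (ioapic_set_affinity() below is one
 * example):
 *
 *	unsigned int dest;
 *
 *	if (!__ioapic_set_affinity(data, mask, &dest))
 *		reprogram the RTE or MSI message with 'dest';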
2180 */
2181 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2182 unsigned int *dest_id)
2183 {
2184 struct irq_cfg *cfg = data->chip_data;
2185
2186 if (!cpumask_intersects(mask, cpu_online_mask))
2187 return -1;
2188
2189 if (assign_irq_vector(data->irq, data->chip_data, mask))
2190 return -1;
2191
2192 cpumask_copy(data->affinity, mask);
2193
2194 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
2195 return 0;
2196 }
2197
2198 static int
2199 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2200 bool force)
2201 {
2202 unsigned int dest, irq = data->irq;
2203 unsigned long flags;
2204 int ret;
2205
2206 raw_spin_lock_irqsave(&ioapic_lock, flags);
2207 ret = __ioapic_set_affinity(data, mask, &dest);
2208 if (!ret) {
2209 /* Only the high 8 bits are valid. */
2210 dest = SET_APIC_LOGICAL_ID(dest);
2211 __target_IO_APIC_irq(irq, dest, data->chip_data);
2212 }
2213 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2214 return ret;
2215 }
2216
2217 #ifdef CONFIG_INTR_REMAP
2218
2219 /*
2220 * Migrate the IO-APIC irq in the presence of intr-remapping.
2221 *
2222 * For both level and edge triggered, irq migration is a simple atomic
2223 * update (of vector and cpu destination) of the IRTE and a flush of the hardware cache.
2224 *
2225 * For level triggered, we eliminate the io-apic RTE modification (with the
2226 * updated vector information) by using a virtual vector (the io-apic pin number).
2227 * The real vector that is used for interrupting the cpu comes from
2228 * the interrupt-remapping table entry.
2229 */
2230 static int
2231 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2232 bool force)
2233 {
2234 struct irq_cfg *cfg = data->chip_data;
2235 unsigned int dest, irq = data->irq;
2236 struct irte irte;
2237
2238 if (!cpumask_intersects(mask, cpu_online_mask))
2239 return -EINVAL;
2240
2241 if (get_irte(irq, &irte))
2242 return -EBUSY;
2243
2244 if (assign_irq_vector(irq, cfg, mask))
2245 return -EBUSY;
2246
2247 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2248
2249 irte.vector = cfg->vector;
2250 irte.dest_id = IRTE_DEST(dest);
2251
2252 /*
2253 * Modify the IRTE and flush the interrupt entry cache.
2254 */
2255 modify_irte(irq, &irte);
2256
2257 if (cfg->move_in_progress)
2258 send_cleanup_vector(cfg);
2259
2260 cpumask_copy(data->affinity, mask);
2261 return 0;
2262 }
2263
2264 #else
2265 static inline int
2266 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2267 bool force)
2268 {
2269 return 0;
2270 }
2271 #endif
2272
2273 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2274 {
2275 unsigned vector, me;
2276
2277 ack_APIC_irq();
2278 exit_idle();
2279 irq_enter();
2280
2281 me = smp_processor_id();
2282 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2283 unsigned int irq;
2284 unsigned int irr;
2285 struct irq_desc *desc;
2286 struct irq_cfg *cfg;
2287 irq = __this_cpu_read(vector_irq[vector]);
2288
2289 if (irq == -1)
2290 continue;
2291
2292 desc = irq_to_desc(irq);
2293 if (!desc)
2294 continue;
2295
2296 cfg = irq_cfg(irq);
2297 raw_spin_lock(&desc->lock);
2298
2299 /*
2300 * Check if the irq migration is in progress. If so, we
2301 * haven't received the cleanup request yet for this irq.
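 * Until send_cleanup_vector() has run for it, interrupts may still
 * be in flight to the old vector, so it is not yet safe to release
 * the vector here; skip it and let the cleanup IPI that is sent
 * later take care of it.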
2302 */
2303 if (cfg->move_in_progress)
2304 goto unlock;
2305
2306 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2307 goto unlock;
2308
2309 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
2310 /*
2311 * Check if the vector that needs to be cleaned up is
2312 * registered at the cpu's IRR. If so, then this is not
2313 * the best time to clean it up. Let's clean it up in the
2314 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
2315 * to myself.
2316 */
2317 if (irr & (1 << (vector % 32))) {
2318 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
2319 goto unlock;
2320 }
2321 __this_cpu_write(vector_irq[vector], -1);
2322 unlock:
2323 raw_spin_unlock(&desc->lock);
2324 }
2325
2326 irq_exit();
2327 }
2328
2329 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2330 {
2331 unsigned me;
2332
2333 if (likely(!cfg->move_in_progress))
2334 return;
2335
2336 me = smp_processor_id();
2337
2338 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2339 send_cleanup_vector(cfg);
2340 }
2341
2342 static void irq_complete_move(struct irq_cfg *cfg)
2343 {
2344 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
2345 }
2346
2347 void irq_force_complete_move(int irq)
2348 {
2349 struct irq_cfg *cfg = irq_get_chip_data(irq);
2350
2351 if (!cfg)
2352 return;
2353
2354 __irq_complete_move(cfg, cfg->vector);
2355 }
2356 #else
2357 static inline void irq_complete_move(struct irq_cfg *cfg) { }
2358 #endif
2359
2360 static void ack_apic_edge(struct irq_data *data)
2361 {
2362 irq_complete_move(data->chip_data);
2363 irq_move_irq(data);
2364 ack_APIC_irq();
2365 }
2366
2367 atomic_t irq_mis_count;
2368
2369 /*
2370 * IO-APIC versions below 0x20 don't support the EOI register.
2371 * For the record, here is the information about various versions:
2372 * 0Xh 82489DX
2373 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
2374 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
2375 * 30h-FFh Reserved
2376 *
2377 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
2378 * version as 0x2. This is a documentation error; these ICH chips
2379 * use io-apics of version 0x20.
2380 *
2381 * For IO-APICs with an EOI register, we use that to do an explicit EOI.
2382 * Otherwise, we simulate the EOI message manually by changing the trigger
2383 * mode to edge and then back to level, with the RTE being masked during this.
2384 */
2385 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2386 {
2387 struct irq_pin_list *entry;
2388 unsigned long flags;
2389
2390 raw_spin_lock_irqsave(&ioapic_lock, flags);
2391 for_each_irq_pin(entry, cfg->irq_2_pin) {
2392 if (mpc_ioapic_ver(entry->apic) >= 0x20) {
2393 /*
2394 * Intr-remapping uses the pin number as the virtual vector
2395 * in the RTE. The actual vector is programmed in the
2396 * intr-remapping table entry. Hence for the io-apic
2397 * EOI we use the pin number.
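 *
 * So per pin the loop below effectively does (a sketch):
 *
 *	if (mpc_ioapic_ver(apic) >= 0x20)
 *		io_apic_eoi(apic, remapped ? pin : vector);
 *	else
 *		mask + set edge, then unmask + set level (simulated EOI)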
2398 */
2399 if (irq_remapped(cfg))
2400 io_apic_eoi(entry->apic, entry->pin);
2401 else
2402 io_apic_eoi(entry->apic, cfg->vector);
2403 } else {
2404 __mask_and_edge_IO_APIC_irq(entry);
2405 __unmask_and_level_IO_APIC_irq(entry);
2406 }
2407 }
2408 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2409 }
2410
2411 static void ack_apic_level(struct irq_data *data)
2412 {
2413 struct irq_cfg *cfg = data->chip_data;
2414 int i, do_unmask_irq = 0, irq = data->irq;
2415 unsigned long v;
2416
2417 irq_complete_move(cfg);
2418 #ifdef CONFIG_GENERIC_PENDING_IRQ
2419 /* If we are moving the irq we need to mask it */
2420 if (unlikely(irqd_is_setaffinity_pending(data))) {
2421 do_unmask_irq = 1;
2422 mask_ioapic(cfg);
2423 }
2424 #endif
2425
2426 /*
2427 * It appears there is an erratum which affects at least version 0x11
2428 * of I/O APIC (that's the 82093AA and cores integrated into various
2429 * chipsets). Under certain conditions a level-triggered interrupt is
2430 * erroneously delivered as an edge-triggered one but the respective IRR
2431 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2432 * message but it will never arrive and further interrupts are blocked
2433 * from the source. The exact reason is so far unknown, but the
2434 * phenomenon was observed when two consecutive interrupt requests
2435 * from a given source get delivered to the same CPU and the source is
2436 * temporarily disabled in between.
2437 *
2438 * A workaround is to simulate an EOI message manually. We achieve it
2439 * by setting the trigger mode to edge and then to level when the edge
2440 * trigger mode gets detected in the TMR of a local APIC for a
2441 * level-triggered interrupt. We mask the source for the time of the
2442 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2443 * The idea is from Manfred Spraul. --macro
2444 *
2445 * Also in the case when a cpu goes offline, fixup_irqs() will forward
2446 * any unhandled interrupt on the offlined cpu to the new cpu
2447 * destination that is handling the corresponding interrupt. This
2448 * interrupt forwarding is done via IPI's. Hence, in this case also
2449 * a level-triggered io-apic interrupt will be seen as an edge
2450 * interrupt in the IRR. And we can't rely on the cpu's EOI
2451 * to be broadcast to the IO-APICs which will clear the remoteIRR
2452 * corresponding to the level-triggered interrupt. Hence on IO-APICs
2453 * supporting the EOI register, we do an explicit EOI to clear the
2454 * remote IRR and on IO-APICs which don't have an EOI register,
2455 * we use the above logic (mask+edge followed by unmask+level) from
2456 * Manfred Spraul to clear the remote IRR.
2457 */
2458 i = cfg->vector;
2459 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2460
2461 /*
2462 * We must acknowledge the irq before we move it or the acknowledge will
2463 * not propagate properly.
2464 */
2465 ack_APIC_irq();
2466
2467 /*
2468 * Tail end of clearing the remote IRR bit (either by delivering the EOI
2469 * message via the io-apic EOI register write or simulating it using
2470 * mask+edge followed by unmask+level logic) manually when the
2471 * level triggered interrupt is seen as the edge triggered interrupt
2472 * at the cpu.
2473 */
2474 if (!(v & (1 << (i & 0x1f)))) {
2475 atomic_inc(&irq_mis_count);
2476
2477 eoi_ioapic_irq(irq, cfg);
2478 }
2479
2480 /* Now we can move and re-enable the irq */
2481 if (unlikely(do_unmask_irq)) {
2482 /* Only migrate the irq if the ack has been received.
2483 * 2484 * On rare occasions the broadcast level triggered ack gets 2485 * delayed going to ioapics, and if we reprogram the 2486 * vector while Remote IRR is still set the irq will never 2487 * fire again. 2488 * 2489 * To prevent this scenario we read the Remote IRR bit 2490 * of the ioapic. This has two effects. 2491 * - On any sane system the read of the ioapic will 2492 * flush writes (and acks) going to the ioapic from 2493 * this cpu. 2494 * - We get to see if the ACK has actually been delivered. 2495 * 2496 * Based on failed experiments of reprogramming the 2497 * ioapic entry from outside of irq context starting 2498 * with masking the ioapic entry and then polling until 2499 * Remote IRR was clear before reprogramming the 2500 * ioapic I don't trust the Remote IRR bit to be 2501 * completey accurate. 2502 * 2503 * However there appears to be no other way to plug 2504 * this race, so if the Remote IRR bit is not 2505 * accurate and is causing problems then it is a hardware bug 2506 * and you can go talk to the chipset vendor about it. 2507 */ 2508 if (!io_apic_level_ack_pending(cfg)) 2509 irq_move_masked_irq(data); 2510 unmask_ioapic(cfg); 2511 } 2512 } 2513 2514 #ifdef CONFIG_INTR_REMAP 2515 static void ir_ack_apic_edge(struct irq_data *data) 2516 { 2517 ack_APIC_irq(); 2518 } 2519 2520 static void ir_ack_apic_level(struct irq_data *data) 2521 { 2522 ack_APIC_irq(); 2523 eoi_ioapic_irq(data->irq, data->chip_data); 2524 } 2525 #endif /* CONFIG_INTR_REMAP */ 2526 2527 static struct irq_chip ioapic_chip __read_mostly = { 2528 .name = "IO-APIC", 2529 .irq_startup = startup_ioapic_irq, 2530 .irq_mask = mask_ioapic_irq, 2531 .irq_unmask = unmask_ioapic_irq, 2532 .irq_ack = ack_apic_edge, 2533 .irq_eoi = ack_apic_level, 2534 #ifdef CONFIG_SMP 2535 .irq_set_affinity = ioapic_set_affinity, 2536 #endif 2537 .irq_retrigger = ioapic_retrigger_irq, 2538 }; 2539 2540 static struct irq_chip ir_ioapic_chip __read_mostly = { 2541 .name = "IR-IO-APIC", 2542 .irq_startup = startup_ioapic_irq, 2543 .irq_mask = mask_ioapic_irq, 2544 .irq_unmask = unmask_ioapic_irq, 2545 #ifdef CONFIG_INTR_REMAP 2546 .irq_ack = ir_ack_apic_edge, 2547 .irq_eoi = ir_ack_apic_level, 2548 #ifdef CONFIG_SMP 2549 .irq_set_affinity = ir_ioapic_set_affinity, 2550 #endif 2551 #endif 2552 .irq_retrigger = ioapic_retrigger_irq, 2553 }; 2554 2555 static inline void init_IO_APIC_traps(void) 2556 { 2557 struct irq_cfg *cfg; 2558 unsigned int irq; 2559 2560 /* 2561 * NOTE! The local APIC isn't very good at handling 2562 * multiple interrupts at the same interrupt level. 2563 * As the interrupt level is determined by taking the 2564 * vector number and shifting that right by 4, we 2565 * want to spread these out a bit so that they don't 2566 * all fall in the same interrupt level. 2567 * 2568 * Also, we've got to be careful not to trash gate 2569 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2570 */ 2571 for_each_active_irq(irq) { 2572 cfg = irq_get_chip_data(irq); 2573 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2574 /* 2575 * Hmm.. We don't have an entry for this, 2576 * so default to an old-fashioned 8259 2577 * interrupt if we can.. 2578 */ 2579 if (irq < legacy_pic->nr_legacy_irqs) 2580 legacy_pic->make_irq(irq); 2581 else 2582 /* Strange. Oh, well.. 
*/
2583 irq_set_chip(irq, &no_irq_chip);
2584 }
2585 }
2586 }
2587
2588 /*
2589 * The local APIC irq-chip implementation:
2590 */
2591
2592 static void mask_lapic_irq(struct irq_data *data)
2593 {
2594 unsigned long v;
2595
2596 v = apic_read(APIC_LVT0);
2597 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2598 }
2599
2600 static void unmask_lapic_irq(struct irq_data *data)
2601 {
2602 unsigned long v;
2603
2604 v = apic_read(APIC_LVT0);
2605 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2606 }
2607
2608 static void ack_lapic_irq(struct irq_data *data)
2609 {
2610 ack_APIC_irq();
2611 }
2612
2613 static struct irq_chip lapic_chip __read_mostly = {
2614 .name = "local-APIC",
2615 .irq_mask = mask_lapic_irq,
2616 .irq_unmask = unmask_lapic_irq,
2617 .irq_ack = ack_lapic_irq,
2618 };
2619
2620 static void lapic_register_intr(int irq)
2621 {
2622 irq_clear_status_flags(irq, IRQ_LEVEL);
2623 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2624 "edge");
2625 }
2626
2627 /*
2628 * This looks a bit hackish but it's about the only way of sending
2629 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2630 * not support the ExtINT mode, unfortunately. We need to send these
2631 * cycles as some i82489DX-based boards have glue logic that keeps the
2632 * 8259A interrupt line asserted until INTA. --macro
2633 */
2634 static inline void __init unlock_ExtINT_logic(void)
2635 {
2636 int apic, pin, i;
2637 struct IO_APIC_route_entry entry0, entry1;
2638 unsigned char save_control, save_freq_select;
2639
2640 pin = find_isa_irq_pin(8, mp_INT);
2641 if (pin == -1) {
2642 WARN_ON_ONCE(1);
2643 return;
2644 }
2645 apic = find_isa_irq_apic(8, mp_INT);
2646 if (apic == -1) {
2647 WARN_ON_ONCE(1);
2648 return;
2649 }
2650
2651 entry0 = ioapic_read_entry(apic, pin);
2652 clear_IO_APIC_pin(apic, pin);
2653
2654 memset(&entry1, 0, sizeof(entry1));
2655
2656 entry1.dest_mode = 0; /* physical delivery */
2657 entry1.mask = 0; /* unmask IRQ now */
2658 entry1.dest = hard_smp_processor_id();
2659 entry1.delivery_mode = dest_ExtINT;
2660 entry1.polarity = entry0.polarity;
2661 entry1.trigger = 0;
2662 entry1.vector = 0;
2663
2664 ioapic_write_entry(apic, pin, entry1);
2665
2666 save_control = CMOS_READ(RTC_CONTROL);
2667 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2668 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2669 RTC_FREQ_SELECT);
2670 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2671
2672 i = 100;
2673 while (i-- > 0) {
2674 mdelay(10);
2675 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2676 i -= 10;
2677 }
2678
2679 CMOS_WRITE(save_control, RTC_CONTROL);
2680 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2681 clear_IO_APIC_pin(apic, pin);
2682
2683 ioapic_write_entry(apic, pin, entry0);
2684 }
2685
2686 static int disable_timer_pin_1 __initdata;
2687 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2688 static int __init disable_timer_pin_setup(char *arg)
2689 {
2690 disable_timer_pin_1 = 1;
2691 return 0;
2692 }
2693 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2694
2695 int timer_through_8259 __initdata;
2696
2697 /*
2698 * This code may look a bit paranoid, but it's supposed to cooperate with
2699 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2700 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2701 * fanatically on his truly buggy board.
2702 *
2703 * FIXME: really need to revamp this for all platforms.
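 *
 * The fallback ladder below, in order:
 *  1) IRQ0 through the IO-APIC pin the MP table reports for the
 *     timer (pin1);
 *  2) IRQ0 through the 8259A cascade pin on the IO-APIC (pin2);
 *  3) the timer as a local APIC virtual wire IRQ (LVT0, fixed mode);
 *  4) the timer as an ExtINT IRQ through the 8259A, using
 *     unlock_ExtINT_logic() to kick any i82489DX glue logic.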
2704 */
2705 static inline void __init check_timer(void)
2706 {
2707 struct irq_cfg *cfg = irq_get_chip_data(0);
2708 int node = cpu_to_node(0);
2709 int apic1, pin1, apic2, pin2;
2710 unsigned long flags;
2711 int no_pin1 = 0;
2712
2713 local_irq_save(flags);
2714
2715 /*
2716 * get/set the timer IRQ vector:
2717 */
2718 legacy_pic->mask(0);
2719 assign_irq_vector(0, cfg, apic->target_cpus());
2720
2721 /*
2722 * As IRQ0 is to be enabled in the 8259A, the virtual
2723 * wire has to be disabled in the local APIC. Also,
2724 * timer interrupts need to be acknowledged manually in
2725 * the 8259A for the i82489DX when using the NMI
2726 * watchdog as that APIC treats NMIs as level-triggered.
2727 * The AEOI mode will finish them in the 8259A
2728 * automatically.
2729 */
2730 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2731 legacy_pic->init(1);
2732
2733 pin1 = find_isa_irq_pin(0, mp_INT);
2734 apic1 = find_isa_irq_apic(0, mp_INT);
2735 pin2 = ioapic_i8259.pin;
2736 apic2 = ioapic_i8259.apic;
2737
2738 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2739 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2740 cfg->vector, apic1, pin1, apic2, pin2);
2741
2742 /*
2743 * Some BIOS writers are clueless and report the ExtINTA
2744 * I/O APIC input from the cascaded 8259A as the timer
2745 * interrupt input. So just in case, if only one pin
2746 * was found above, try it both directly and through the
2747 * 8259A.
2748 */
2749 if (pin1 == -1) {
2750 if (intr_remapping_enabled)
2751 panic("BIOS bug: timer not connected to IO-APIC");
2752 pin1 = pin2;
2753 apic1 = apic2;
2754 no_pin1 = 1;
2755 } else if (pin2 == -1) {
2756 pin2 = pin1;
2757 apic2 = apic1;
2758 }
2759
2760 if (pin1 != -1) {
2761 /*
2762 * Ok, does IRQ0 through the IOAPIC work?
2763 */
2764 if (no_pin1) {
2765 add_pin_to_irq_node(cfg, node, apic1, pin1);
2766 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2767 } else {
2768 /* For edge trigger, setup_ioapic_irq already
2769 * leaves it unmasked, so we only need to unmask
2770 * it if it is level-triggered. Do we really
2771 * have a level-triggered timer?
2772 */
2773 int idx;
2774 idx = find_irq_entry(apic1, pin1, mp_INT);
2775 if (idx != -1 && irq_trigger(idx))
2776 unmask_ioapic(cfg);
2777 }
2778 if (timer_irq_works()) {
2779 if (disable_timer_pin_1 > 0)
2780 clear_IO_APIC_pin(0, pin1);
2781 goto out;
2782 }
2783 if (intr_remapping_enabled)
2784 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2785 local_irq_disable();
2786 clear_IO_APIC_pin(apic1, pin1);
2787 if (!no_pin1)
2788 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2789 "8254 timer not connected to IO-APIC\n");
2790
2791 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2792 "(IRQ0) through the 8259A ...\n");
2793 apic_printk(APIC_QUIET, KERN_INFO
2794 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2795 /*
2796 * legacy devices should be connected to IO APIC #0
2797 */
2798 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
2799 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2800 legacy_pic->unmask(0);
2801 if (timer_irq_works()) {
2802 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2803 timer_through_8259 = 1;
2804 goto out;
2805 }
2806 /*
2807 * Cleanup, just in case ...
2808 */
2809 local_irq_disable();
2810 legacy_pic->mask(0);
2811 clear_IO_APIC_pin(apic2, pin2);
2812 apic_printk(APIC_QUIET, KERN_INFO ".......
failed.\n"); 2813 } 2814 2815 apic_printk(APIC_QUIET, KERN_INFO 2816 "...trying to set up timer as Virtual Wire IRQ...\n"); 2817 2818 lapic_register_intr(0); 2819 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2820 legacy_pic->unmask(0); 2821 2822 if (timer_irq_works()) { 2823 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2824 goto out; 2825 } 2826 local_irq_disable(); 2827 legacy_pic->mask(0); 2828 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2829 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2830 2831 apic_printk(APIC_QUIET, KERN_INFO 2832 "...trying to set up timer as ExtINT IRQ...\n"); 2833 2834 legacy_pic->init(0); 2835 legacy_pic->make_irq(0); 2836 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2837 2838 unlock_ExtINT_logic(); 2839 2840 if (timer_irq_works()) { 2841 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2842 goto out; 2843 } 2844 local_irq_disable(); 2845 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2846 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2847 "report. Then try booting with the 'noapic' option.\n"); 2848 out: 2849 local_irq_restore(flags); 2850 } 2851 2852 /* 2853 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2854 * to devices. However there may be an I/O APIC pin available for 2855 * this interrupt regardless. The pin may be left unconnected, but 2856 * typically it will be reused as an ExtINT cascade interrupt for 2857 * the master 8259A. In the MPS case such a pin will normally be 2858 * reported as an ExtINT interrupt in the MP table. With ACPI 2859 * there is no provision for ExtINT interrupts, and in the absence 2860 * of an override it would be treated as an ordinary ISA I/O APIC 2861 * interrupt, that is edge-triggered and unmasked by default. We 2862 * used to do this, but it caused problems on some systems because 2863 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2864 * the same ExtINT cascade interrupt to drive the local APIC of the 2865 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2866 * the I/O APIC in all cases now. No actual device should request 2867 * it anyway. --macro 2868 */ 2869 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2870 2871 void __init setup_IO_APIC(void) 2872 { 2873 2874 /* 2875 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2876 */ 2877 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2878 2879 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2880 /* 2881 * Set up IO-APIC IRQ routing. 2882 */ 2883 x86_init.mpparse.setup_ioapic_ids(); 2884 2885 sync_Arb_IDs(); 2886 setup_IO_APIC_irqs(); 2887 init_IO_APIC_traps(); 2888 if (legacy_pic->nr_legacy_irqs) 2889 check_timer(); 2890 } 2891 2892 /* 2893 * Called after all the initialization is done. 
If we didn't find any
2894 * APIC bugs, then we can allow the modify fast path.
2895 */
2896
2897 static int __init io_apic_bug_finalize(void)
2898 {
2899 if (sis_apic_bug == -1)
2900 sis_apic_bug = 0;
2901 return 0;
2902 }
2903
2904 late_initcall(io_apic_bug_finalize);
2905
2906 static void resume_ioapic_id(int ioapic_id)
2907 {
2908 unsigned long flags;
2909 union IO_APIC_reg_00 reg_00;
2910
2911
2912 raw_spin_lock_irqsave(&ioapic_lock, flags);
2913 reg_00.raw = io_apic_read(ioapic_id, 0);
2914 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_id)) {
2915 reg_00.bits.ID = mpc_ioapic_id(ioapic_id);
2916 io_apic_write(ioapic_id, 0, reg_00.raw);
2917 }
2918 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2919 }
2920
2921 static void ioapic_resume(void)
2922 {
2923 int ioapic_id;
2924
2925 for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--)
2926 resume_ioapic_id(ioapic_id);
2927
2928 restore_ioapic_entries();
2929 }
2930
2931 static struct syscore_ops ioapic_syscore_ops = {
2932 .suspend = save_ioapic_entries,
2933 .resume = ioapic_resume,
2934 };
2935
2936 static int __init ioapic_init_ops(void)
2937 {
2938 register_syscore_ops(&ioapic_syscore_ops);
2939
2940 return 0;
2941 }
2942
2943 device_initcall(ioapic_init_ops);
2944
2945 /*
2946 * Dynamic irq allocation and deallocation
2947 */
2948 unsigned int create_irq_nr(unsigned int from, int node)
2949 {
2950 struct irq_cfg *cfg;
2951 unsigned long flags;
2952 unsigned int ret = 0;
2953 int irq;
2954
2955 if (from < nr_irqs_gsi)
2956 from = nr_irqs_gsi;
2957
2958 irq = alloc_irq_from(from, node);
2959 if (irq < 0)
2960 return 0;
2961 cfg = alloc_irq_cfg(irq, node);
2962 if (!cfg) {
2963 free_irq_at(irq, NULL);
2964 return 0;
2965 }
2966
2967 raw_spin_lock_irqsave(&vector_lock, flags);
2968 if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
2969 ret = irq;
2970 raw_spin_unlock_irqrestore(&vector_lock, flags);
2971
2972 if (ret) {
2973 irq_set_chip_data(irq, cfg);
2974 irq_clear_status_flags(irq, IRQ_NOREQUEST);
2975 } else {
2976 free_irq_at(irq, cfg);
2977 }
2978 return ret;
2979 }
2980
2981 int create_irq(void)
2982 {
2983 int node = cpu_to_node(0);
2984 unsigned int irq_want;
2985 int irq;
2986
2987 irq_want = nr_irqs_gsi;
2988 irq = create_irq_nr(irq_want, node);
2989
2990 if (irq == 0)
2991 irq = -1;
2992
2993 return irq;
2994 }
2995
2996 void destroy_irq(unsigned int irq)
2997 {
2998 struct irq_cfg *cfg = irq_get_chip_data(irq);
2999 unsigned long flags;
3000
3001 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3002
3003 if (irq_remapped(cfg))
3004 free_irte(irq);
3005 raw_spin_lock_irqsave(&vector_lock, flags);
3006 __clear_irq_vector(irq, cfg);
3007 raw_spin_unlock_irqrestore(&vector_lock, flags);
3008 free_irq_at(irq, cfg);
3009 }
3010
3011 /*
3012 * MSI message composition
3013 */
3014 #ifdef CONFIG_PCI_MSI
3015 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3016 struct msi_msg *msg, u8 hpet_id)
3017 {
3018 struct irq_cfg *cfg;
3019 int err;
3020 unsigned dest;
3021
3022 if (disable_apic)
3023 return -ENXIO;
3024
3025 cfg = irq_cfg(irq);
3026 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3027 if (err)
3028 return err;
3029
3030 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3031
3032 if (irq_remapped(cfg)) {
3033 struct irte irte;
3034 int ir_index;
3035 u16 sub_handle;
3036
3037 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3038 BUG_ON(ir_index == -1);
3039
3040 prepare_irte(&irte, cfg->vector, dest);
3041
3042 /* Set source-id of interrupt request */
3043 if
(pdev) 3044 set_msi_sid(&irte, pdev); 3045 else 3046 set_hpet_sid(&irte, hpet_id); 3047 3048 modify_irte(irq, &irte); 3049 3050 msg->address_hi = MSI_ADDR_BASE_HI; 3051 msg->data = sub_handle; 3052 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3053 MSI_ADDR_IR_SHV | 3054 MSI_ADDR_IR_INDEX1(ir_index) | 3055 MSI_ADDR_IR_INDEX2(ir_index); 3056 } else { 3057 if (x2apic_enabled()) 3058 msg->address_hi = MSI_ADDR_BASE_HI | 3059 MSI_ADDR_EXT_DEST_ID(dest); 3060 else 3061 msg->address_hi = MSI_ADDR_BASE_HI; 3062 3063 msg->address_lo = 3064 MSI_ADDR_BASE_LO | 3065 ((apic->irq_dest_mode == 0) ? 3066 MSI_ADDR_DEST_MODE_PHYSICAL: 3067 MSI_ADDR_DEST_MODE_LOGICAL) | 3068 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3069 MSI_ADDR_REDIRECTION_CPU: 3070 MSI_ADDR_REDIRECTION_LOWPRI) | 3071 MSI_ADDR_DEST_ID(dest); 3072 3073 msg->data = 3074 MSI_DATA_TRIGGER_EDGE | 3075 MSI_DATA_LEVEL_ASSERT | 3076 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3077 MSI_DATA_DELIVERY_FIXED: 3078 MSI_DATA_DELIVERY_LOWPRI) | 3079 MSI_DATA_VECTOR(cfg->vector); 3080 } 3081 return err; 3082 } 3083 3084 #ifdef CONFIG_SMP 3085 static int 3086 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3087 { 3088 struct irq_cfg *cfg = data->chip_data; 3089 struct msi_msg msg; 3090 unsigned int dest; 3091 3092 if (__ioapic_set_affinity(data, mask, &dest)) 3093 return -1; 3094 3095 __get_cached_msi_msg(data->msi_desc, &msg); 3096 3097 msg.data &= ~MSI_DATA_VECTOR_MASK; 3098 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3099 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3100 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3101 3102 __write_msi_msg(data->msi_desc, &msg); 3103 3104 return 0; 3105 } 3106 #ifdef CONFIG_INTR_REMAP 3107 /* 3108 * Migrate the MSI irq to another cpumask. This migration is 3109 * done in the process context using interrupt-remapping hardware. 3110 */ 3111 static int 3112 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3113 bool force) 3114 { 3115 struct irq_cfg *cfg = data->chip_data; 3116 unsigned int dest, irq = data->irq; 3117 struct irte irte; 3118 3119 if (get_irte(irq, &irte)) 3120 return -1; 3121 3122 if (__ioapic_set_affinity(data, mask, &dest)) 3123 return -1; 3124 3125 irte.vector = cfg->vector; 3126 irte.dest_id = IRTE_DEST(dest); 3127 3128 /* 3129 * atomically update the IRTE with the new destination and vector. 3130 */ 3131 modify_irte(irq, &irte); 3132 3133 /* 3134 * After this point, all the interrupts will start arriving 3135 * at the new destination. So, time to cleanup the previous 3136 * vector allocation. 3137 */ 3138 if (cfg->move_in_progress) 3139 send_cleanup_vector(cfg); 3140 3141 return 0; 3142 } 3143 3144 #endif 3145 #endif /* CONFIG_SMP */ 3146 3147 /* 3148 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3149 * which implement the MSI or MSI-X Capability Structure. 
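 *
 * As composed in msi_compose_msg() above: msg->address_hi/lo select
 * the destination (APIC ID, dest mode, redirection hint) and
 * msg->data carries the vector and delivery mode; with interrupt
 * remapping, the address instead encodes the IRTE index and the
 * data field only carries the sub_handle.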
3150 */ 3151 static struct irq_chip msi_chip = { 3152 .name = "PCI-MSI", 3153 .irq_unmask = unmask_msi_irq, 3154 .irq_mask = mask_msi_irq, 3155 .irq_ack = ack_apic_edge, 3156 #ifdef CONFIG_SMP 3157 .irq_set_affinity = msi_set_affinity, 3158 #endif 3159 .irq_retrigger = ioapic_retrigger_irq, 3160 }; 3161 3162 static struct irq_chip msi_ir_chip = { 3163 .name = "IR-PCI-MSI", 3164 .irq_unmask = unmask_msi_irq, 3165 .irq_mask = mask_msi_irq, 3166 #ifdef CONFIG_INTR_REMAP 3167 .irq_ack = ir_ack_apic_edge, 3168 #ifdef CONFIG_SMP 3169 .irq_set_affinity = ir_msi_set_affinity, 3170 #endif 3171 #endif 3172 .irq_retrigger = ioapic_retrigger_irq, 3173 }; 3174 3175 /* 3176 * Map the PCI dev to the corresponding remapping hardware unit 3177 * and allocate 'nvec' consecutive interrupt-remapping table entries 3178 * in it. 3179 */ 3180 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3181 { 3182 struct intel_iommu *iommu; 3183 int index; 3184 3185 iommu = map_dev_to_ir(dev); 3186 if (!iommu) { 3187 printk(KERN_ERR 3188 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3189 return -ENOENT; 3190 } 3191 3192 index = alloc_irte(iommu, irq, nvec); 3193 if (index < 0) { 3194 printk(KERN_ERR 3195 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3196 pci_name(dev)); 3197 return -ENOSPC; 3198 } 3199 return index; 3200 } 3201 3202 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3203 { 3204 struct irq_chip *chip = &msi_chip; 3205 struct msi_msg msg; 3206 int ret; 3207 3208 ret = msi_compose_msg(dev, irq, &msg, -1); 3209 if (ret < 0) 3210 return ret; 3211 3212 irq_set_msi_desc(irq, msidesc); 3213 write_msi_msg(irq, &msg); 3214 3215 if (irq_remapped(irq_get_chip_data(irq))) { 3216 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3217 chip = &msi_ir_chip; 3218 } 3219 3220 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3221 3222 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3223 3224 return 0; 3225 } 3226 3227 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3228 { 3229 int node, ret, sub_handle, index = 0; 3230 unsigned int irq, irq_want; 3231 struct msi_desc *msidesc; 3232 struct intel_iommu *iommu = NULL; 3233 3234 /* x86 doesn't support multiple MSI yet */ 3235 if (type == PCI_CAP_ID_MSI && nvec > 1) 3236 return 1; 3237 3238 node = dev_to_node(&dev->dev); 3239 irq_want = nr_irqs_gsi; 3240 sub_handle = 0; 3241 list_for_each_entry(msidesc, &dev->msi_list, list) { 3242 irq = create_irq_nr(irq_want, node); 3243 if (irq == 0) 3244 return -1; 3245 irq_want = irq + 1; 3246 if (!intr_remapping_enabled) 3247 goto no_ir; 3248 3249 if (!sub_handle) { 3250 /* 3251 * allocate the consecutive block of IRTE's 3252 * for 'nvec' 3253 */ 3254 index = msi_alloc_irte(dev, irq, nvec); 3255 if (index < 0) { 3256 ret = index; 3257 goto error; 3258 } 3259 } else { 3260 iommu = map_dev_to_ir(dev); 3261 if (!iommu) { 3262 ret = -ENOENT; 3263 goto error; 3264 } 3265 /* 3266 * setup the mapping between the irq and the IRTE 3267 * base index, the sub_handle pointing to the 3268 * appropriate interrupt remap table entry. 
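 *
 * E.g. (illustrative numbers) with nvec == 3, the first loop
 * iteration below allocates a block of 3 IRTEs at 'index'; the irqs
 * then map as irq0 -> index + 0, irq1 -> index + 1, irq2 -> index + 2
 * via their sub_handle.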
3269 */ 3270 set_irte_irq(irq, iommu, index, sub_handle); 3271 } 3272 no_ir: 3273 ret = setup_msi_irq(dev, msidesc, irq); 3274 if (ret < 0) 3275 goto error; 3276 sub_handle++; 3277 } 3278 return 0; 3279 3280 error: 3281 destroy_irq(irq); 3282 return ret; 3283 } 3284 3285 void native_teardown_msi_irq(unsigned int irq) 3286 { 3287 destroy_irq(irq); 3288 } 3289 3290 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3291 #ifdef CONFIG_SMP 3292 static int 3293 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3294 bool force) 3295 { 3296 struct irq_cfg *cfg = data->chip_data; 3297 unsigned int dest, irq = data->irq; 3298 struct msi_msg msg; 3299 3300 if (__ioapic_set_affinity(data, mask, &dest)) 3301 return -1; 3302 3303 dmar_msi_read(irq, &msg); 3304 3305 msg.data &= ~MSI_DATA_VECTOR_MASK; 3306 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3307 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3308 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3309 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3310 3311 dmar_msi_write(irq, &msg); 3312 3313 return 0; 3314 } 3315 3316 #endif /* CONFIG_SMP */ 3317 3318 static struct irq_chip dmar_msi_type = { 3319 .name = "DMAR_MSI", 3320 .irq_unmask = dmar_msi_unmask, 3321 .irq_mask = dmar_msi_mask, 3322 .irq_ack = ack_apic_edge, 3323 #ifdef CONFIG_SMP 3324 .irq_set_affinity = dmar_msi_set_affinity, 3325 #endif 3326 .irq_retrigger = ioapic_retrigger_irq, 3327 }; 3328 3329 int arch_setup_dmar_msi(unsigned int irq) 3330 { 3331 int ret; 3332 struct msi_msg msg; 3333 3334 ret = msi_compose_msg(NULL, irq, &msg, -1); 3335 if (ret < 0) 3336 return ret; 3337 dmar_msi_write(irq, &msg); 3338 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3339 "edge"); 3340 return 0; 3341 } 3342 #endif 3343 3344 #ifdef CONFIG_HPET_TIMER 3345 3346 #ifdef CONFIG_SMP 3347 static int hpet_msi_set_affinity(struct irq_data *data, 3348 const struct cpumask *mask, bool force) 3349 { 3350 struct irq_cfg *cfg = data->chip_data; 3351 struct msi_msg msg; 3352 unsigned int dest; 3353 3354 if (__ioapic_set_affinity(data, mask, &dest)) 3355 return -1; 3356 3357 hpet_msi_read(data->handler_data, &msg); 3358 3359 msg.data &= ~MSI_DATA_VECTOR_MASK; 3360 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3361 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3362 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3363 3364 hpet_msi_write(data->handler_data, &msg); 3365 3366 return 0; 3367 } 3368 3369 #endif /* CONFIG_SMP */ 3370 3371 static struct irq_chip ir_hpet_msi_type = { 3372 .name = "IR-HPET_MSI", 3373 .irq_unmask = hpet_msi_unmask, 3374 .irq_mask = hpet_msi_mask, 3375 #ifdef CONFIG_INTR_REMAP 3376 .irq_ack = ir_ack_apic_edge, 3377 #ifdef CONFIG_SMP 3378 .irq_set_affinity = ir_msi_set_affinity, 3379 #endif 3380 #endif 3381 .irq_retrigger = ioapic_retrigger_irq, 3382 }; 3383 3384 static struct irq_chip hpet_msi_type = { 3385 .name = "HPET_MSI", 3386 .irq_unmask = hpet_msi_unmask, 3387 .irq_mask = hpet_msi_mask, 3388 .irq_ack = ack_apic_edge, 3389 #ifdef CONFIG_SMP 3390 .irq_set_affinity = hpet_msi_set_affinity, 3391 #endif 3392 .irq_retrigger = ioapic_retrigger_irq, 3393 }; 3394 3395 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3396 { 3397 struct irq_chip *chip = &hpet_msi_type; 3398 struct msi_msg msg; 3399 int ret; 3400 3401 if (intr_remapping_enabled) { 3402 struct intel_iommu *iommu = map_hpet_to_ir(id); 3403 int index; 3404 3405 if (!iommu) 3406 return -1; 3407 3408 index = alloc_irte(iommu, irq, 1); 3409 if (index < 0) 3410 return -1; 3411 } 3412 3413 ret 
= msi_compose_msg(NULL, irq, &msg, id); 3414 if (ret < 0) 3415 return ret; 3416 3417 hpet_msi_write(irq_get_handler_data(irq), &msg); 3418 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3419 if (irq_remapped(irq_get_chip_data(irq))) 3420 chip = &ir_hpet_msi_type; 3421 3422 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3423 return 0; 3424 } 3425 #endif 3426 3427 #endif /* CONFIG_PCI_MSI */ 3428 /* 3429 * Hypertransport interrupt support 3430 */ 3431 #ifdef CONFIG_HT_IRQ 3432 3433 #ifdef CONFIG_SMP 3434 3435 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3436 { 3437 struct ht_irq_msg msg; 3438 fetch_ht_irq_msg(irq, &msg); 3439 3440 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3441 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3442 3443 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3444 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3445 3446 write_ht_irq_msg(irq, &msg); 3447 } 3448 3449 static int 3450 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3451 { 3452 struct irq_cfg *cfg = data->chip_data; 3453 unsigned int dest; 3454 3455 if (__ioapic_set_affinity(data, mask, &dest)) 3456 return -1; 3457 3458 target_ht_irq(data->irq, dest, cfg->vector); 3459 return 0; 3460 } 3461 3462 #endif 3463 3464 static struct irq_chip ht_irq_chip = { 3465 .name = "PCI-HT", 3466 .irq_mask = mask_ht_irq, 3467 .irq_unmask = unmask_ht_irq, 3468 .irq_ack = ack_apic_edge, 3469 #ifdef CONFIG_SMP 3470 .irq_set_affinity = ht_set_affinity, 3471 #endif 3472 .irq_retrigger = ioapic_retrigger_irq, 3473 }; 3474 3475 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3476 { 3477 struct irq_cfg *cfg; 3478 int err; 3479 3480 if (disable_apic) 3481 return -ENXIO; 3482 3483 cfg = irq_cfg(irq); 3484 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3485 if (!err) { 3486 struct ht_irq_msg msg; 3487 unsigned dest; 3488 3489 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3490 apic->target_cpus()); 3491 3492 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3493 3494 msg.address_lo = 3495 HT_IRQ_LOW_BASE | 3496 HT_IRQ_LOW_DEST_ID(dest) | 3497 HT_IRQ_LOW_VECTOR(cfg->vector) | 3498 ((apic->irq_dest_mode == 0) ? 3499 HT_IRQ_LOW_DM_PHYSICAL : 3500 HT_IRQ_LOW_DM_LOGICAL) | 3501 HT_IRQ_LOW_RQEOI_EDGE | 3502 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3503 HT_IRQ_LOW_MT_FIXED :
3504 HT_IRQ_LOW_MT_ARBITRATED) |
3505 HT_IRQ_LOW_IRQ_MASKED;
3506
3507 write_ht_irq_msg(irq, &msg);
3508
3509 irq_set_chip_and_handler_name(irq, &ht_irq_chip,
3510 handle_edge_irq, "edge");
3511
3512 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3513 }
3514 return err;
3515 }
3516 #endif /* CONFIG_HT_IRQ */
3517
3518 static int
3519 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
3520 {
3521 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
3522 int ret;
3523
3524 if (!cfg)
3525 return -EINVAL;
3526 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
3527 if (!ret)
3528 setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg,
3529 attr->trigger, attr->polarity);
3530 return ret;
3531 }
3532
3533 int io_apic_setup_irq_pin_once(unsigned int irq, int node,
3534 struct io_apic_irq_attr *attr)
3535 {
3536 unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
3537 int ret;
3538
3539 /* Avoid redundant programming */
3540 if (test_bit(pin, ioapics[id].pin_programmed)) {
3541 pr_debug("Pin %d-%d already programmed\n",
3542 mpc_ioapic_id(id), pin);
3543 return 0;
3544 }
3545 ret = io_apic_setup_irq_pin(irq, node, attr);
3546 if (!ret)
3547 set_bit(pin, ioapics[id].pin_programmed);
3548 return ret;
3549 }
3550
3551 static int __init io_apic_get_redir_entries(int ioapic)
3552 {
3553 union IO_APIC_reg_01 reg_01;
3554 unsigned long flags;
3555
3556 raw_spin_lock_irqsave(&ioapic_lock, flags);
3557 reg_01.raw = io_apic_read(ioapic, 1);
3558 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3559
3560 /* The register returns the maximum redirection index
3561 * supported, which is one less than the total number of
3562 * redirection entries.
3563 */
3564 return reg_01.bits.entries + 1;
3565 }
3566
3567 static void __init probe_nr_irqs_gsi(void)
3568 {
3569 int nr;
3570
3571 nr = gsi_top + NR_IRQS_LEGACY;
3572 if (nr > nr_irqs_gsi)
3573 nr_irqs_gsi = nr;
3574
3575 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3576 }
3577
3578 int get_nr_irqs_gsi(void)
3579 {
3580 return nr_irqs_gsi;
3581 }
3582
3583 #ifdef CONFIG_SPARSE_IRQ
3584 int __init arch_probe_nr_irqs(void)
3585 {
3586 int nr;
3587
3588 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3589 nr_irqs = NR_VECTORS * nr_cpu_ids;
3590
3591 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3592 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3593 /*
3594 * for MSI and HT dyn irq
3595 */
3596 nr += nr_irqs_gsi * 16;
3597 #endif
3598 if (nr < nr_irqs)
3599 nr_irqs = nr;
3600
3601 return NR_IRQS_LEGACY;
3602 }
3603 #endif
3604
3605 int io_apic_set_pci_routing(struct device *dev, int irq,
3606 struct io_apic_irq_attr *irq_attr)
3607 {
3608 int node;
3609
3610 if (!IO_APIC_IRQ(irq)) {
3611 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3612 irq_attr->ioapic);
3613 return -EINVAL;
3614 }
3615
3616 node = dev ? dev_to_node(dev) : cpu_to_node(0);
3617
3618 return io_apic_setup_irq_pin_once(irq, node, irq_attr);
3619 }
3620
3621 #ifdef CONFIG_X86_32
3622 static int __init io_apic_get_unique_id(int ioapic, int apic_id)
3623 {
3624 union IO_APIC_reg_00 reg_00;
3625 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3626 physid_mask_t tmp;
3627 unsigned long flags;
3628 int i = 0;
3629
3630 /*
3631 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3632 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
3633 * only supported up to 16 on one shared APIC bus.
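 *
 * The scan below mirrors setup_ioapic_ids_from_mpc_nocheck(): if the
 * requested apic_id is already set in apic_id_map, the first free ID
 * below get_physical_broadcast() is used instead and written back to
 * register 0.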
3634 * 3635 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3636 * advantage of new APIC bus architecture. 3637 */ 3638 3639 if (physids_empty(apic_id_map)) 3640 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3641 3642 raw_spin_lock_irqsave(&ioapic_lock, flags); 3643 reg_00.raw = io_apic_read(ioapic, 0); 3644 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3645 3646 if (apic_id >= get_physical_broadcast()) { 3647 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3648 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3649 apic_id = reg_00.bits.ID; 3650 } 3651 3652 /* 3653 * Every APIC in a system must have a unique ID or we get lots of nice 3654 * 'stuck on smp_invalidate_needed IPI wait' messages. 3655 */ 3656 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3657 3658 for (i = 0; i < get_physical_broadcast(); i++) { 3659 if (!apic->check_apicid_used(&apic_id_map, i)) 3660 break; 3661 } 3662 3663 if (i == get_physical_broadcast()) 3664 panic("Max apic_id exceeded!\n"); 3665 3666 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3667 "trying %d\n", ioapic, apic_id, i); 3668 3669 apic_id = i; 3670 } 3671 3672 apic->apicid_to_cpu_present(apic_id, &tmp); 3673 physids_or(apic_id_map, apic_id_map, tmp); 3674 3675 if (reg_00.bits.ID != apic_id) { 3676 reg_00.bits.ID = apic_id; 3677 3678 raw_spin_lock_irqsave(&ioapic_lock, flags); 3679 io_apic_write(ioapic, 0, reg_00.raw); 3680 reg_00.raw = io_apic_read(ioapic, 0); 3681 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3682 3683 /* Sanity check */ 3684 if (reg_00.bits.ID != apic_id) { 3685 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3686 return -1; 3687 } 3688 } 3689 3690 apic_printk(APIC_VERBOSE, KERN_INFO 3691 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3692 3693 return apic_id; 3694 } 3695 3696 static u8 __init io_apic_unique_id(u8 id) 3697 { 3698 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3699 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3700 return io_apic_get_unique_id(nr_ioapics, id); 3701 else 3702 return id; 3703 } 3704 #else 3705 static u8 __init io_apic_unique_id(u8 id) 3706 { 3707 int i; 3708 DECLARE_BITMAP(used, 256); 3709 3710 bitmap_zero(used, 256); 3711 for (i = 0; i < nr_ioapics; i++) { 3712 __set_bit(mpc_ioapic_id(i), used); 3713 } 3714 if (!test_bit(id, used)) 3715 return id; 3716 return find_first_zero_bit(used, 256); 3717 } 3718 #endif 3719 3720 static int __init io_apic_get_version(int ioapic) 3721 { 3722 union IO_APIC_reg_01 reg_01; 3723 unsigned long flags; 3724 3725 raw_spin_lock_irqsave(&ioapic_lock, flags); 3726 reg_01.raw = io_apic_read(ioapic, 1); 3727 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3728 3729 return reg_01.bits.version; 3730 } 3731 3732 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3733 { 3734 int ioapic, pin, idx; 3735 3736 if (skip_ioapic_setup) 3737 return -1; 3738 3739 ioapic = mp_find_ioapic(gsi); 3740 if (ioapic < 0) 3741 return -1; 3742 3743 pin = mp_find_ioapic_pin(ioapic, gsi); 3744 if (pin < 0) 3745 return -1; 3746 3747 idx = find_irq_entry(ioapic, pin, mp_INT); 3748 if (idx < 0) 3749 return -1; 3750 3751 *trigger = irq_trigger(idx); 3752 *polarity = irq_polarity(idx); 3753 return 0; 3754 } 3755 3756 /* 3757 * This function currently is only a helper for the i386 smp boot process where 3758 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3759 * so mask in all cases should simply be apic->target_cpus() 3760 */ 3761 #ifdef CONFIG_SMP 3762 void 
__init setup_ioapic_dest(void) 3763 { 3764 int pin, ioapic, irq, irq_entry; 3765 const struct cpumask *mask; 3766 struct irq_data *idata; 3767 3768 if (skip_ioapic_setup == 1) 3769 return; 3770 3771 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3772 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3773 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3774 if (irq_entry == -1) 3775 continue; 3776 irq = pin_2_irq(irq_entry, ioapic, pin); 3777 3778 if ((ioapic > 0) && (irq > 16)) 3779 continue; 3780 3781 idata = irq_get_irq_data(irq); 3782 3783 /* 3784 * Honour affinities which have been set in early boot 3785 */ 3786 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3787 mask = idata->affinity; 3788 else 3789 mask = apic->target_cpus(); 3790 3791 if (intr_remapping_enabled) 3792 ir_ioapic_set_affinity(idata, mask, false); 3793 else 3794 ioapic_set_affinity(idata, mask, false); 3795 } 3796 3797 } 3798 #endif 3799 3800 #define IOAPIC_RESOURCE_NAME_SIZE 11 3801 3802 static struct resource *ioapic_resources; 3803 3804 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3805 { 3806 unsigned long n; 3807 struct resource *res; 3808 char *mem; 3809 int i; 3810 3811 if (nr_ioapics <= 0) 3812 return NULL; 3813 3814 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3815 n *= nr_ioapics; 3816 3817 mem = alloc_bootmem(n); 3818 res = (void *)mem; 3819 3820 mem += sizeof(struct resource) * nr_ioapics; 3821 3822 for (i = 0; i < nr_ioapics; i++) { 3823 res[i].name = mem; 3824 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3825 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3826 mem += IOAPIC_RESOURCE_NAME_SIZE; 3827 } 3828 3829 ioapic_resources = res; 3830 3831 return res; 3832 } 3833 3834 void __init ioapic_and_gsi_init(void) 3835 { 3836 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3837 struct resource *ioapic_res; 3838 int i; 3839 3840 ioapic_res = ioapic_setup_resources(nr_ioapics); 3841 for (i = 0; i < nr_ioapics; i++) { 3842 if (smp_found_config) { 3843 ioapic_phys = mpc_ioapic_addr(i); 3844 #ifdef CONFIG_X86_32 3845 if (!ioapic_phys) { 3846 printk(KERN_ERR 3847 "WARNING: bogus zero IO-APIC " 3848 "address found in MPTABLE, " 3849 "disabling IO/APIC support!\n"); 3850 smp_found_config = 0; 3851 skip_ioapic_setup = 1; 3852 goto fake_ioapic_page; 3853 } 3854 #endif 3855 } else { 3856 #ifdef CONFIG_X86_32 3857 fake_ioapic_page: 3858 #endif 3859 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3860 ioapic_phys = __pa(ioapic_phys); 3861 } 3862 set_fixmap_nocache(idx, ioapic_phys); 3863 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3864 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3865 ioapic_phys); 3866 idx++; 3867 3868 ioapic_res->start = ioapic_phys; 3869 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3870 ioapic_res++; 3871 } 3872 3873 probe_nr_irqs_gsi(); 3874 } 3875 3876 void __init ioapic_insert_resources(void) 3877 { 3878 int i; 3879 struct resource *r = ioapic_resources; 3880 3881 if (!r) { 3882 if (nr_ioapics > 0) 3883 printk(KERN_ERR 3884 "IO APIC resources couldn't be allocated.\n"); 3885 return; 3886 } 3887 3888 for (i = 0; i < nr_ioapics; i++) { 3889 insert_resource(&iomem_resource, r); 3890 r++; 3891 } 3892 } 3893 3894 int mp_find_ioapic(u32 gsi) 3895 { 3896 int i = 0; 3897 3898 if (nr_ioapics == 0) 3899 return -1; 3900 3901 /* Find the IOAPIC that manages this GSI. 
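 * E.g. (made-up layout): with two 24-pin IO-APICs registered at
 * gsi_base 0 and 24, the ranges are GSI 0-23 and 24-47, so GSI 30
 * resolves to IO-APIC 1 and mp_find_ioapic_pin() returns 30 - 24 = 6.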
*/ 3902 for (i = 0; i < nr_ioapics; i++) { 3903 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3904 if ((gsi >= gsi_cfg->gsi_base) 3905 && (gsi <= gsi_cfg->gsi_end)) 3906 return i; 3907 } 3908 3909 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3910 return -1; 3911 } 3912 3913 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3914 { 3915 struct mp_ioapic_gsi *gsi_cfg; 3916 3917 if (WARN_ON(ioapic == -1)) 3918 return -1; 3919 3920 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3921 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3922 return -1; 3923 3924 return gsi - gsi_cfg->gsi_base; 3925 } 3926 3927 static __init int bad_ioapic(unsigned long address) 3928 { 3929 if (nr_ioapics >= MAX_IO_APICS) { 3930 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 3931 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 3932 return 1; 3933 } 3934 if (!address) { 3935 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 3936 " found in table, skipping!\n"); 3937 return 1; 3938 } 3939 return 0; 3940 } 3941 3942 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3943 { 3944 int idx = 0; 3945 int entries; 3946 struct mp_ioapic_gsi *gsi_cfg; 3947 3948 if (bad_ioapic(address)) 3949 return; 3950 3951 idx = nr_ioapics; 3952 3953 ioapics[idx].mp_config.type = MP_IOAPIC; 3954 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3955 ioapics[idx].mp_config.apicaddr = address; 3956 3957 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3958 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 3959 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 3960 3961 /* 3962 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 3963 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 3964 */ 3965 entries = io_apic_get_redir_entries(idx); 3966 gsi_cfg = mp_ioapic_gsi_routing(idx); 3967 gsi_cfg->gsi_base = gsi_base; 3968 gsi_cfg->gsi_end = gsi_base + entries - 1; 3969 3970 /* 3971 * The number of IO-APIC IRQ registers (== #pins): 3972 */ 3973 ioapics[idx].nr_registers = entries; 3974 3975 if (gsi_cfg->gsi_end >= gsi_top) 3976 gsi_top = gsi_cfg->gsi_end + 1; 3977 3978 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 3979 "GSI %d-%d\n", idx, mpc_ioapic_id(idx), 3980 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 3981 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 3982 3983 nr_ioapics++; 3984 } 3985 3986 /* Enable IOAPIC early just for system timer */ 3987 void __init pre_init_apic_IRQ0(void) 3988 { 3989 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 3990 3991 printk(KERN_INFO "Early APIC setup for system timer0\n"); 3992 #ifndef CONFIG_SMP 3993 physid_set_mask_of_physid(boot_cpu_physical_apicid, 3994 &phys_cpu_present_map); 3995 #endif 3996 setup_local_APIC(); 3997 3998 io_apic_setup_irq_pin(0, 0, &attr); 3999 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 4000 "edge"); 4001 } 4002