/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type)	static type __init
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

/*
 * Saved I/O APIC state during suspend/resume, or while enabling intr-remap.
 */
static struct IO_APIC_route_entry *ioapic_saved_data[MAX_IO_APICS];

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
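
/*
 * Roughly, each irq has one irq_cfg: the vector currently programmed
 * for it, the cpumask (domain) that vector is valid on, and the
 * irq_2_pin list built from the entries above, so a single irq can
 * drive several IO-APIC pins (e.g. shared ISA-space interrupts).
 */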

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	for (i = 0; i < nr_ioapics; i++) {
		ioapic_saved_data[i] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[i], GFP_KERNEL);
		if (!ioapic_saved_data[i])
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
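
/*
 * The IO-APIC is programmed through the register pair above: write a
 * register number into ->index, then read or write ->data.  E.g.
 * fetching the low dword of pin N's redirection entry is, roughly:
 *
 *	writel(0x10 + 2 * N, &io_apic->index);
 *	low = readl(&io_apic->data);
 *
 * The index/data window is shared state, which is why the callers
 * below serialize on ioapic_lock.
 */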

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
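
/*
 * A redirection entry is 64 bits split across two dwords: the low
 * dword (register 0x10 + 2*pin) carries the vector, delivery mode,
 * trigger, polarity and mask bits, while the high dword (0x11 + 2*pin)
 * carries the destination.  That split is what dictates the write
 * ordering rules above.
 */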

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTE's
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_saved_data[apic]) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_saved_data[apic][pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_saved_data[apic])
			continue;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_saved_data[apic][pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in ioapic_saved_data.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_saved_data[apic])
			continue;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   ioapic_saved_data[apic][pin]);
	}
	return 0;
}
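
/*
 * save_ioapic_entries(), mask_ioapic_entries() and
 * restore_ioapic_entries() are used as a set: callers (suspend/resume,
 * interrupt-remapping enable) snapshot the RTEs into ioapic_saved_data,
 * mask everything while reconfiguring, then write the snapshot back.
 */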

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
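
/*
 * The MP-table irqflag field encodes both properties: bits 1:0 are the
 * polarity and bits 3:2 the trigger mode (the same decoding mp_save_irq()
 * prints above).  E.g. irqflag == 0x0d decodes as polarity 1 (active
 * high) and trigger 3 (level).
 */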
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag >> 2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
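
/*
 * E.g. a pin on a secondary IO-APIC with gsi_base 24 yields
 * irq = 24 + pin directly, while the 16 legacy GSIs are remapped above
 * gsi_top so they cannot collide with the ISA irq numbers.
 */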

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
			       struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
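
/*
 * The search above hands out vectors with a stride of 8 and a rotating
 * offset: consecutive allocations land in different priority classes
 * (class == vector >> 4), and once first_system_vector is hit the
 * offset advances and the scan wraps back to
 * FIRST_EXTERNAL_VECTOR + offset.
 */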

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}
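
/*
 * vector_irq is the per-cpu inverse map: vector_irq[vector] holds the
 * irq currently bound to that vector on this cpu, or -1 when the
 * vector is free.  __setup_vector_irq() rebuilds it when a cpu comes
 * online.
 */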

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		chip = &ir_ioapic_chip;
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}
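
/*
 * Net effect: level-triggered pins get handle_fasteoi_irq and the
 * "fasteoi" name, edge pins get handle_edge_irq, and remapped irqs use
 * the ir_ioapic_chip variants of the same handlers.
 */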

static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * The IO-APIC RTE will be configured with the virtual
		 * vector. The irq handler will do the explicit EOI to
		 * the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;

	return 0;
}

static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
			     struct irq_cfg *cfg, int trigger, int polarity)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like the 8259. Now that the IO-APIC can handle
	 * this irq, update cfg->domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);


	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, cfg, trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(apic_id, pin, entry);
}

static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mp_ioapics[apic_id].apicid, pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int apic_id)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, apic_id, pin))
			continue;

		irq = pin_2_irq(idx, apic_id, pin);

		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(apic_id, irq))
			continue;

		set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

static void __init setup_IO_APIC_irqs(void)
{
	unsigned int apic_id;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
		__io_apic_setup_irqs(apic_id);
}
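
/*
 * Per-pin setup above is always the same sequence: look up the MP
 * entry, derive trigger/polarity, then io_apic_setup_irq_pin() ->
 * setup_ioapic_irq(), which assigns a vector, builds the route entry,
 * registers the flow handler and finally writes the RTE.
 */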

/*
 * For a gsi that is not on the first ioapic and could not use
 * acpi_register_gsi(), like some special sci on IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}


__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
				  " Stat Dmod Deli Vect:\n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
				i,
				entry.dest
			);

			printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

fs_initcall(print_ICs);


/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* See if the MP table has reported the ExtINT. */
	/* If we could not find the appropriate pin by looking at the ioapic,
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
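
/*
 * ioapic_i8259 is remembered here so that disable_IO_APIC() below can
 * put that pin back into virtual-wire mode at reboot/kdump time and
 * keep legacy interrupts flowing through the 8259A.
 */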

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A mode,
	 * as virtual wire B is a little more complex (we would need to
	 * configure both the IOAPIC RTE and the interrupt-remapping table
	 * entry). As this gets called during crash dump, keep this simple
	 * for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}

#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mp_ioapics[apic_id].apicid == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}

void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards: their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
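
/*
 * The arithmetic above: mdelay((10 * 1000) / HZ) busy-waits for
 * roughly ten timer periods, and the check then demands that more
 * than four jiffies have elapsed, which tolerates a few lost or
 * cached ticks.
 */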
2076 * 2077 * This is not complete - we should be able to fake 2078 * an edge even if it isn't on the 8259A... 2079 */ 2080 2081 static unsigned int startup_ioapic_irq(struct irq_data *data) 2082 { 2083 int was_pending = 0, irq = data->irq; 2084 unsigned long flags; 2085 2086 raw_spin_lock_irqsave(&ioapic_lock, flags); 2087 if (irq < legacy_pic->nr_legacy_irqs) { 2088 legacy_pic->mask(irq); 2089 if (legacy_pic->irq_pending(irq)) 2090 was_pending = 1; 2091 } 2092 __unmask_ioapic(data->chip_data); 2093 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2094 2095 return was_pending; 2096 } 2097 2098 static int ioapic_retrigger_irq(struct irq_data *data) 2099 { 2100 struct irq_cfg *cfg = data->chip_data; 2101 unsigned long flags; 2102 2103 raw_spin_lock_irqsave(&vector_lock, flags); 2104 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2105 raw_spin_unlock_irqrestore(&vector_lock, flags); 2106 2107 return 1; 2108 } 2109 2110 /* 2111 * Level and edge triggered IO-APIC interrupts need different handling, 2112 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 2113 * handled with the level-triggered descriptor, but that one has slightly 2114 * more overhead. Level-triggered interrupts cannot be handled with the 2115 * edge-triggered handler, without risking IRQ storms and other ugly 2116 * races. 2117 */ 2118 2119 #ifdef CONFIG_SMP 2120 void send_cleanup_vector(struct irq_cfg *cfg) 2121 { 2122 cpumask_var_t cleanup_mask; 2123 2124 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2125 unsigned int i; 2126 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2127 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2128 } else { 2129 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2130 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2131 free_cpumask_var(cleanup_mask); 2132 } 2133 cfg->move_in_progress = 0; 2134 } 2135 2136 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2137 { 2138 int apic, pin; 2139 struct irq_pin_list *entry; 2140 u8 vector = cfg->vector; 2141 2142 for_each_irq_pin(entry, cfg->irq_2_pin) { 2143 unsigned int reg; 2144 2145 apic = entry->apic; 2146 pin = entry->pin; 2147 /* 2148 * With interrupt-remapping, destination information comes 2149 * from interrupt-remapping table entry. 2150 */ 2151 if (!irq_remapped(cfg)) 2152 io_apic_write(apic, 0x11 + pin*2, dest); 2153 reg = io_apic_read(apic, 0x10 + pin*2); 2154 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2155 reg |= vector; 2156 io_apic_modify(apic, 0x10 + pin*2, reg); 2157 } 2158 } 2159 2160 /* 2161 * Either sets data->affinity to a valid value, and returns 2162 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2163 * leaves data->affinity untouched. 
2164 */ 2165 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2166 unsigned int *dest_id) 2167 { 2168 struct irq_cfg *cfg = data->chip_data; 2169 2170 if (!cpumask_intersects(mask, cpu_online_mask)) 2171 return -1; 2172 2173 if (assign_irq_vector(data->irq, data->chip_data, mask)) 2174 return -1; 2175 2176 cpumask_copy(data->affinity, mask); 2177 2178 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2179 return 0; 2180 } 2181 2182 static int 2183 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2184 bool force) 2185 { 2186 unsigned int dest, irq = data->irq; 2187 unsigned long flags; 2188 int ret; 2189 2190 raw_spin_lock_irqsave(&ioapic_lock, flags); 2191 ret = __ioapic_set_affinity(data, mask, &dest); 2192 if (!ret) { 2193 /* Only the high 8 bits are valid. */ 2194 dest = SET_APIC_LOGICAL_ID(dest); 2195 __target_IO_APIC_irq(irq, dest, data->chip_data); 2196 } 2197 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2198 return ret; 2199 } 2200 2201 #ifdef CONFIG_INTR_REMAP 2202 2203 /* 2204 * Migrate the IO-APIC irq in the presence of intr-remapping. 2205 * 2206 * For both level and edge triggered, irq migration is a simple atomic 2207 * update (of vector and cpu destination) of the IRTE and a flush of the hardware cache. 2208 * 2209 * For level triggered, we eliminate the io-apic RTE modification (with the 2210 * updated vector information), by using a virtual vector (io-apic pin number). 2211 * The real vector that is used for interrupting the cpu will come from 2212 * the interrupt-remapping table entry. 2213 */ 2214 static int 2215 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2216 bool force) 2217 { 2218 struct irq_cfg *cfg = data->chip_data; 2219 unsigned int dest, irq = data->irq; 2220 struct irte irte; 2221 2222 if (!cpumask_intersects(mask, cpu_online_mask)) 2223 return -EINVAL; 2224 2225 if (get_irte(irq, &irte)) 2226 return -EBUSY; 2227 2228 if (assign_irq_vector(irq, cfg, mask)) 2229 return -EBUSY; 2230 2231 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2232 2233 irte.vector = cfg->vector; 2234 irte.dest_id = IRTE_DEST(dest); 2235 2236 /* 2237 * Modify the IRTE and flush the Interrupt entry cache. 2238 */ 2239 modify_irte(irq, &irte); 2240 2241 if (cfg->move_in_progress) 2242 send_cleanup_vector(cfg); 2243 2244 cpumask_copy(data->affinity, mask); 2245 return 0; 2246 } 2247 2248 #else 2249 static inline int 2250 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2251 bool force) 2252 { 2253 return 0; 2254 } 2255 #endif 2256 2257 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2258 { 2259 unsigned vector, me; 2260 2261 ack_APIC_irq(); 2262 exit_idle(); 2263 irq_enter(); 2264 2265 me = smp_processor_id(); 2266 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2267 unsigned int irq; 2268 unsigned int irr; 2269 struct irq_desc *desc; 2270 struct irq_cfg *cfg; 2271 irq = __this_cpu_read(vector_irq[vector]); 2272 2273 if (irq == -1) 2274 continue; 2275 2276 desc = irq_to_desc(irq); 2277 if (!desc) 2278 continue; 2279 2280 cfg = irq_cfg(irq); 2281 raw_spin_lock(&desc->lock); 2282 2283 /* 2284 * Check if the irq migration is in progress. If so, we 2285 * haven't received the cleanup request yet for this irq. 
2286 */ 2287 if (cfg->move_in_progress) 2288 goto unlock; 2289 2290 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2291 goto unlock; 2292 2293 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2294 /* 2295 * Check if the vector that needs to be cleaned up is 2296 * registered at the cpu's IRR. If so, then this is not 2297 * the best time to clean it up. Let's clean it up in the 2298 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2299 * to myself. 2300 */ 2301 if (irr & (1 << (vector % 32))) { 2302 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2303 goto unlock; 2304 } 2305 __this_cpu_write(vector_irq[vector], -1); 2306 unlock: 2307 raw_spin_unlock(&desc->lock); 2308 } 2309 2310 irq_exit(); 2311 } 2312 2313 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2314 { 2315 unsigned me; 2316 2317 if (likely(!cfg->move_in_progress)) 2318 return; 2319 2320 me = smp_processor_id(); 2321 2322 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2323 send_cleanup_vector(cfg); 2324 } 2325 2326 static void irq_complete_move(struct irq_cfg *cfg) 2327 { 2328 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2329 } 2330 2331 void irq_force_complete_move(int irq) 2332 { 2333 struct irq_cfg *cfg = irq_get_chip_data(irq); 2334 2335 if (!cfg) 2336 return; 2337 2338 __irq_complete_move(cfg, cfg->vector); 2339 } 2340 #else 2341 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2342 #endif 2343 2344 static void ack_apic_edge(struct irq_data *data) 2345 { 2346 irq_complete_move(data->chip_data); 2347 irq_move_irq(data); 2348 ack_APIC_irq(); 2349 } 2350 2351 atomic_t irq_mis_count; 2352 2353 /* 2354 * IO-APIC versions below 0x20 don't support the EOI register. 2355 * For the record, here is the information about various versions: 2356 * 0Xh 82489DX 2357 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant 2358 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant 2359 * 30h-FFh Reserved 2360 * 2361 * Some of the Intel ICH Specs (ICH2 to ICH5) document the io-apic 2362 * version as 0x2. This is an error in the documentation and these ICH chips 2363 * use io-apics of version 0x20. 2364 * 2365 * For IO-APICs with an EOI register, we use that to do an explicit EOI. 2366 * Otherwise, we simulate the EOI message manually by changing the trigger 2367 * mode to edge and then back to level, with the RTE being masked during this. 2368 */ 2369 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2370 { 2371 struct irq_pin_list *entry; 2372 unsigned long flags; 2373 2374 raw_spin_lock_irqsave(&ioapic_lock, flags); 2375 for_each_irq_pin(entry, cfg->irq_2_pin) { 2376 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2377 /* 2378 * Intr-remapping uses the pin number as the virtual vector 2379 * in the RTE. The actual vector is programmed in the 2380 * intr-remapping table entry. Hence for the io-apic 2381 * EOI we use the pin number. 
2382 */ 2383 if (irq_remapped(cfg)) 2384 io_apic_eoi(entry->apic, entry->pin); 2385 else 2386 io_apic_eoi(entry->apic, cfg->vector); 2387 } else { 2388 __mask_and_edge_IO_APIC_irq(entry); 2389 __unmask_and_level_IO_APIC_irq(entry); 2390 } 2391 } 2392 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2393 } 2394 2395 static void ack_apic_level(struct irq_data *data) 2396 { 2397 struct irq_cfg *cfg = data->chip_data; 2398 int i, do_unmask_irq = 0, irq = data->irq; 2399 unsigned long v; 2400 2401 irq_complete_move(cfg); 2402 #ifdef CONFIG_GENERIC_PENDING_IRQ 2403 /* If we are moving the irq we need to mask it */ 2404 if (unlikely(irqd_is_setaffinity_pending(data))) { 2405 do_unmask_irq = 1; 2406 mask_ioapic(cfg); 2407 } 2408 #endif 2409 2410 /* 2411 * It appears there is an erratum which affects at least version 0x11 2412 * of I/O APIC (that's the 82093AA and cores integrated into various 2413 * chipsets). Under certain conditions a level-triggered interrupt is 2414 * erroneously delivered as an edge-triggered one but the respective IRR 2415 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2416 * message but it will never arrive and further interrupts are blocked 2417 * from the source. The exact reason is so far unknown, but the 2418 * phenomenon was observed when two consecutive interrupt requests 2419 * from a given source get delivered to the same CPU and the source is 2420 * temporarily disabled in between. 2421 * 2422 * A workaround is to simulate an EOI message manually. We achieve it 2423 * by setting the trigger mode to edge and then to level when the edge 2424 * trigger mode gets detected in the TMR of a local APIC for a 2425 * level-triggered interrupt. We mask the source for the time of the 2426 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2427 * The idea is from Manfred Spraul. --macro 2428 * 2429 * Also in the case when a cpu goes offline, fixup_irqs() will forward 2430 * any unhandled interrupt on the offlined cpu to the new cpu 2431 * destination that is handling the corresponding interrupt. This 2432 * interrupt forwarding is done via IPIs. Hence, in this case also 2433 * the level-triggered io-apic interrupt will be seen as an edge 2434 * interrupt in the IRR. And we can't rely on the cpu's EOI 2435 * to be broadcast to the IO-APICs which will clear the remoteIRR 2436 * corresponding to the level-triggered interrupt. Hence on IO-APICs 2437 * supporting the EOI register, we do an explicit EOI to clear the 2438 * remote IRR and on IO-APICs which don't have an EOI register, 2439 * we use the above logic (mask+edge followed by unmask+level) from 2440 * Manfred Spraul to clear the remote IRR. 2441 */ 2442 i = cfg->vector; 2443 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2444 2445 /* 2446 * We must acknowledge the irq before we move it or the acknowledge will 2447 * not propagate properly. 2448 */ 2449 ack_APIC_irq(); 2450 2451 /* 2452 * Tail end of clearing the remote IRR bit (either by delivering the EOI 2453 * message via the io-apic EOI register write or simulating it using 2454 * mask+edge followed by unmask+level logic) manually when the 2455 * level-triggered interrupt is seen as an edge-triggered interrupt 2456 * at the cpu. 2457 */ 2458 if (!(v & (1 << (i & 0x1f)))) { 2459 atomic_inc(&irq_mis_count); 2460 2461 eoi_ioapic_irq(irq, cfg); 2462 } 2463 2464 /* Now we can move and re-enable the irq */ 2465 if (unlikely(do_unmask_irq)) { 2466 /* Only migrate the irq if the ack has been received. 
2467 * 2468 * On rare occasions the broadcast level triggered ack gets 2469 * delayed going to ioapics, and if we reprogram the 2470 * vector while Remote IRR is still set the irq will never 2471 * fire again. 2472 * 2473 * To prevent this scenario we read the Remote IRR bit 2474 * of the ioapic. This has two effects. 2475 * - On any sane system the read of the ioapic will 2476 * flush writes (and acks) going to the ioapic from 2477 * this cpu. 2478 * - We get to see if the ACK has actually been delivered. 2479 * 2480 * Based on failed experiments of reprogramming the 2481 * ioapic entry from outside of irq context starting 2482 * with masking the ioapic entry and then polling until 2483 * Remote IRR was clear before reprogramming the 2484 * ioapic, I don't trust the Remote IRR bit to be 2485 * completely accurate. 2486 * 2487 * However there appears to be no other way to plug 2488 * this race, so if the Remote IRR bit is not 2489 * accurate and is causing problems then it is a hardware bug 2490 * and you can go talk to the chipset vendor about it. 2491 */ 2492 if (!io_apic_level_ack_pending(cfg)) 2493 irq_move_masked_irq(data); 2494 unmask_ioapic(cfg); 2495 } 2496 } 2497 2498 #ifdef CONFIG_INTR_REMAP 2499 static void ir_ack_apic_edge(struct irq_data *data) 2500 { 2501 ack_APIC_irq(); 2502 } 2503 2504 static void ir_ack_apic_level(struct irq_data *data) 2505 { 2506 ack_APIC_irq(); 2507 eoi_ioapic_irq(data->irq, data->chip_data); 2508 } 2509 #endif /* CONFIG_INTR_REMAP */ 2510 2511 static struct irq_chip ioapic_chip __read_mostly = { 2512 .name = "IO-APIC", 2513 .irq_startup = startup_ioapic_irq, 2514 .irq_mask = mask_ioapic_irq, 2515 .irq_unmask = unmask_ioapic_irq, 2516 .irq_ack = ack_apic_edge, 2517 .irq_eoi = ack_apic_level, 2518 #ifdef CONFIG_SMP 2519 .irq_set_affinity = ioapic_set_affinity, 2520 #endif 2521 .irq_retrigger = ioapic_retrigger_irq, 2522 }; 2523 2524 static struct irq_chip ir_ioapic_chip __read_mostly = { 2525 .name = "IR-IO-APIC", 2526 .irq_startup = startup_ioapic_irq, 2527 .irq_mask = mask_ioapic_irq, 2528 .irq_unmask = unmask_ioapic_irq, 2529 #ifdef CONFIG_INTR_REMAP 2530 .irq_ack = ir_ack_apic_edge, 2531 .irq_eoi = ir_ack_apic_level, 2532 #ifdef CONFIG_SMP 2533 .irq_set_affinity = ir_ioapic_set_affinity, 2534 #endif 2535 #endif 2536 .irq_retrigger = ioapic_retrigger_irq, 2537 }; 2538 2539 static inline void init_IO_APIC_traps(void) 2540 { 2541 struct irq_cfg *cfg; 2542 unsigned int irq; 2543 2544 /* 2545 * NOTE! The local APIC isn't very good at handling 2546 * multiple interrupts at the same interrupt level. 2547 * As the interrupt level is determined by taking the 2548 * vector number and shifting that right by 4, we 2549 * want to spread these out a bit so that they don't 2550 * all fall in the same interrupt level. 2551 * 2552 * Also, we've got to be careful not to trash gate 2553 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2554 */ 2555 for_each_active_irq(irq) { 2556 cfg = irq_get_chip_data(irq); 2557 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2558 /* 2559 * Hmm.. We don't have an entry for this, 2560 * so default to an old-fashioned 8259 2561 * interrupt if we can.. 2562 */ 2563 if (irq < legacy_pic->nr_legacy_irqs) 2564 legacy_pic->make_irq(irq); 2565 else 2566 /* Strange. Oh, well.. 
*/ 2567 irq_set_chip(irq, &no_irq_chip); 2568 } 2569 } 2570 } 2571 2572 /* 2573 * The local APIC irq-chip implementation: 2574 */ 2575 2576 static void mask_lapic_irq(struct irq_data *data) 2577 { 2578 unsigned long v; 2579 2580 v = apic_read(APIC_LVT0); 2581 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2582 } 2583 2584 static void unmask_lapic_irq(struct irq_data *data) 2585 { 2586 unsigned long v; 2587 2588 v = apic_read(APIC_LVT0); 2589 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2590 } 2591 2592 static void ack_lapic_irq(struct irq_data *data) 2593 { 2594 ack_APIC_irq(); 2595 } 2596 2597 static struct irq_chip lapic_chip __read_mostly = { 2598 .name = "local-APIC", 2599 .irq_mask = mask_lapic_irq, 2600 .irq_unmask = unmask_lapic_irq, 2601 .irq_ack = ack_lapic_irq, 2602 }; 2603 2604 static void lapic_register_intr(int irq) 2605 { 2606 irq_clear_status_flags(irq, IRQ_LEVEL); 2607 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2608 "edge"); 2609 } 2610 2611 /* 2612 * This looks a bit hackish but it's about the only way of sending 2613 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2614 * not support the ExtINT mode, unfortunately. We need to send these 2615 * cycles as some i82489DX-based boards have glue logic that keeps the 2616 * 8259A interrupt line asserted until INTA. --macro 2617 */ 2618 static inline void __init unlock_ExtINT_logic(void) 2619 { 2620 int apic, pin, i; 2621 struct IO_APIC_route_entry entry0, entry1; 2622 unsigned char save_control, save_freq_select; 2623 2624 pin = find_isa_irq_pin(8, mp_INT); 2625 if (pin == -1) { 2626 WARN_ON_ONCE(1); 2627 return; 2628 } 2629 apic = find_isa_irq_apic(8, mp_INT); 2630 if (apic == -1) { 2631 WARN_ON_ONCE(1); 2632 return; 2633 } 2634 2635 entry0 = ioapic_read_entry(apic, pin); 2636 clear_IO_APIC_pin(apic, pin); 2637 2638 memset(&entry1, 0, sizeof(entry1)); 2639 2640 entry1.dest_mode = 0; /* physical delivery */ 2641 entry1.mask = 0; /* unmask IRQ now */ 2642 entry1.dest = hard_smp_processor_id(); 2643 entry1.delivery_mode = dest_ExtINT; 2644 entry1.polarity = entry0.polarity; 2645 entry1.trigger = 0; 2646 entry1.vector = 0; 2647 2648 ioapic_write_entry(apic, pin, entry1); 2649 2650 save_control = CMOS_READ(RTC_CONTROL); 2651 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2652 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2653 RTC_FREQ_SELECT); 2654 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2655 2656 i = 100; 2657 while (i-- > 0) { 2658 mdelay(10); 2659 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2660 i -= 10; 2661 } 2662 2663 CMOS_WRITE(save_control, RTC_CONTROL); 2664 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2665 clear_IO_APIC_pin(apic, pin); 2666 2667 ioapic_write_entry(apic, pin, entry0); 2668 } 2669 2670 static int disable_timer_pin_1 __initdata; 2671 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2672 static int __init disable_timer_pin_setup(char *arg) 2673 { 2674 disable_timer_pin_1 = 1; 2675 return 0; 2676 } 2677 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2678 2679 int timer_through_8259 __initdata; 2680 2681 /* 2682 * This code may look a bit paranoid, but it's supposed to cooperate with 2683 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2684 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2685 * fanatically on his truly buggy board. 2686 * 2687 * FIXME: really need to revamp this for all platforms. 
2688 */ 2689 static inline void __init check_timer(void) 2690 { 2691 struct irq_cfg *cfg = irq_get_chip_data(0); 2692 int node = cpu_to_node(0); 2693 int apic1, pin1, apic2, pin2; 2694 unsigned long flags; 2695 int no_pin1 = 0; 2696 2697 local_irq_save(flags); 2698 2699 /* 2700 * get/set the timer IRQ vector: 2701 */ 2702 legacy_pic->mask(0); 2703 assign_irq_vector(0, cfg, apic->target_cpus()); 2704 2705 /* 2706 * As IRQ0 is to be enabled in the 8259A, the virtual 2707 * wire has to be disabled in the local APIC. Also 2708 * timer interrupts need to be acknowledged manually in 2709 * the 8259A for the i82489DX when using the NMI 2710 * watchdog as that APIC treats NMIs as level-triggered. 2711 * The AEOI mode will finish them in the 8259A 2712 * automatically. 2713 */ 2714 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2715 legacy_pic->init(1); 2716 2717 pin1 = find_isa_irq_pin(0, mp_INT); 2718 apic1 = find_isa_irq_apic(0, mp_INT); 2719 pin2 = ioapic_i8259.pin; 2720 apic2 = ioapic_i8259.apic; 2721 2722 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2723 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2724 cfg->vector, apic1, pin1, apic2, pin2); 2725 2726 /* 2727 * Some BIOS writers are clueless and report the ExtINTA 2728 * I/O APIC input from the cascaded 8259A as the timer 2729 * interrupt input. So just in case, if only one pin 2730 * was found above, try it both directly and through the 2731 * 8259A. 2732 */ 2733 if (pin1 == -1) { 2734 if (intr_remapping_enabled) 2735 panic("BIOS bug: timer not connected to IO-APIC"); 2736 pin1 = pin2; 2737 apic1 = apic2; 2738 no_pin1 = 1; 2739 } else if (pin2 == -1) { 2740 pin2 = pin1; 2741 apic2 = apic1; 2742 } 2743 2744 if (pin1 != -1) { 2745 /* 2746 * Ok, does IRQ0 through the IOAPIC work? 2747 */ 2748 if (no_pin1) { 2749 add_pin_to_irq_node(cfg, node, apic1, pin1); 2750 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2751 } else { 2752 /* for an edge trigger, setup_ioapic_irq already 2753 * leaves it unmasked, 2754 * so we only need to unmask if it is level-triggered. 2755 * Do we really have a level-triggered timer? 2756 */ 2757 int idx; 2758 idx = find_irq_entry(apic1, pin1, mp_INT); 2759 if (idx != -1 && irq_trigger(idx)) 2760 unmask_ioapic(cfg); 2761 } 2762 if (timer_irq_works()) { 2763 if (disable_timer_pin_1 > 0) 2764 clear_IO_APIC_pin(0, pin1); 2765 goto out; 2766 } 2767 if (intr_remapping_enabled) 2768 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2769 local_irq_disable(); 2770 clear_IO_APIC_pin(apic1, pin1); 2771 if (!no_pin1) 2772 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2773 "8254 timer not connected to IO-APIC\n"); 2774 2775 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2776 "(IRQ0) through the 8259A ...\n"); 2777 apic_printk(APIC_QUIET, KERN_INFO 2778 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2779 /* 2780 * legacy devices should be connected to IO APIC #0 2781 */ 2782 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2783 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2784 legacy_pic->unmask(0); 2785 if (timer_irq_works()) { 2786 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2787 timer_through_8259 = 1; 2788 goto out; 2789 } 2790 /* 2791 * Cleanup, just in case ... 2792 */ 2793 local_irq_disable(); 2794 legacy_pic->mask(0); 2795 clear_IO_APIC_pin(apic2, pin2); 2796 apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); 2797 } 2798 2799 apic_printk(APIC_QUIET, KERN_INFO 2800 "...trying to set up timer as Virtual Wire IRQ...\n"); 2801 2802 lapic_register_intr(0); 2803 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2804 legacy_pic->unmask(0); 2805 2806 if (timer_irq_works()) { 2807 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2808 goto out; 2809 } 2810 local_irq_disable(); 2811 legacy_pic->mask(0); 2812 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2813 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2814 2815 apic_printk(APIC_QUIET, KERN_INFO 2816 "...trying to set up timer as ExtINT IRQ...\n"); 2817 2818 legacy_pic->init(0); 2819 legacy_pic->make_irq(0); 2820 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2821 2822 unlock_ExtINT_logic(); 2823 2824 if (timer_irq_works()) { 2825 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2826 goto out; 2827 } 2828 local_irq_disable(); 2829 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2830 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2831 "report. Then try booting with the 'noapic' option.\n"); 2832 out: 2833 local_irq_restore(flags); 2834 } 2835 2836 /* 2837 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2838 * to devices. However there may be an I/O APIC pin available for 2839 * this interrupt regardless. The pin may be left unconnected, but 2840 * typically it will be reused as an ExtINT cascade interrupt for 2841 * the master 8259A. In the MPS case such a pin will normally be 2842 * reported as an ExtINT interrupt in the MP table. With ACPI 2843 * there is no provision for ExtINT interrupts, and in the absence 2844 * of an override it would be treated as an ordinary ISA I/O APIC 2845 * interrupt, that is edge-triggered and unmasked by default. We 2846 * used to do this, but it caused problems on some systems because 2847 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2848 * the same ExtINT cascade interrupt to drive the local APIC of the 2849 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2850 * the I/O APIC in all cases now. No actual device should request 2851 * it anyway. --macro 2852 */ 2853 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2854 2855 void __init setup_IO_APIC(void) 2856 { 2857 2858 /* 2859 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2860 */ 2861 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2862 2863 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2864 /* 2865 * Set up IO-APIC IRQ routing. 2866 */ 2867 x86_init.mpparse.setup_ioapic_ids(); 2868 2869 sync_Arb_IDs(); 2870 setup_IO_APIC_irqs(); 2871 init_IO_APIC_traps(); 2872 if (legacy_pic->nr_legacy_irqs) 2873 check_timer(); 2874 } 2875 2876 /* 2877 * Called after all the initialization is done. 
If we didn't find any 2878 * APIC bugs then we can allow the modify fast path 2879 */ 2880 2881 static int __init io_apic_bug_finalize(void) 2882 { 2883 if (sis_apic_bug == -1) 2884 sis_apic_bug = 0; 2885 return 0; 2886 } 2887 2888 late_initcall(io_apic_bug_finalize); 2889 2890 static void suspend_ioapic(int ioapic_id) 2891 { 2892 struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2893 int i; 2894 2895 if (!saved_data) 2896 return; 2897 2898 for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2899 saved_data[i] = ioapic_read_entry(ioapic_id, i); 2900 } 2901 2902 static int ioapic_suspend(void) 2903 { 2904 int ioapic_id; 2905 2906 for (ioapic_id = 0; ioapic_id < nr_ioapics; ioapic_id++) 2907 suspend_ioapic(ioapic_id); 2908 2909 return 0; 2910 } 2911 2912 static void resume_ioapic(int ioapic_id) 2913 { 2914 struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2915 unsigned long flags; 2916 union IO_APIC_reg_00 reg_00; 2917 int i; 2918 2919 if (!saved_data) 2920 return; 2921 2922 raw_spin_lock_irqsave(&ioapic_lock, flags); 2923 reg_00.raw = io_apic_read(ioapic_id, 0); 2924 if (reg_00.bits.ID != mp_ioapics[ioapic_id].apicid) { 2925 reg_00.bits.ID = mp_ioapics[ioapic_id].apicid; 2926 io_apic_write(ioapic_id, 0, reg_00.raw); 2927 } 2928 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2929 for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2930 ioapic_write_entry(ioapic_id, i, saved_data[i]); 2931 } 2932 2933 static void ioapic_resume(void) 2934 { 2935 int ioapic_id; 2936 2937 for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 2938 resume_ioapic(ioapic_id); 2939 } 2940 2941 static struct syscore_ops ioapic_syscore_ops = { 2942 .suspend = ioapic_suspend, 2943 .resume = ioapic_resume, 2944 }; 2945 2946 static int __init ioapic_init_ops(void) 2947 { 2948 register_syscore_ops(&ioapic_syscore_ops); 2949 2950 return 0; 2951 } 2952 2953 device_initcall(ioapic_init_ops); 2954 2955 /* 2956 * Dynamic irq allocation and deallocation 2957 */ 2958 unsigned int create_irq_nr(unsigned int from, int node) 2959 { 2960 struct irq_cfg *cfg; 2961 unsigned long flags; 2962 unsigned int ret = 0; 2963 int irq; 2964 2965 if (from < nr_irqs_gsi) 2966 from = nr_irqs_gsi; 2967 2968 irq = alloc_irq_from(from, node); 2969 if (irq < 0) 2970 return 0; 2971 cfg = alloc_irq_cfg(irq, node); 2972 if (!cfg) { 2973 free_irq_at(irq, NULL); 2974 return 0; 2975 } 2976 2977 raw_spin_lock_irqsave(&vector_lock, flags); 2978 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 2979 ret = irq; 2980 raw_spin_unlock_irqrestore(&vector_lock, flags); 2981 2982 if (ret) { 2983 irq_set_chip_data(irq, cfg); 2984 irq_clear_status_flags(irq, IRQ_NOREQUEST); 2985 } else { 2986 free_irq_at(irq, cfg); 2987 } 2988 return ret; 2989 } 2990 2991 int create_irq(void) 2992 { 2993 int node = cpu_to_node(0); 2994 unsigned int irq_want; 2995 int irq; 2996 2997 irq_want = nr_irqs_gsi; 2998 irq = create_irq_nr(irq_want, node); 2999 3000 if (irq == 0) 3001 irq = -1; 3002 3003 return irq; 3004 } 3005 3006 void destroy_irq(unsigned int irq) 3007 { 3008 struct irq_cfg *cfg = irq_get_chip_data(irq); 3009 unsigned long flags; 3010 3011 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3012 3013 if (irq_remapped(cfg)) 3014 free_irte(irq); 3015 raw_spin_lock_irqsave(&vector_lock, flags); 3016 __clear_irq_vector(irq, cfg); 3017 raw_spin_unlock_irqrestore(&vector_lock, flags); 3018 free_irq_at(irq, cfg); 3019 }
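/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer of the dynamic IRQ API above. The handler and the "example"
 * name are hypothetical placeholders; the pattern that matters is
 * checking create_irq() for failure and pairing free_irq() with
 * destroy_irq() on teardown.
 */
#if 0	/* example only */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_grab_dynamic_irq(void)
{
	int irq = create_irq();		/* returns -1 on failure */

	if (irq < 0)
		return -ENOSPC;

	if (request_irq(irq, example_handler, 0, "example", NULL)) {
		destroy_irq(irq);	/* releases the vector and irq_cfg */
		return -EBUSY;
	}
	/* ... on teardown: free_irq(irq, NULL); destroy_irq(irq); */
	return irq;
}
#endif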
3020 3021 /* 3022 * MSI message composition 3023 */ 3024 #ifdef CONFIG_PCI_MSI 3025 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3026 struct msi_msg *msg, u8 hpet_id) 3027 { 3028 struct irq_cfg *cfg; 3029 int err; 3030 unsigned dest; 3031 3032 if (disable_apic) 3033 return -ENXIO; 3034 3035 cfg = irq_cfg(irq); 3036 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3037 if (err) 3038 return err; 3039 3040 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3041 3042 if (irq_remapped(cfg)) { 3043 struct irte irte; 3044 int ir_index; 3045 u16 sub_handle; 3046 3047 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3048 BUG_ON(ir_index == -1); 3049 3050 prepare_irte(&irte, cfg->vector, dest); 3051 3052 /* Set source-id of interrupt request */ 3053 if (pdev) 3054 set_msi_sid(&irte, pdev); 3055 else 3056 set_hpet_sid(&irte, hpet_id); 3057 3058 modify_irte(irq, &irte); 3059 3060 msg->address_hi = MSI_ADDR_BASE_HI; 3061 msg->data = sub_handle; 3062 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3063 MSI_ADDR_IR_SHV | 3064 MSI_ADDR_IR_INDEX1(ir_index) | 3065 MSI_ADDR_IR_INDEX2(ir_index); 3066 } else { 3067 if (x2apic_enabled()) 3068 msg->address_hi = MSI_ADDR_BASE_HI | 3069 MSI_ADDR_EXT_DEST_ID(dest); 3070 else 3071 msg->address_hi = MSI_ADDR_BASE_HI; 3072 3073 msg->address_lo = 3074 MSI_ADDR_BASE_LO | 3075 ((apic->irq_dest_mode == 0) ? 3076 MSI_ADDR_DEST_MODE_PHYSICAL: 3077 MSI_ADDR_DEST_MODE_LOGICAL) | 3078 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3079 MSI_ADDR_REDIRECTION_CPU: 3080 MSI_ADDR_REDIRECTION_LOWPRI) | 3081 MSI_ADDR_DEST_ID(dest); 3082 3083 msg->data = 3084 MSI_DATA_TRIGGER_EDGE | 3085 MSI_DATA_LEVEL_ASSERT | 3086 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3087 MSI_DATA_DELIVERY_FIXED: 3088 MSI_DATA_DELIVERY_LOWPRI) | 3089 MSI_DATA_VECTOR(cfg->vector); 3090 } 3091 return err; 3092 } 3093 3094 #ifdef CONFIG_SMP 3095 static int 3096 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3097 { 3098 struct irq_cfg *cfg = data->chip_data; 3099 struct msi_msg msg; 3100 unsigned int dest; 3101 3102 if (__ioapic_set_affinity(data, mask, &dest)) 3103 return -1; 3104 3105 __get_cached_msi_msg(data->msi_desc, &msg); 3106 3107 msg.data &= ~MSI_DATA_VECTOR_MASK; 3108 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3109 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3110 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3111 3112 __write_msi_msg(data->msi_desc, &msg); 3113 3114 return 0; 3115 } 3116 #ifdef CONFIG_INTR_REMAP 3117 /* 3118 * Migrate the MSI irq to another cpumask. This migration is 3119 * done in process context using interrupt-remapping hardware. 3120 */ 3121 static int 3122 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3123 bool force) 3124 { 3125 struct irq_cfg *cfg = data->chip_data; 3126 unsigned int dest, irq = data->irq; 3127 struct irte irte; 3128 3129 if (get_irte(irq, &irte)) 3130 return -1; 3131 3132 if (__ioapic_set_affinity(data, mask, &dest)) 3133 return -1; 3134 3135 irte.vector = cfg->vector; 3136 irte.dest_id = IRTE_DEST(dest); 3137 3138 /* 3139 * atomically update the IRTE with the new destination and vector. 3140 */ 3141 modify_irte(irq, &irte); 3142 3143 /* 3144 * After this point, all the interrupts will start arriving 3145 * at the new destination. So, time to clean up the previous 3146 * vector allocation. 
3147 */ 3148 if (cfg->move_in_progress) 3149 send_cleanup_vector(cfg); 3150 3151 return 0; 3152 } 3153 3154 #endif 3155 #endif /* CONFIG_SMP */ 3156 3157 /* 3158 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3159 * which implement the MSI or MSI-X Capability Structure. 3160 */ 3161 static struct irq_chip msi_chip = { 3162 .name = "PCI-MSI", 3163 .irq_unmask = unmask_msi_irq, 3164 .irq_mask = mask_msi_irq, 3165 .irq_ack = ack_apic_edge, 3166 #ifdef CONFIG_SMP 3167 .irq_set_affinity = msi_set_affinity, 3168 #endif 3169 .irq_retrigger = ioapic_retrigger_irq, 3170 }; 3171 3172 static struct irq_chip msi_ir_chip = { 3173 .name = "IR-PCI-MSI", 3174 .irq_unmask = unmask_msi_irq, 3175 .irq_mask = mask_msi_irq, 3176 #ifdef CONFIG_INTR_REMAP 3177 .irq_ack = ir_ack_apic_edge, 3178 #ifdef CONFIG_SMP 3179 .irq_set_affinity = ir_msi_set_affinity, 3180 #endif 3181 #endif 3182 .irq_retrigger = ioapic_retrigger_irq, 3183 }; 3184 3185 /* 3186 * Map the PCI dev to the corresponding remapping hardware unit 3187 * and allocate 'nvec' consecutive interrupt-remapping table entries 3188 * in it. 3189 */ 3190 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3191 { 3192 struct intel_iommu *iommu; 3193 int index; 3194 3195 iommu = map_dev_to_ir(dev); 3196 if (!iommu) { 3197 printk(KERN_ERR 3198 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3199 return -ENOENT; 3200 } 3201 3202 index = alloc_irte(iommu, irq, nvec); 3203 if (index < 0) { 3204 printk(KERN_ERR 3205 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3206 pci_name(dev)); 3207 return -ENOSPC; 3208 } 3209 return index; 3210 } 3211 3212 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3213 { 3214 struct irq_chip *chip = &msi_chip; 3215 struct msi_msg msg; 3216 int ret; 3217 3218 ret = msi_compose_msg(dev, irq, &msg, -1); 3219 if (ret < 0) 3220 return ret; 3221 3222 irq_set_msi_desc(irq, msidesc); 3223 write_msi_msg(irq, &msg); 3224 3225 if (irq_remapped(irq_get_chip_data(irq))) { 3226 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3227 chip = &msi_ir_chip; 3228 } 3229 3230 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3231 3232 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3233 3234 return 0; 3235 } 3236 3237 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3238 { 3239 int node, ret, sub_handle, index = 0; 3240 unsigned int irq, irq_want; 3241 struct msi_desc *msidesc; 3242 struct intel_iommu *iommu = NULL; 3243 3244 /* x86 doesn't support multiple MSI yet */ 3245 if (type == PCI_CAP_ID_MSI && nvec > 1) 3246 return 1; 3247 3248 node = dev_to_node(&dev->dev); 3249 irq_want = nr_irqs_gsi; 3250 sub_handle = 0; 3251 list_for_each_entry(msidesc, &dev->msi_list, list) { 3252 irq = create_irq_nr(irq_want, node); 3253 if (irq == 0) 3254 return -1; 3255 irq_want = irq + 1; 3256 if (!intr_remapping_enabled) 3257 goto no_ir; 3258 3259 if (!sub_handle) { 3260 /* 3261 * allocate the consecutive block of IRTE's 3262 * for 'nvec' 3263 */ 3264 index = msi_alloc_irte(dev, irq, nvec); 3265 if (index < 0) { 3266 ret = index; 3267 goto error; 3268 } 3269 } else { 3270 iommu = map_dev_to_ir(dev); 3271 if (!iommu) { 3272 ret = -ENOENT; 3273 goto error; 3274 } 3275 /* 3276 * setup the mapping between the irq and the IRTE 3277 * base index, the sub_handle pointing to the 3278 * appropriate interrupt remap table entry. 
3279 */ 3280 set_irte_irq(irq, iommu, index, sub_handle); 3281 } 3282 no_ir: 3283 ret = setup_msi_irq(dev, msidesc, irq); 3284 if (ret < 0) 3285 goto error; 3286 sub_handle++; 3287 } 3288 return 0; 3289 3290 error: 3291 destroy_irq(irq); 3292 return ret; 3293 } 3294 3295 void native_teardown_msi_irq(unsigned int irq) 3296 { 3297 destroy_irq(irq); 3298 } 3299 3300 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3301 #ifdef CONFIG_SMP 3302 static int 3303 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3304 bool force) 3305 { 3306 struct irq_cfg *cfg = data->chip_data; 3307 unsigned int dest, irq = data->irq; 3308 struct msi_msg msg; 3309 3310 if (__ioapic_set_affinity(data, mask, &dest)) 3311 return -1; 3312 3313 dmar_msi_read(irq, &msg); 3314 3315 msg.data &= ~MSI_DATA_VECTOR_MASK; 3316 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3317 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3318 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3319 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3320 3321 dmar_msi_write(irq, &msg); 3322 3323 return 0; 3324 } 3325 3326 #endif /* CONFIG_SMP */ 3327 3328 static struct irq_chip dmar_msi_type = { 3329 .name = "DMAR_MSI", 3330 .irq_unmask = dmar_msi_unmask, 3331 .irq_mask = dmar_msi_mask, 3332 .irq_ack = ack_apic_edge, 3333 #ifdef CONFIG_SMP 3334 .irq_set_affinity = dmar_msi_set_affinity, 3335 #endif 3336 .irq_retrigger = ioapic_retrigger_irq, 3337 }; 3338 3339 int arch_setup_dmar_msi(unsigned int irq) 3340 { 3341 int ret; 3342 struct msi_msg msg; 3343 3344 ret = msi_compose_msg(NULL, irq, &msg, -1); 3345 if (ret < 0) 3346 return ret; 3347 dmar_msi_write(irq, &msg); 3348 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3349 "edge"); 3350 return 0; 3351 } 3352 #endif 3353 3354 #ifdef CONFIG_HPET_TIMER 3355 3356 #ifdef CONFIG_SMP 3357 static int hpet_msi_set_affinity(struct irq_data *data, 3358 const struct cpumask *mask, bool force) 3359 { 3360 struct irq_cfg *cfg = data->chip_data; 3361 struct msi_msg msg; 3362 unsigned int dest; 3363 3364 if (__ioapic_set_affinity(data, mask, &dest)) 3365 return -1; 3366 3367 hpet_msi_read(data->handler_data, &msg); 3368 3369 msg.data &= ~MSI_DATA_VECTOR_MASK; 3370 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3371 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3372 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3373 3374 hpet_msi_write(data->handler_data, &msg); 3375 3376 return 0; 3377 } 3378 3379 #endif /* CONFIG_SMP */ 3380 3381 static struct irq_chip ir_hpet_msi_type = { 3382 .name = "IR-HPET_MSI", 3383 .irq_unmask = hpet_msi_unmask, 3384 .irq_mask = hpet_msi_mask, 3385 #ifdef CONFIG_INTR_REMAP 3386 .irq_ack = ir_ack_apic_edge, 3387 #ifdef CONFIG_SMP 3388 .irq_set_affinity = ir_msi_set_affinity, 3389 #endif 3390 #endif 3391 .irq_retrigger = ioapic_retrigger_irq, 3392 }; 3393 3394 static struct irq_chip hpet_msi_type = { 3395 .name = "HPET_MSI", 3396 .irq_unmask = hpet_msi_unmask, 3397 .irq_mask = hpet_msi_mask, 3398 .irq_ack = ack_apic_edge, 3399 #ifdef CONFIG_SMP 3400 .irq_set_affinity = hpet_msi_set_affinity, 3401 #endif 3402 .irq_retrigger = ioapic_retrigger_irq, 3403 }; 3404 3405 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3406 { 3407 struct irq_chip *chip = &hpet_msi_type; 3408 struct msi_msg msg; 3409 int ret; 3410 3411 if (intr_remapping_enabled) { 3412 struct intel_iommu *iommu = map_hpet_to_ir(id); 3413 int index; 3414 3415 if (!iommu) 3416 return -1; 3417 3418 index = alloc_irte(iommu, irq, 1); 3419 if (index < 0) 3420 return -1; 3421 } 3422 3423 ret 
= msi_compose_msg(NULL, irq, &msg, id); 3424 if (ret < 0) 3425 return ret; 3426 3427 hpet_msi_write(irq_get_handler_data(irq), &msg); 3428 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3429 if (irq_remapped(irq_get_chip_data(irq))) 3430 chip = &ir_hpet_msi_type; 3431 3432 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3433 return 0; 3434 } 3435 #endif 3436 3437 #endif /* CONFIG_PCI_MSI */ 3438 /* 3439 * Hypertransport interrupt support 3440 */ 3441 #ifdef CONFIG_HT_IRQ 3442 3443 #ifdef CONFIG_SMP 3444 3445 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3446 { 3447 struct ht_irq_msg msg; 3448 fetch_ht_irq_msg(irq, &msg); 3449 3450 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3451 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3452 3453 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3454 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3455 3456 write_ht_irq_msg(irq, &msg); 3457 } 3458 3459 static int 3460 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3461 { 3462 struct irq_cfg *cfg = data->chip_data; 3463 unsigned int dest; 3464 3465 if (__ioapic_set_affinity(data, mask, &dest)) 3466 return -1; 3467 3468 target_ht_irq(data->irq, dest, cfg->vector); 3469 return 0; 3470 } 3471 3472 #endif 3473 3474 static struct irq_chip ht_irq_chip = { 3475 .name = "PCI-HT", 3476 .irq_mask = mask_ht_irq, 3477 .irq_unmask = unmask_ht_irq, 3478 .irq_ack = ack_apic_edge, 3479 #ifdef CONFIG_SMP 3480 .irq_set_affinity = ht_set_affinity, 3481 #endif 3482 .irq_retrigger = ioapic_retrigger_irq, 3483 }; 3484 3485 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3486 { 3487 struct irq_cfg *cfg; 3488 int err; 3489 3490 if (disable_apic) 3491 return -ENXIO; 3492 3493 cfg = irq_cfg(irq); 3494 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3495 if (!err) { 3496 struct ht_irq_msg msg; 3497 unsigned dest; 3498 3499 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3500 apic->target_cpus()); 3501 3502 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3503 3504 msg.address_lo = 3505 HT_IRQ_LOW_BASE | 3506 HT_IRQ_LOW_DEST_ID(dest) | 3507 HT_IRQ_LOW_VECTOR(cfg->vector) | 3508 ((apic->irq_dest_mode == 0) ? 3509 HT_IRQ_LOW_DM_PHYSICAL : 3510 HT_IRQ_LOW_DM_LOGICAL) | 3511 HT_IRQ_LOW_RQEOI_EDGE | 3512 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3513 HT_IRQ_LOW_MT_FIXED : 3514 HT_IRQ_LOW_MT_ARBITRATED) | 3515 HT_IRQ_LOW_IRQ_MASKED; 3516 3517 write_ht_irq_msg(irq, &msg); 3518 3519 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3520 handle_edge_irq, "edge"); 3521 3522 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3523 } 3524 return err; 3525 } 3526 #endif /* CONFIG_HT_IRQ */ 3527 3528 static int 3529 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3530 { 3531 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); 3532 int ret; 3533 3534 if (!cfg) 3535 return -EINVAL; 3536 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3537 if (!ret) 3538 setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, 3539 attr->trigger, attr->polarity); 3540 return ret; 3541 } 3542 3543 int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3544 struct io_apic_irq_attr *attr) 3545 { 3546 unsigned int id = attr->ioapic, pin = attr->ioapic_pin; 3547 int ret; 3548 3549 /* Avoid redundant programming */ 3550 if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) { 3551 pr_debug("Pin %d-%d already programmed\n", 3552 mp_ioapics[id].apicid, pin); 3553 return 0; 3554 } 3555 ret = io_apic_setup_irq_pin(irq, node, attr); 3556 if (!ret) 3557 set_bit(pin, mp_ioapic_routing[id].pin_programmed); 3558 return ret; 3559 } 3560 3561 static int __init io_apic_get_redir_entries(int ioapic) 3562 { 3563 union IO_APIC_reg_01 reg_01; 3564 unsigned long flags; 3565 3566 raw_spin_lock_irqsave(&ioapic_lock, flags); 3567 reg_01.raw = io_apic_read(ioapic, 1); 3568 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3569 3570 /* The register returns the maximum redirection index 3571 * supported, which is one less than the total number of redirection 3572 * entries. 3573 */ 3574 return reg_01.bits.entries + 1; 3575 } 3576 3577 static void __init probe_nr_irqs_gsi(void) 3578 { 3579 int nr; 3580 3581 nr = gsi_top + NR_IRQS_LEGACY; 3582 if (nr > nr_irqs_gsi) 3583 nr_irqs_gsi = nr; 3584 3585 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3586 } 3587 3588 int get_nr_irqs_gsi(void) 3589 { 3590 return nr_irqs_gsi; 3591 } 3592 3593 #ifdef CONFIG_SPARSE_IRQ 3594 int __init arch_probe_nr_irqs(void) 3595 { 3596 int nr; 3597 3598 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3599 nr_irqs = NR_VECTORS * nr_cpu_ids; 3600 3601 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3602 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3603 /* 3604 * for MSI and HT dyn irq 3605 */ 3606 nr += nr_irqs_gsi * 16; 3607 #endif 3608 if (nr < nr_irqs) 3609 nr_irqs = nr; 3610 3611 return NR_IRQS_LEGACY; 3612 } 3613 #endif 3614 3615 int io_apic_set_pci_routing(struct device *dev, int irq, 3616 struct io_apic_irq_attr *irq_attr) 3617 { 3618 int node; 3619 3620 if (!IO_APIC_IRQ(irq)) { 3621 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3622 irq_attr->ioapic); 3623 return -EINVAL; 3624 } 3625 3626 node = dev ? dev_to_node(dev) : cpu_to_node(0); 3627 3628 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3629 }
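/*
 * Illustrative usage sketch (not part of the original file): how a GSI
 * reported by firmware might be turned into an io_apic_irq_attr and
 * routed through io_apic_set_pci_routing(). The function name and the
 * level/active-low values are hypothetical placeholders; real callers
 * (e.g. the ACPI code) derive trigger and polarity from the firmware
 * tables instead of hardcoding them.
 */
#if 0	/* example only */
static int example_route_gsi(struct device *dev, u32 gsi, int irq)
{
	struct io_apic_irq_attr irq_attr;
	int ioapic = mp_find_ioapic(gsi);

	if (ioapic < 0)
		return -ENODEV;

	irq_attr.ioapic     = ioapic;
	irq_attr.ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
	irq_attr.trigger    = 1;	/* level, placeholder */
	irq_attr.polarity   = 1;	/* active low, placeholder */

	return io_apic_set_pci_routing(dev, irq, &irq_attr);
}
#endif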
3630 3631 #ifdef CONFIG_X86_32 3632 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3633 { 3634 union IO_APIC_reg_00 reg_00; 3635 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3636 physid_mask_t tmp; 3637 unsigned long flags; 3638 int i = 0; 3639 3640 /* 3641 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3642 * buses (one for LAPICs, one for IOAPICs), where predecessors only 3643 * support up to 16 on one shared APIC bus. 3644 * 3645 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3646 * advantage of new APIC bus architecture. 3647 */ 3648 3649 if (physids_empty(apic_id_map)) 3650 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3651 3652 raw_spin_lock_irqsave(&ioapic_lock, flags); 3653 reg_00.raw = io_apic_read(ioapic, 0); 3654 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3655 3656 if (apic_id >= get_physical_broadcast()) { 3657 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3658 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3659 apic_id = reg_00.bits.ID; 3660 } 3661 3662 /* 3663 * Every APIC in a system must have a unique ID or we get lots of nice 3664 * 'stuck on smp_invalidate_needed IPI wait' messages. 3665 */ 3666 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3667 3668 for (i = 0; i < get_physical_broadcast(); i++) { 3669 if (!apic->check_apicid_used(&apic_id_map, i)) 3670 break; 3671 } 3672 3673 if (i == get_physical_broadcast()) 3674 panic("Max apic_id exceeded!\n"); 3675 3676 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3677 "trying %d\n", ioapic, apic_id, i); 3678 3679 apic_id = i; 3680 } 3681 3682 apic->apicid_to_cpu_present(apic_id, &tmp); 3683 physids_or(apic_id_map, apic_id_map, tmp); 3684 3685 if (reg_00.bits.ID != apic_id) { 3686 reg_00.bits.ID = apic_id; 3687 3688 raw_spin_lock_irqsave(&ioapic_lock, flags); 3689 io_apic_write(ioapic, 0, reg_00.raw); 3690 reg_00.raw = io_apic_read(ioapic, 0); 3691 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3692 3693 /* Sanity check */ 3694 if (reg_00.bits.ID != apic_id) { 3695 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3696 return -1; 3697 } 3698 } 3699 3700 apic_printk(APIC_VERBOSE, KERN_INFO 3701 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3702 3703 return apic_id; 3704 } 3705 3706 static u8 __init io_apic_unique_id(u8 id) 3707 { 3708 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3709 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3710 return io_apic_get_unique_id(nr_ioapics, id); 3711 else 3712 return id; 3713 } 3714 #else 3715 static u8 __init io_apic_unique_id(u8 id) 3716 { 3717 int i; 3718 DECLARE_BITMAP(used, 256); 3719 3720 bitmap_zero(used, 256); 3721 for (i = 0; i < nr_ioapics; i++) { 3722 struct mpc_ioapic *ia = &mp_ioapics[i]; 3723 __set_bit(ia->apicid, used); 3724 } 3725 if (!test_bit(id, used)) 3726 return id; 3727 return find_first_zero_bit(used, 256); 3728 } 3729 #endif 3730 3731 static int __init io_apic_get_version(int ioapic) 3732 { 3733 union IO_APIC_reg_01 reg_01; 3734 unsigned long flags; 3735 3736 raw_spin_lock_irqsave(&ioapic_lock, flags); 3737 reg_01.raw = io_apic_read(ioapic, 1); 3738 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3739 3740 return reg_01.bits.version; 3741 } 3742 3743 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3744 { 3745 int ioapic, pin, idx; 3746 3747 if (skip_ioapic_setup) 3748 return -1; 3749 3750 ioapic = mp_find_ioapic(gsi); 3751 if (ioapic < 0) 3752 return -1; 3753 3754 pin = mp_find_ioapic_pin(ioapic, gsi); 3755 if (pin < 0) 3756 return -1; 3757 3758 idx = find_irq_entry(ioapic, pin, mp_INT); 3759 if (idx < 0) 3760 return -1; 3761 3762 *trigger = irq_trigger(idx); 3763 *polarity = irq_polarity(idx); 3764 return 0; 3765 }
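/*
 * Illustrative usage sketch (not part of the original file): a caller
 * in PNP/ACPI-style code querying the MP-table override for a GSI
 * before programming a device. The function and the message wording
 * are hypothetical; the trigger/polarity encoding (1 = level /
 * active low) matches irq_trigger() and irq_polarity() as used above.
 */
#if 0	/* example only */
static int example_query_override(u32 gsi)
{
	int trigger, polarity;

	if (acpi_get_override_irq(gsi, &trigger, &polarity) < 0)
		return -ENODEV;

	pr_info("GSI %u: %s triggered, active %s\n", gsi,
		trigger ? "level" : "edge", polarity ? "low" : "high");
	return 0;
}
#endif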
3766 3767 /* 3768 * This function currently is only a helper for the i386 smp boot process, where 3769 * we need to reprogram the ioredtbls to cater for the cpus which have come online, 3770 * so the mask in all cases should simply be apic->target_cpus() 3771 */ 3772 #ifdef CONFIG_SMP 3773 void __init setup_ioapic_dest(void) 3774 { 3775 int pin, ioapic, irq, irq_entry; 3776 const struct cpumask *mask; 3777 struct irq_data *idata; 3778 3779 if (skip_ioapic_setup == 1) 3780 return; 3781 3782 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3783 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { 3784 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3785 if (irq_entry == -1) 3786 continue; 3787 irq = pin_2_irq(irq_entry, ioapic, pin); 3788 3789 if ((ioapic > 0) && (irq > 16)) 3790 continue; 3791 3792 idata = irq_get_irq_data(irq); 3793 3794 /* 3795 * Honour affinities which have been set in early boot 3796 */ 3797 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3798 mask = idata->affinity; 3799 else 3800 mask = apic->target_cpus(); 3801 3802 if (intr_remapping_enabled) 3803 ir_ioapic_set_affinity(idata, mask, false); 3804 else 3805 ioapic_set_affinity(idata, mask, false); 3806 } 3807 3808 } 3809 #endif 3810 3811 #define IOAPIC_RESOURCE_NAME_SIZE 11 3812 3813 static struct resource *ioapic_resources; 3814 3815 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3816 { 3817 unsigned long n; 3818 struct resource *res; 3819 char *mem; 3820 int i; 3821 3822 if (nr_ioapics <= 0) 3823 return NULL; 3824 3825 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3826 n *= nr_ioapics; 3827 3828 mem = alloc_bootmem(n); 3829 res = (void *)mem; 3830 3831 mem += sizeof(struct resource) * nr_ioapics; 3832 3833 for (i = 0; i < nr_ioapics; i++) { 3834 res[i].name = mem; 3835 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3836 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3837 mem += IOAPIC_RESOURCE_NAME_SIZE; 3838 } 3839 3840 ioapic_resources = res; 3841 3842 return res; 3843 } 3844 3845 void __init ioapic_and_gsi_init(void) 3846 { 3847 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3848 struct resource *ioapic_res; 3849 int i; 3850 3851 ioapic_res = ioapic_setup_resources(nr_ioapics); 3852 for (i = 0; i < nr_ioapics; i++) { 3853 if (smp_found_config) { 3854 ioapic_phys = mp_ioapics[i].apicaddr; 3855 #ifdef CONFIG_X86_32 3856 if (!ioapic_phys) { 3857 printk(KERN_ERR 3858 "WARNING: bogus zero IO-APIC " 3859 "address found in MPTABLE, " 3860 "disabling IO/APIC support!\n"); 3861 smp_found_config = 0; 3862 skip_ioapic_setup = 1; 3863 goto fake_ioapic_page; 3864 } 3865 #endif 3866 } else { 3867 #ifdef CONFIG_X86_32 3868 fake_ioapic_page: 3869 #endif 3870 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3871 ioapic_phys = __pa(ioapic_phys); 3872 } 3873 set_fixmap_nocache(idx, ioapic_phys); 3874 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3875 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3876 ioapic_phys); 3877 idx++; 3878 3879 ioapic_res->start = ioapic_phys; 3880 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3881 ioapic_res++; 3882 } 3883 3884 probe_nr_irqs_gsi(); 3885 } 3886 3887 void __init ioapic_insert_resources(void) 3888 { 3889 int i; 3890 struct resource *r = ioapic_resources; 3891 3892 if (!r) { 3893 if (nr_ioapics > 0) 3894 printk(KERN_ERR 3895 "IO APIC resources couldn't be allocated.\n"); 3896 return; 3897 } 3898 3899 for (i = 0; i < nr_ioapics; i++) { 3900 insert_resource(&iomem_resource, r); 3901 r++; 3902 } 3903 } 3904 3905 int mp_find_ioapic(u32 gsi) 3906 { 3907 int i = 0; 3908 3909 if (nr_ioapics == 0) 3910 return -1; 3911 3912 /* Find the IOAPIC that manages this GSI. 
*/ 3913 for (i = 0; i < nr_ioapics; i++) { 3914 if ((gsi >= mp_gsi_routing[i].gsi_base) 3915 && (gsi <= mp_gsi_routing[i].gsi_end)) 3916 return i; 3917 } 3918 3919 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3920 return -1; 3921 } 3922 3923 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3924 { 3925 if (WARN_ON(ioapic == -1)) 3926 return -1; 3927 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) 3928 return -1; 3929 3930 return gsi - mp_gsi_routing[ioapic].gsi_base; 3931 } 3932 3933 static __init int bad_ioapic(unsigned long address) 3934 { 3935 if (nr_ioapics >= MAX_IO_APICS) { 3936 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 3937 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 3938 return 1; 3939 } 3940 if (!address) { 3941 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 3942 " found in table, skipping!\n"); 3943 return 1; 3944 } 3945 return 0; 3946 } 3947 3948 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3949 { 3950 int idx = 0; 3951 int entries; 3952 3953 if (bad_ioapic(address)) 3954 return; 3955 3956 idx = nr_ioapics; 3957 3958 mp_ioapics[idx].type = MP_IOAPIC; 3959 mp_ioapics[idx].flags = MPC_APIC_USABLE; 3960 mp_ioapics[idx].apicaddr = address; 3961 3962 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3963 mp_ioapics[idx].apicid = io_apic_unique_id(id); 3964 mp_ioapics[idx].apicver = io_apic_get_version(idx); 3965 3966 /* 3967 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 3968 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 3969 */ 3970 entries = io_apic_get_redir_entries(idx); 3971 mp_gsi_routing[idx].gsi_base = gsi_base; 3972 mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1; 3973 3974 /* 3975 * The number of IO-APIC IRQ registers (== #pins): 3976 */ 3977 nr_ioapic_registers[idx] = entries; 3978 3979 if (mp_gsi_routing[idx].gsi_end >= gsi_top) 3980 gsi_top = mp_gsi_routing[idx].gsi_end + 1; 3981 3982 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 3983 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, 3984 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, 3985 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); 3986 3987 nr_ioapics++; 3988 } 3989 3990 /* Enable IOAPIC early just for system timer */ 3991 void __init pre_init_apic_IRQ0(void) 3992 { 3993 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 3994 3995 printk(KERN_INFO "Early APIC setup for system timer0\n"); 3996 #ifndef CONFIG_SMP 3997 physid_set_mask_of_physid(boot_cpu_physical_apicid, 3998 &phys_cpu_present_map); 3999 #endif 4000 setup_local_APIC(); 4001 4002 io_apic_setup_irq_pin(0, 0, &attr); 4003 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 4004 "edge"); 4005 } 4006
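/*
 * Illustrative usage sketch (not part of the original file): how ACPI
 * MADT parsing feeds an I/O-APIC subtable into mp_register_ioapic()
 * above, in the style of acpi_parse_ioapic() in acpi/boot.c. The
 * function name is a placeholder; struct acpi_madt_io_apic is the
 * ACPICA layout of the MADT I/O APIC entry.
 */
#if 0	/* example only */
static int __init example_register_madt_ioapic(struct acpi_madt_io_apic *p)
{
	mp_register_ioapic(p->id,		/* I/O APIC ID from firmware */
			   p->address,		/* MMIO base of the unit */
			   p->global_irq_base);	/* first GSI it serves */
	return 0;
}
#endif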