/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu/thread.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"

#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void apic_set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int apic_get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
                               sizeof(vapic_state), 0);
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC_COMMON(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1 << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                apic_set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        apic_set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        apic_set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        apic_reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(&s->busdev.qdev, 1);
    return true;
}

int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    /* re-inject if there is still a pending PIC interrupt */
    apic_check_pic(s);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int index;

    dev = cpu_get_current_apic();
    if (!dev) {
        return 0;
    }
    s = APIC_COMMON(dev);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

static void apic_send_msi(hwaddr addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    DeviceState *dev;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    dev = cpu_get_current_apic();
    if (!dev) {
        return;
    }
    s = APIC_COMMON(dev);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(dev);

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static const TypeInfo apic_info = {
    .name = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)