/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu/thread.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"

#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void apic_set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int apic_get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

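/*
 * Keep the device model and the guest-visible vAPIC page in sync.  This is
 * a no-op when no vAPIC page is registered (vapic_paddr == 0).
 * SYNC_FROM_VAPIC pulls back the TPR the guest may have written directly;
 * SYNC_ISR_IRR_TO_VAPIC pushes the current ISR/IRR summary out to the page,
 * and SYNC_TO_VAPIC additionally publishes the TPR and the enable flag.
 */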
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(&address_space_memory,
                                      s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC_COMMON(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

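/*
 * Entry point for interrupts coming from outside the local APIC (I/O APIC
 * and MSI): resolve the destination into a bitmask of local APICs and pass
 * it on to the bus delivery logic above.
 */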
void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return 0;
    }

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

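/*
 * Latch a delivered vector: set it in IRR, record the trigger mode in TMR,
 * sync the vAPIC page if one is registered, and re-evaluate whether the
 * CPU needs to be interrupted.
 */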
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                apic_set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        apic_set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

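/*
 * Deliver an IPI programmed through the ICR.  The destination shorthand in
 * ICR bits 18-19 selects none/self/all/all-but-self; INIT level de-assert
 * and SIPI are handled specially before normal bus delivery.
 */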
static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        apic_set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        apic_reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(&s->busdev.qdev, 1);
    return true;
}

int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

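/*
 * MMIO register accesses.  APIC registers are spaced 16 bytes apart, so the
 * register index is taken from bits 4-11 of the offset; only 32-bit accesses
 * are meaningful, the byte/word handlers above are stubs.
 */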
static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int index;

    dev = cpu_get_current_apic();
    if (!dev) {
        return 0;
    }
    s = APIC_COMMON(dev);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = (s->dest_mode << 28) | 0xfffffff;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

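/*
 * MSI delivery: the destination ID and destination mode are encoded in the
 * message address, while the vector, delivery mode and trigger mode come
 * from the data payload.
 */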
static void apic_send_msi(hwaddr addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    DeviceState *dev;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    dev = cpu_get_current_apic();
    if (!dev) {
        return;
    }
    s = APIC_COMMON(dev);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(dev);

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static const TypeInfo apic_info = {
    .name          = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)