/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu/thread.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"

#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void apic_set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int apic_get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

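/*
 * Synchronize the local APIC state (TPR, ISR, IRR) with the in-guest VAPIC
 * page at vapic_paddr, which backs the fast TPR access path; this is a
 * no-op while no VAPIC page has been registered.
 */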
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(&address_space_memory,
                                      s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC_COMMON(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

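/* Run 'code' once for every local APIC whose index bit is set in
 * deliver_bitmask. */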
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest; /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                apic_set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        apic_set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        apic_set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        apic_reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(&s->busdev.qdev, 1);
    return true;
}

int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    /* re-inject if there is still a pending PIC interrupt */
    apic_check_pic(s);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int index;

    dev = cpu_get_current_apic();
    if (!dev) {
        return 0;
    }
    s = APIC_COMMON(dev);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

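/* Decode an MSI address/data pair written into the APIC MMIO window and
 * deliver the resulting interrupt over the APIC bus. */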
static void apic_send_msi(hwaddr addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    DeviceState *dev;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    dev = cpu_get_current_apic();
    if (!dev) {
        return;
    }
    s = APIC_COMMON(dev);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
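    /* LVT registers: a timer LVT update reprograms the timer, and changing
     * LINT0 re-checks whether a pending PIC interrupt can be delivered */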
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(dev);

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static const TypeInfo apic_info = {
    .name          = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)