1 /* 2 * APIC support 3 * 4 * Copyright (c) 2004-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/> 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/thread.h" 21 #include "hw/i386/apic_internal.h" 22 #include "hw/i386/apic.h" 23 #include "hw/i386/ioapic.h" 24 #include "hw/pci/msi.h" 25 #include "qemu/host-utils.h" 26 #include "trace.h" 27 #include "hw/i386/pc.h" 28 #include "hw/i386/apic-msidef.h" 29 30 #define MAX_APIC_WORDS 8 31 32 #define SYNC_FROM_VAPIC 0x1 33 #define SYNC_TO_VAPIC 0x2 34 #define SYNC_ISR_IRR_TO_VAPIC 0x4 35 36 static APICCommonState *local_apics[MAX_APICS + 1]; 37 38 static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode); 39 static void apic_update_irq(APICCommonState *s); 40 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, 41 uint8_t dest, uint8_t dest_mode); 42 43 /* Find first bit starting from msb */ 44 static int apic_fls_bit(uint32_t value) 45 { 46 return 31 - clz32(value); 47 } 48 49 /* Find first bit starting from lsb */ 50 static int apic_ffs_bit(uint32_t value) 51 { 52 return ctz32(value); 53 } 54 55 static inline void apic_reset_bit(uint32_t *tab, int index) 56 { 57 int i, mask; 58 i = index >> 5; 59 mask = 1 << (index & 0x1f); 60 tab[i] &= ~mask; 61 } 62 63 /* return -1 if no bit is set */ 64 static int get_highest_priority_int(uint32_t *tab) 65 { 66 int i; 67 for (i = 7; i >= 0; i--) { 68 if (tab[i] != 0) { 69 return i * 32 + apic_fls_bit(tab[i]); 70 } 71 } 72 return -1; 73 } 74 75 static void apic_sync_vapic(APICCommonState *s, int sync_type) 76 { 77 VAPICState vapic_state; 78 size_t length; 79 off_t start; 80 int vector; 81 82 if (!s->vapic_paddr) { 83 return; 84 } 85 if (sync_type & SYNC_FROM_VAPIC) { 86 cpu_physical_memory_read(s->vapic_paddr, &vapic_state, 87 sizeof(vapic_state)); 88 s->tpr = vapic_state.tpr; 89 } 90 if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { 91 start = offsetof(VAPICState, isr); 92 length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); 93 94 if (sync_type & SYNC_TO_VAPIC) { 95 assert(qemu_cpu_is_self(CPU(s->cpu))); 96 97 vapic_state.tpr = s->tpr; 98 vapic_state.enabled = 1; 99 start = 0; 100 length = sizeof(VAPICState); 101 } 102 103 vector = get_highest_priority_int(s->isr); 104 if (vector < 0) { 105 vector = 0; 106 } 107 vapic_state.isr = vector & 0xf0; 108 109 vapic_state.zero = 0; 110 111 vector = get_highest_priority_int(s->irr); 112 if (vector < 0) { 113 vector = 0; 114 } 115 vapic_state.irr = vector & 0xff; 116 117 cpu_physical_memory_write_rom(&address_space_memory, 118 s->vapic_paddr + start, 119 ((void *)&vapic_state) + start, length); 120 } 121 } 122 123 static void apic_vapic_base_update(APICCommonState *s) 124 { 125 apic_sync_vapic(s, SYNC_TO_VAPIC); 126 } 127 128 static void apic_local_deliver(APICCommonState *s, int vector) 129 { 130 uint32_t lvt = s->lvt[vector]; 131 int trigger_mode; 132 133 trace_apic_local_deliver(vector, (lvt >> 8) & 7); 134 135 if (lvt & APIC_LVT_MASKED) 136 return; 137 138 switch ((lvt >> 8) & 7) { 139 case APIC_DM_SMI: 140 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI); 141 break; 142 143 case APIC_DM_NMI: 144 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI); 145 break; 146 147 case APIC_DM_EXTINT: 148 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD); 149 break; 150 151 case APIC_DM_FIXED: 152 trigger_mode = APIC_TRIGGER_EDGE; 153 if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) && 154 (lvt & APIC_LVT_LEVEL_TRIGGER)) 155 trigger_mode = APIC_TRIGGER_LEVEL; 156 apic_set_irq(s, lvt & 0xff, trigger_mode); 157 } 158 } 159 160 void apic_deliver_pic_intr(DeviceState *dev, int level) 161 { 162 APICCommonState *s = APIC_COMMON(dev); 163 164 if (level) { 165 apic_local_deliver(s, APIC_LVT_LINT0); 166 } else { 167 uint32_t lvt = s->lvt[APIC_LVT_LINT0]; 168 169 switch ((lvt >> 8) & 7) { 170 case APIC_DM_FIXED: 171 if (!(lvt & APIC_LVT_LEVEL_TRIGGER)) 172 break; 173 apic_reset_bit(s->irr, lvt & 0xff); 174 /* fall through */ 175 case APIC_DM_EXTINT: 176 apic_update_irq(s); 177 break; 178 } 179 } 180 } 181 182 static void apic_external_nmi(APICCommonState *s) 183 { 184 apic_local_deliver(s, APIC_LVT_LINT1); 185 } 186 187 #define foreach_apic(apic, deliver_bitmask, code) \ 188 {\ 189 int __i, __j;\ 190 for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ 191 uint32_t __mask = deliver_bitmask[__i];\ 192 if (__mask) {\ 193 for(__j = 0; __j < 32; __j++) {\ 194 if (__mask & (1U << __j)) {\ 195 apic = local_apics[__i * 32 + __j];\ 196 if (apic) {\ 197 code;\ 198 }\ 199 }\ 200 }\ 201 }\ 202 }\ 203 } 204 205 static void apic_bus_deliver(const uint32_t *deliver_bitmask, 206 uint8_t delivery_mode, uint8_t vector_num, 207 uint8_t trigger_mode) 208 { 209 APICCommonState *apic_iter; 210 211 switch (delivery_mode) { 212 case APIC_DM_LOWPRI: 213 /* XXX: search for focus processor, arbitration */ 214 { 215 int i, d; 216 d = -1; 217 for(i = 0; i < MAX_APIC_WORDS; i++) { 218 if (deliver_bitmask[i]) { 219 d = i * 32 + apic_ffs_bit(deliver_bitmask[i]); 220 break; 221 } 222 } 223 if (d >= 0) { 224 apic_iter = local_apics[d]; 225 if (apic_iter) { 226 apic_set_irq(apic_iter, vector_num, trigger_mode); 227 } 228 } 229 } 230 return; 231 232 case APIC_DM_FIXED: 233 break; 234 235 case APIC_DM_SMI: 236 foreach_apic(apic_iter, deliver_bitmask, 237 cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI) 238 ); 239 return; 240 241 case APIC_DM_NMI: 242 foreach_apic(apic_iter, deliver_bitmask, 243 cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI) 244 ); 245 return; 246 247 case APIC_DM_INIT: 248 /* normal INIT IPI sent to processors */ 249 foreach_apic(apic_iter, deliver_bitmask, 250 cpu_interrupt(CPU(apic_iter->cpu), 251 CPU_INTERRUPT_INIT) 252 ); 253 return; 254 255 case APIC_DM_EXTINT: 256 /* handled in I/O APIC code */ 257 break; 258 259 default: 260 return; 261 } 262 263 foreach_apic(apic_iter, deliver_bitmask, 264 apic_set_irq(apic_iter, vector_num, trigger_mode) ); 265 } 266 267 void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, 268 uint8_t vector_num, uint8_t trigger_mode) 269 { 270 uint32_t deliver_bitmask[MAX_APIC_WORDS]; 271 272 trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num, 273 trigger_mode); 274 275 apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); 276 apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); 277 } 278 279 static void apic_set_base(APICCommonState *s, uint64_t val) 280 { 281 s->apicbase = (val & 0xfffff000) | 282 (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); 283 /* if disabled, cannot be enabled again */ 284 if (!(val & MSR_IA32_APICBASE_ENABLE)) { 285 s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; 286 cpu_clear_apic_feature(&s->cpu->env); 287 s->spurious_vec &= ~APIC_SV_ENABLE; 288 } 289 } 290 291 static void apic_set_tpr(APICCommonState *s, uint8_t val) 292 { 293 /* Updates from cr8 are ignored while the VAPIC is active */ 294 if (!s->vapic_paddr) { 295 s->tpr = val << 4; 296 apic_update_irq(s); 297 } 298 } 299 300 static uint8_t apic_get_tpr(APICCommonState *s) 301 { 302 apic_sync_vapic(s, SYNC_FROM_VAPIC); 303 return s->tpr >> 4; 304 } 305 306 int apic_get_ppr(APICCommonState *s) 307 { 308 int tpr, isrv, ppr; 309 310 tpr = (s->tpr >> 4); 311 isrv = get_highest_priority_int(s->isr); 312 if (isrv < 0) 313 isrv = 0; 314 isrv >>= 4; 315 if (tpr >= isrv) 316 ppr = s->tpr; 317 else 318 ppr = isrv << 4; 319 return ppr; 320 } 321 322 static int apic_get_arb_pri(APICCommonState *s) 323 { 324 /* XXX: arbitration */ 325 return 0; 326 } 327 328 329 /* 330 * <0 - low prio interrupt, 331 * 0 - no interrupt, 332 * >0 - interrupt number 333 */ 334 static int apic_irq_pending(APICCommonState *s) 335 { 336 int irrv, ppr; 337 338 if (!(s->spurious_vec & APIC_SV_ENABLE)) { 339 return 0; 340 } 341 342 irrv = get_highest_priority_int(s->irr); 343 if (irrv < 0) { 344 return 0; 345 } 346 ppr = apic_get_ppr(s); 347 if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) { 348 return -1; 349 } 350 351 return irrv; 352 } 353 354 /* signal the CPU if an irq is pending */ 355 static void apic_update_irq(APICCommonState *s) 356 { 357 CPUState *cpu; 358 DeviceState *dev = (DeviceState *)s; 359 360 cpu = CPU(s->cpu); 361 if (!qemu_cpu_is_self(cpu)) { 362 cpu_interrupt(cpu, CPU_INTERRUPT_POLL); 363 } else if (apic_irq_pending(s) > 0) { 364 cpu_interrupt(cpu, CPU_INTERRUPT_HARD); 365 } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { 366 cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); 367 } 368 } 369 370 void apic_poll_irq(DeviceState *dev) 371 { 372 APICCommonState *s = APIC_COMMON(dev); 373 374 apic_sync_vapic(s, SYNC_FROM_VAPIC); 375 apic_update_irq(s); 376 } 377 378 static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode) 379 { 380 apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num)); 381 382 apic_set_bit(s->irr, vector_num); 383 if (trigger_mode) 384 apic_set_bit(s->tmr, vector_num); 385 else 386 apic_reset_bit(s->tmr, vector_num); 387 if (s->vapic_paddr) { 388 apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC); 389 /* 390 * The vcpu thread needs to see the new IRR before we pull its current 391 * TPR value. That way, if we miss a lowering of the TRP, the guest 392 * has the chance to notice the new IRR and poll for IRQs on its own. 393 */ 394 smp_wmb(); 395 apic_sync_vapic(s, SYNC_FROM_VAPIC); 396 } 397 apic_update_irq(s); 398 } 399 400 static void apic_eoi(APICCommonState *s) 401 { 402 int isrv; 403 isrv = get_highest_priority_int(s->isr); 404 if (isrv < 0) 405 return; 406 apic_reset_bit(s->isr, isrv); 407 if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) { 408 ioapic_eoi_broadcast(isrv); 409 } 410 apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC); 411 apic_update_irq(s); 412 } 413 414 static int apic_find_dest(uint8_t dest) 415 { 416 APICCommonState *apic = local_apics[dest]; 417 int i; 418 419 if (apic && apic->id == dest) 420 return dest; /* shortcut in case apic->id == apic->idx */ 421 422 for (i = 0; i < MAX_APICS; i++) { 423 apic = local_apics[i]; 424 if (apic && apic->id == dest) 425 return i; 426 if (!apic) 427 break; 428 } 429 430 return -1; 431 } 432 433 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, 434 uint8_t dest, uint8_t dest_mode) 435 { 436 APICCommonState *apic_iter; 437 int i; 438 439 if (dest_mode == 0) { 440 if (dest == 0xff) { 441 memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t)); 442 } else { 443 int idx = apic_find_dest(dest); 444 memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); 445 if (idx >= 0) 446 apic_set_bit(deliver_bitmask, idx); 447 } 448 } else { 449 /* XXX: cluster mode */ 450 memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); 451 for(i = 0; i < MAX_APICS; i++) { 452 apic_iter = local_apics[i]; 453 if (apic_iter) { 454 if (apic_iter->dest_mode == 0xf) { 455 if (dest & apic_iter->log_dest) 456 apic_set_bit(deliver_bitmask, i); 457 } else if (apic_iter->dest_mode == 0x0) { 458 if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) && 459 (dest & apic_iter->log_dest & 0x0f)) { 460 apic_set_bit(deliver_bitmask, i); 461 } 462 } 463 } else { 464 break; 465 } 466 } 467 } 468 } 469 470 static void apic_startup(APICCommonState *s, int vector_num) 471 { 472 s->sipi_vector = vector_num; 473 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); 474 } 475 476 void apic_sipi(DeviceState *dev) 477 { 478 APICCommonState *s = APIC_COMMON(dev); 479 480 cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); 481 482 if (!s->wait_for_sipi) 483 return; 484 cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector); 485 s->wait_for_sipi = 0; 486 } 487 488 static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, 489 uint8_t delivery_mode, uint8_t vector_num, 490 uint8_t trigger_mode) 491 { 492 APICCommonState *s = APIC_COMMON(dev); 493 uint32_t deliver_bitmask[MAX_APIC_WORDS]; 494 int dest_shorthand = (s->icr[0] >> 18) & 3; 495 APICCommonState *apic_iter; 496 497 switch (dest_shorthand) { 498 case 0: 499 apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); 500 break; 501 case 1: 502 memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); 503 apic_set_bit(deliver_bitmask, s->idx); 504 break; 505 case 2: 506 memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); 507 break; 508 case 3: 509 memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); 510 apic_reset_bit(deliver_bitmask, s->idx); 511 break; 512 } 513 514 switch (delivery_mode) { 515 case APIC_DM_INIT: 516 { 517 int trig_mode = (s->icr[0] >> 15) & 1; 518 int level = (s->icr[0] >> 14) & 1; 519 if (level == 0 && trig_mode == 1) { 520 foreach_apic(apic_iter, deliver_bitmask, 521 apic_iter->arb_id = apic_iter->id ); 522 return; 523 } 524 } 525 break; 526 527 case APIC_DM_SIPI: 528 foreach_apic(apic_iter, deliver_bitmask, 529 apic_startup(apic_iter, vector_num) ); 530 return; 531 } 532 533 apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); 534 } 535 536 static bool apic_check_pic(APICCommonState *s) 537 { 538 DeviceState *dev = (DeviceState *)s; 539 540 if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { 541 return false; 542 } 543 apic_deliver_pic_intr(dev, 1); 544 return true; 545 } 546 547 int apic_get_interrupt(DeviceState *dev) 548 { 549 APICCommonState *s = APIC_COMMON(dev); 550 int intno; 551 552 /* if the APIC is installed or enabled, we let the 8259 handle the 553 IRQs */ 554 if (!s) 555 return -1; 556 if (!(s->spurious_vec & APIC_SV_ENABLE)) 557 return -1; 558 559 apic_sync_vapic(s, SYNC_FROM_VAPIC); 560 intno = apic_irq_pending(s); 561 562 /* if there is an interrupt from the 8259, let the caller handle 563 * that first since ExtINT interrupts ignore the priority. 564 */ 565 if (intno == 0 || apic_check_pic(s)) { 566 apic_sync_vapic(s, SYNC_TO_VAPIC); 567 return -1; 568 } else if (intno < 0) { 569 apic_sync_vapic(s, SYNC_TO_VAPIC); 570 return s->spurious_vec & 0xff; 571 } 572 apic_reset_bit(s->irr, intno); 573 apic_set_bit(s->isr, intno); 574 apic_sync_vapic(s, SYNC_TO_VAPIC); 575 576 apic_update_irq(s); 577 578 return intno; 579 } 580 581 int apic_accept_pic_intr(DeviceState *dev) 582 { 583 APICCommonState *s = APIC_COMMON(dev); 584 uint32_t lvt0; 585 586 if (!s) 587 return -1; 588 589 lvt0 = s->lvt[APIC_LVT_LINT0]; 590 591 if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 || 592 (lvt0 & APIC_LVT_MASKED) == 0) 593 return 1; 594 595 return 0; 596 } 597 598 static uint32_t apic_get_current_count(APICCommonState *s) 599 { 600 int64_t d; 601 uint32_t val; 602 d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >> 603 s->count_shift; 604 if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { 605 /* periodic */ 606 val = s->initial_count - (d % ((uint64_t)s->initial_count + 1)); 607 } else { 608 if (d >= s->initial_count) 609 val = 0; 610 else 611 val = s->initial_count - d; 612 } 613 return val; 614 } 615 616 static void apic_timer_update(APICCommonState *s, int64_t current_time) 617 { 618 if (apic_next_timer(s, current_time)) { 619 timer_mod(s->timer, s->next_time); 620 } else { 621 timer_del(s->timer); 622 } 623 } 624 625 static void apic_timer(void *opaque) 626 { 627 APICCommonState *s = opaque; 628 629 apic_local_deliver(s, APIC_LVT_TIMER); 630 apic_timer_update(s, s->next_time); 631 } 632 633 static uint32_t apic_mem_readb(void *opaque, hwaddr addr) 634 { 635 return 0; 636 } 637 638 static uint32_t apic_mem_readw(void *opaque, hwaddr addr) 639 { 640 return 0; 641 } 642 643 static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val) 644 { 645 } 646 647 static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val) 648 { 649 } 650 651 static uint32_t apic_mem_readl(void *opaque, hwaddr addr) 652 { 653 DeviceState *dev; 654 APICCommonState *s; 655 uint32_t val; 656 int index; 657 658 dev = cpu_get_current_apic(); 659 if (!dev) { 660 return 0; 661 } 662 s = APIC_COMMON(dev); 663 664 index = (addr >> 4) & 0xff; 665 switch(index) { 666 case 0x02: /* id */ 667 val = s->id << 24; 668 break; 669 case 0x03: /* version */ 670 val = s->version | ((APIC_LVT_NB - 1) << 16); 671 break; 672 case 0x08: 673 apic_sync_vapic(s, SYNC_FROM_VAPIC); 674 if (apic_report_tpr_access) { 675 cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ); 676 } 677 val = s->tpr; 678 break; 679 case 0x09: 680 val = apic_get_arb_pri(s); 681 break; 682 case 0x0a: 683 /* ppr */ 684 val = apic_get_ppr(s); 685 break; 686 case 0x0b: 687 val = 0; 688 break; 689 case 0x0d: 690 val = s->log_dest << 24; 691 break; 692 case 0x0e: 693 val = (s->dest_mode << 28) | 0xfffffff; 694 break; 695 case 0x0f: 696 val = s->spurious_vec; 697 break; 698 case 0x10 ... 0x17: 699 val = s->isr[index & 7]; 700 break; 701 case 0x18 ... 0x1f: 702 val = s->tmr[index & 7]; 703 break; 704 case 0x20 ... 0x27: 705 val = s->irr[index & 7]; 706 break; 707 case 0x28: 708 val = s->esr; 709 break; 710 case 0x30: 711 case 0x31: 712 val = s->icr[index & 1]; 713 break; 714 case 0x32 ... 0x37: 715 val = s->lvt[index - 0x32]; 716 break; 717 case 0x38: 718 val = s->initial_count; 719 break; 720 case 0x39: 721 val = apic_get_current_count(s); 722 break; 723 case 0x3e: 724 val = s->divide_conf; 725 break; 726 default: 727 s->esr |= APIC_ESR_ILLEGAL_ADDRESS; 728 val = 0; 729 break; 730 } 731 trace_apic_mem_readl(addr, val); 732 return val; 733 } 734 735 static void apic_send_msi(hwaddr addr, uint32_t data) 736 { 737 uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; 738 uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; 739 uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; 740 uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; 741 uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; 742 /* XXX: Ignore redirection hint. */ 743 apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); 744 } 745 746 static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val) 747 { 748 DeviceState *dev; 749 APICCommonState *s; 750 int index = (addr >> 4) & 0xff; 751 if (addr > 0xfff || !index) { 752 /* MSI and MMIO APIC are at the same memory location, 753 * but actually not on the global bus: MSI is on PCI bus 754 * APIC is connected directly to the CPU. 755 * Mapping them on the global bus happens to work because 756 * MSI registers are reserved in APIC MMIO and vice versa. */ 757 apic_send_msi(addr, val); 758 return; 759 } 760 761 dev = cpu_get_current_apic(); 762 if (!dev) { 763 return; 764 } 765 s = APIC_COMMON(dev); 766 767 trace_apic_mem_writel(addr, val); 768 769 switch(index) { 770 case 0x02: 771 s->id = (val >> 24); 772 break; 773 case 0x03: 774 break; 775 case 0x08: 776 if (apic_report_tpr_access) { 777 cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE); 778 } 779 s->tpr = val; 780 apic_sync_vapic(s, SYNC_TO_VAPIC); 781 apic_update_irq(s); 782 break; 783 case 0x09: 784 case 0x0a: 785 break; 786 case 0x0b: /* EOI */ 787 apic_eoi(s); 788 break; 789 case 0x0d: 790 s->log_dest = val >> 24; 791 break; 792 case 0x0e: 793 s->dest_mode = val >> 28; 794 break; 795 case 0x0f: 796 s->spurious_vec = val & 0x1ff; 797 apic_update_irq(s); 798 break; 799 case 0x10 ... 0x17: 800 case 0x18 ... 0x1f: 801 case 0x20 ... 0x27: 802 case 0x28: 803 break; 804 case 0x30: 805 s->icr[0] = val; 806 apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1, 807 (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), 808 (s->icr[0] >> 15) & 1); 809 break; 810 case 0x31: 811 s->icr[1] = val; 812 break; 813 case 0x32 ... 0x37: 814 { 815 int n = index - 0x32; 816 s->lvt[n] = val; 817 if (n == APIC_LVT_TIMER) { 818 apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 819 } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) { 820 apic_update_irq(s); 821 } 822 } 823 break; 824 case 0x38: 825 s->initial_count = val; 826 s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 827 apic_timer_update(s, s->initial_count_load_time); 828 break; 829 case 0x39: 830 break; 831 case 0x3e: 832 { 833 int v; 834 s->divide_conf = val & 0xb; 835 v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4); 836 s->count_shift = (v + 1) & 7; 837 } 838 break; 839 default: 840 s->esr |= APIC_ESR_ILLEGAL_ADDRESS; 841 break; 842 } 843 } 844 845 static void apic_pre_save(APICCommonState *s) 846 { 847 apic_sync_vapic(s, SYNC_FROM_VAPIC); 848 } 849 850 static void apic_post_load(APICCommonState *s) 851 { 852 if (s->timer_expiry != -1) { 853 timer_mod(s->timer, s->timer_expiry); 854 } else { 855 timer_del(s->timer); 856 } 857 } 858 859 static const MemoryRegionOps apic_io_ops = { 860 .old_mmio = { 861 .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, }, 862 .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, }, 863 }, 864 .endianness = DEVICE_NATIVE_ENDIAN, 865 }; 866 867 static void apic_realize(DeviceState *dev, Error **errp) 868 { 869 APICCommonState *s = APIC_COMMON(dev); 870 871 memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", 872 APIC_SPACE_SIZE); 873 874 s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); 875 local_apics[s->idx] = s; 876 877 msi_nonbroken = true; 878 } 879 880 static void apic_class_init(ObjectClass *klass, void *data) 881 { 882 APICCommonClass *k = APIC_COMMON_CLASS(klass); 883 884 k->realize = apic_realize; 885 k->set_base = apic_set_base; 886 k->set_tpr = apic_set_tpr; 887 k->get_tpr = apic_get_tpr; 888 k->vapic_base_update = apic_vapic_base_update; 889 k->external_nmi = apic_external_nmi; 890 k->pre_save = apic_pre_save; 891 k->post_load = apic_post_load; 892 } 893 894 static const TypeInfo apic_info = { 895 .name = "apic", 896 .instance_size = sizeof(APICCommonState), 897 .parent = TYPE_APIC_COMMON, 898 .class_init = apic_class_init, 899 }; 900 901 static void apic_register_types(void) 902 { 903 type_register_static(&apic_info); 904 } 905 906 type_init(apic_register_types) 907