1 /* 2 * APIC support 3 * 4 * Copyright (c) 2004-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/> 18 */ 19 #include "qemu/thread.h" 20 #include "hw/i386/apic_internal.h" 21 #include "hw/i386/apic.h" 22 #include "hw/i386/ioapic.h" 23 #include "hw/pci/msi.h" 24 #include "qemu/host-utils.h" 25 #include "trace.h" 26 #include "hw/i386/pc.h" 27 #include "hw/i386/apic-msidef.h" 28 29 #define MAX_APIC_WORDS 8 30 31 #define SYNC_FROM_VAPIC 0x1 32 #define SYNC_TO_VAPIC 0x2 33 #define SYNC_ISR_IRR_TO_VAPIC 0x4 34 35 static APICCommonState *local_apics[MAX_APICS + 1]; 36 37 static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode); 38 static void apic_update_irq(APICCommonState *s); 39 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, 40 uint8_t dest, uint8_t dest_mode); 41 42 /* Find first bit starting from msb */ 43 static int apic_fls_bit(uint32_t value) 44 { 45 return 31 - clz32(value); 46 } 47 48 /* Find first bit starting from lsb */ 49 static int apic_ffs_bit(uint32_t value) 50 { 51 return ctz32(value); 52 } 53 54 static inline void apic_reset_bit(uint32_t *tab, int index) 55 { 56 int i, mask; 57 i = index >> 5; 58 mask = 1 << (index & 0x1f); 59 tab[i] &= ~mask; 60 } 61 62 /* return -1 if no bit is set */ 63 static int get_highest_priority_int(uint32_t *tab) 64 { 65 int i; 66 for (i = 7; i >= 0; i--) { 67 if (tab[i] != 0) { 68 return i * 32 + apic_fls_bit(tab[i]); 69 } 70 } 71 return -1; 72 } 73 74 static void apic_sync_vapic(APICCommonState *s, int sync_type) 75 { 76 VAPICState vapic_state; 77 size_t length; 78 off_t start; 79 int vector; 80 81 if (!s->vapic_paddr) { 82 return; 83 } 84 if (sync_type & SYNC_FROM_VAPIC) { 85 cpu_physical_memory_read(s->vapic_paddr, &vapic_state, 86 sizeof(vapic_state)); 87 s->tpr = vapic_state.tpr; 88 } 89 if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { 90 start = offsetof(VAPICState, isr); 91 length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); 92 93 if (sync_type & SYNC_TO_VAPIC) { 94 assert(qemu_cpu_is_self(CPU(s->cpu))); 95 96 vapic_state.tpr = s->tpr; 97 vapic_state.enabled = 1; 98 start = 0; 99 length = sizeof(VAPICState); 100 } 101 102 vector = get_highest_priority_int(s->isr); 103 if (vector < 0) { 104 vector = 0; 105 } 106 vapic_state.isr = vector & 0xf0; 107 108 vapic_state.zero = 0; 109 110 vector = get_highest_priority_int(s->irr); 111 if (vector < 0) { 112 vector = 0; 113 } 114 vapic_state.irr = vector & 0xff; 115 116 cpu_physical_memory_write_rom(&address_space_memory, 117 s->vapic_paddr + start, 118 ((void *)&vapic_state) + start, length); 119 } 120 } 121 122 static void apic_vapic_base_update(APICCommonState *s) 123 { 124 apic_sync_vapic(s, SYNC_TO_VAPIC); 125 } 126 127 static void apic_local_deliver(APICCommonState *s, int vector) 128 { 129 uint32_t lvt = s->lvt[vector]; 130 int trigger_mode; 131 132 trace_apic_local_deliver(vector, (lvt >> 8) & 7); 133 134 if (lvt & APIC_LVT_MASKED) 135 return; 136 137 switch ((lvt >> 8) & 7) { 138 case APIC_DM_SMI: 139 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI); 140 break; 141 142 case APIC_DM_NMI: 143 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI); 144 break; 145 146 case APIC_DM_EXTINT: 147 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD); 148 break; 149 150 case APIC_DM_FIXED: 151 trigger_mode = APIC_TRIGGER_EDGE; 152 if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) && 153 (lvt & APIC_LVT_LEVEL_TRIGGER)) 154 trigger_mode = APIC_TRIGGER_LEVEL; 155 apic_set_irq(s, lvt & 0xff, trigger_mode); 156 } 157 } 158 159 void apic_deliver_pic_intr(DeviceState *dev, int level) 160 { 161 APICCommonState *s = APIC_COMMON(dev); 162 163 if (level) { 164 apic_local_deliver(s, APIC_LVT_LINT0); 165 } else { 166 uint32_t lvt = s->lvt[APIC_LVT_LINT0]; 167 168 switch ((lvt >> 8) & 7) { 169 case APIC_DM_FIXED: 170 if (!(lvt & APIC_LVT_LEVEL_TRIGGER)) 171 break; 172 apic_reset_bit(s->irr, lvt & 0xff); 173 /* fall through */ 174 case APIC_DM_EXTINT: 175 apic_update_irq(s); 176 break; 177 } 178 } 179 } 180 181 static void apic_external_nmi(APICCommonState *s) 182 { 183 apic_local_deliver(s, APIC_LVT_LINT1); 184 } 185 186 #define foreach_apic(apic, deliver_bitmask, code) \ 187 {\ 188 int __i, __j;\ 189 for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ 190 uint32_t __mask = deliver_bitmask[__i];\ 191 if (__mask) {\ 192 for(__j = 0; __j < 32; __j++) {\ 193 if (__mask & (1U << __j)) {\ 194 apic = local_apics[__i * 32 + __j];\ 195 if (apic) {\ 196 code;\ 197 }\ 198 }\ 199 }\ 200 }\ 201 }\ 202 } 203 204 static void apic_bus_deliver(const uint32_t *deliver_bitmask, 205 uint8_t delivery_mode, uint8_t vector_num, 206 uint8_t trigger_mode) 207 { 208 APICCommonState *apic_iter; 209 210 switch (delivery_mode) { 211 case APIC_DM_LOWPRI: 212 /* XXX: search for focus processor, arbitration */ 213 { 214 int i, d; 215 d = -1; 216 for(i = 0; i < MAX_APIC_WORDS; i++) { 217 if (deliver_bitmask[i]) { 218 d = i * 32 + apic_ffs_bit(deliver_bitmask[i]); 219 break; 220 } 221 } 222 if (d >= 0) { 223 apic_iter = local_apics[d]; 224 if (apic_iter) { 225 apic_set_irq(apic_iter, vector_num, trigger_mode); 226 } 227 } 228 } 229 return; 230 231 case APIC_DM_FIXED: 232 break; 233 234 case APIC_DM_SMI: 235 foreach_apic(apic_iter, deliver_bitmask, 236 cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI) 237 ); 238 return; 239 240 case APIC_DM_NMI: 241 foreach_apic(apic_iter, deliver_bitmask, 242 cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI) 243 ); 244 return; 245 246 case APIC_DM_INIT: 247 /* normal INIT IPI sent to processors */ 248 foreach_apic(apic_iter, deliver_bitmask, 249 cpu_interrupt(CPU(apic_iter->cpu), 250 CPU_INTERRUPT_INIT) 251 ); 252 return; 253 254 case APIC_DM_EXTINT: 255 /* handled in I/O APIC code */ 256 break; 257 258 default: 259 return; 260 } 261 262 foreach_apic(apic_iter, deliver_bitmask, 263 apic_set_irq(apic_iter, vector_num, trigger_mode) ); 264 } 265 266 void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, 267 uint8_t vector_num, uint8_t trigger_mode) 268 { 269 uint32_t deliver_bitmask[MAX_APIC_WORDS]; 270 271 trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num, 272 trigger_mode); 273 274 apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); 275 apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); 276 } 277 278 static void apic_set_base(APICCommonState *s, uint64_t val) 279 { 280 s->apicbase = (val & 0xfffff000) | 281 (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); 282 /* if disabled, cannot be enabled again */ 283 if (!(val & MSR_IA32_APICBASE_ENABLE)) { 284 s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; 285 cpu_clear_apic_feature(&s->cpu->env); 286 s->spurious_vec &= ~APIC_SV_ENABLE; 287 } 288 } 289 290 static void apic_set_tpr(APICCommonState *s, uint8_t val) 291 { 292 /* Updates from cr8 are ignored while the VAPIC is active */ 293 if (!s->vapic_paddr) { 294 s->tpr = val << 4; 295 apic_update_irq(s); 296 } 297 } 298 299 static uint8_t apic_get_tpr(APICCommonState *s) 300 { 301 apic_sync_vapic(s, SYNC_FROM_VAPIC); 302 return s->tpr >> 4; 303 } 304 305 int apic_get_ppr(APICCommonState *s) 306 { 307 int tpr, isrv, ppr; 308 309 tpr = (s->tpr >> 4); 310 isrv = get_highest_priority_int(s->isr); 311 if (isrv < 0) 312 isrv = 0; 313 isrv >>= 4; 314 if (tpr >= isrv) 315 ppr = s->tpr; 316 else 317 ppr = isrv << 4; 318 return ppr; 319 } 320 321 static int apic_get_arb_pri(APICCommonState *s) 322 { 323 /* XXX: arbitration */ 324 return 0; 325 } 326 327 328 /* 329 * <0 - low prio interrupt, 330 * 0 - no interrupt, 331 * >0 - interrupt number 332 */ 333 static int apic_irq_pending(APICCommonState *s) 334 { 335 int irrv, ppr; 336 337 if (!(s->spurious_vec & APIC_SV_ENABLE)) { 338 return 0; 339 } 340 341 irrv = get_highest_priority_int(s->irr); 342 if (irrv < 0) { 343 return 0; 344 } 345 ppr = apic_get_ppr(s); 346 if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) { 347 return -1; 348 } 349 350 return irrv; 351 } 352 353 /* signal the CPU if an irq is pending */ 354 static void apic_update_irq(APICCommonState *s) 355 { 356 CPUState *cpu; 357 DeviceState *dev = (DeviceState *)s; 358 359 cpu = CPU(s->cpu); 360 if (!qemu_cpu_is_self(cpu)) { 361 cpu_interrupt(cpu, CPU_INTERRUPT_POLL); 362 } else if (apic_irq_pending(s) > 0) { 363 cpu_interrupt(cpu, CPU_INTERRUPT_HARD); 364 } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { 365 cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); 366 } 367 } 368 369 void apic_poll_irq(DeviceState *dev) 370 { 371 APICCommonState *s = APIC_COMMON(dev); 372 373 apic_sync_vapic(s, SYNC_FROM_VAPIC); 374 apic_update_irq(s); 375 } 376 377 static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode) 378 { 379 apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num)); 380 381 apic_set_bit(s->irr, vector_num); 382 if (trigger_mode) 383 apic_set_bit(s->tmr, vector_num); 384 else 385 apic_reset_bit(s->tmr, vector_num); 386 if (s->vapic_paddr) { 387 apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC); 388 /* 389 * The vcpu thread needs to see the new IRR before we pull its current 390 * TPR value. That way, if we miss a lowering of the TRP, the guest 391 * has the chance to notice the new IRR and poll for IRQs on its own. 392 */ 393 smp_wmb(); 394 apic_sync_vapic(s, SYNC_FROM_VAPIC); 395 } 396 apic_update_irq(s); 397 } 398 399 static void apic_eoi(APICCommonState *s) 400 { 401 int isrv; 402 isrv = get_highest_priority_int(s->isr); 403 if (isrv < 0) 404 return; 405 apic_reset_bit(s->isr, isrv); 406 if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) { 407 ioapic_eoi_broadcast(isrv); 408 } 409 apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC); 410 apic_update_irq(s); 411 } 412 413 static int apic_find_dest(uint8_t dest) 414 { 415 APICCommonState *apic = local_apics[dest]; 416 int i; 417 418 if (apic && apic->id == dest) 419 return dest; /* shortcut in case apic->id == apic->idx */ 420 421 for (i = 0; i < MAX_APICS; i++) { 422 apic = local_apics[i]; 423 if (apic && apic->id == dest) 424 return i; 425 if (!apic) 426 break; 427 } 428 429 return -1; 430 } 431 432 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, 433 uint8_t dest, uint8_t dest_mode) 434 { 435 APICCommonState *apic_iter; 436 int i; 437 438 if (dest_mode == 0) { 439 if (dest == 0xff) { 440 memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t)); 441 } else { 442 int idx = apic_find_dest(dest); 443 memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); 444 if (idx >= 0) 445 apic_set_bit(deliver_bitmask, idx); 446 } 447 } else { 448 /* XXX: cluster mode */ 449 memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); 450 for(i = 0; i < MAX_APICS; i++) { 451 apic_iter = local_apics[i]; 452 if (apic_iter) { 453 if (apic_iter->dest_mode == 0xf) { 454 if (dest & apic_iter->log_dest) 455 apic_set_bit(deliver_bitmask, i); 456 } else if (apic_iter->dest_mode == 0x0) { 457 if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) && 458 (dest & apic_iter->log_dest & 0x0f)) { 459 apic_set_bit(deliver_bitmask, i); 460 } 461 } 462 } else { 463 break; 464 } 465 } 466 } 467 } 468 469 static void apic_startup(APICCommonState *s, int vector_num) 470 { 471 s->sipi_vector = vector_num; 472 cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); 473 } 474 475 void apic_sipi(DeviceState *dev) 476 { 477 APICCommonState *s = APIC_COMMON(dev); 478 479 cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); 480 481 if (!s->wait_for_sipi) 482 return; 483 cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector); 484 s->wait_for_sipi = 0; 485 } 486 487 static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, 488 uint8_t delivery_mode, uint8_t vector_num, 489 uint8_t trigger_mode) 490 { 491 APICCommonState *s = APIC_COMMON(dev); 492 uint32_t deliver_bitmask[MAX_APIC_WORDS]; 493 int dest_shorthand = (s->icr[0] >> 18) & 3; 494 APICCommonState *apic_iter; 495 496 switch (dest_shorthand) { 497 case 0: 498 apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); 499 break; 500 case 1: 501 memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); 502 apic_set_bit(deliver_bitmask, s->idx); 503 break; 504 case 2: 505 memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); 506 break; 507 case 3: 508 memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); 509 apic_reset_bit(deliver_bitmask, s->idx); 510 break; 511 } 512 513 switch (delivery_mode) { 514 case APIC_DM_INIT: 515 { 516 int trig_mode = (s->icr[0] >> 15) & 1; 517 int level = (s->icr[0] >> 14) & 1; 518 if (level == 0 && trig_mode == 1) { 519 foreach_apic(apic_iter, deliver_bitmask, 520 apic_iter->arb_id = apic_iter->id ); 521 return; 522 } 523 } 524 break; 525 526 case APIC_DM_SIPI: 527 foreach_apic(apic_iter, deliver_bitmask, 528 apic_startup(apic_iter, vector_num) ); 529 return; 530 } 531 532 apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); 533 } 534 535 static bool apic_check_pic(APICCommonState *s) 536 { 537 DeviceState *dev = (DeviceState *)s; 538 539 if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { 540 return false; 541 } 542 apic_deliver_pic_intr(dev, 1); 543 return true; 544 } 545 546 int apic_get_interrupt(DeviceState *dev) 547 { 548 APICCommonState *s = APIC_COMMON(dev); 549 int intno; 550 551 /* if the APIC is installed or enabled, we let the 8259 handle the 552 IRQs */ 553 if (!s) 554 return -1; 555 if (!(s->spurious_vec & APIC_SV_ENABLE)) 556 return -1; 557 558 apic_sync_vapic(s, SYNC_FROM_VAPIC); 559 intno = apic_irq_pending(s); 560 561 /* if there is an interrupt from the 8259, let the caller handle 562 * that first since ExtINT interrupts ignore the priority. 563 */ 564 if (intno == 0 || apic_check_pic(s)) { 565 apic_sync_vapic(s, SYNC_TO_VAPIC); 566 return -1; 567 } else if (intno < 0) { 568 apic_sync_vapic(s, SYNC_TO_VAPIC); 569 return s->spurious_vec & 0xff; 570 } 571 apic_reset_bit(s->irr, intno); 572 apic_set_bit(s->isr, intno); 573 apic_sync_vapic(s, SYNC_TO_VAPIC); 574 575 apic_update_irq(s); 576 577 return intno; 578 } 579 580 int apic_accept_pic_intr(DeviceState *dev) 581 { 582 APICCommonState *s = APIC_COMMON(dev); 583 uint32_t lvt0; 584 585 if (!s) 586 return -1; 587 588 lvt0 = s->lvt[APIC_LVT_LINT0]; 589 590 if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 || 591 (lvt0 & APIC_LVT_MASKED) == 0) 592 return 1; 593 594 return 0; 595 } 596 597 static uint32_t apic_get_current_count(APICCommonState *s) 598 { 599 int64_t d; 600 uint32_t val; 601 d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >> 602 s->count_shift; 603 if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { 604 /* periodic */ 605 val = s->initial_count - (d % ((uint64_t)s->initial_count + 1)); 606 } else { 607 if (d >= s->initial_count) 608 val = 0; 609 else 610 val = s->initial_count - d; 611 } 612 return val; 613 } 614 615 static void apic_timer_update(APICCommonState *s, int64_t current_time) 616 { 617 if (apic_next_timer(s, current_time)) { 618 timer_mod(s->timer, s->next_time); 619 } else { 620 timer_del(s->timer); 621 } 622 } 623 624 static void apic_timer(void *opaque) 625 { 626 APICCommonState *s = opaque; 627 628 apic_local_deliver(s, APIC_LVT_TIMER); 629 apic_timer_update(s, s->next_time); 630 } 631 632 static uint32_t apic_mem_readb(void *opaque, hwaddr addr) 633 { 634 return 0; 635 } 636 637 static uint32_t apic_mem_readw(void *opaque, hwaddr addr) 638 { 639 return 0; 640 } 641 642 static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val) 643 { 644 } 645 646 static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val) 647 { 648 } 649 650 static uint32_t apic_mem_readl(void *opaque, hwaddr addr) 651 { 652 DeviceState *dev; 653 APICCommonState *s; 654 uint32_t val; 655 int index; 656 657 dev = cpu_get_current_apic(); 658 if (!dev) { 659 return 0; 660 } 661 s = APIC_COMMON(dev); 662 663 index = (addr >> 4) & 0xff; 664 switch(index) { 665 case 0x02: /* id */ 666 val = s->id << 24; 667 break; 668 case 0x03: /* version */ 669 val = s->version | ((APIC_LVT_NB - 1) << 16); 670 break; 671 case 0x08: 672 apic_sync_vapic(s, SYNC_FROM_VAPIC); 673 if (apic_report_tpr_access) { 674 cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ); 675 } 676 val = s->tpr; 677 break; 678 case 0x09: 679 val = apic_get_arb_pri(s); 680 break; 681 case 0x0a: 682 /* ppr */ 683 val = apic_get_ppr(s); 684 break; 685 case 0x0b: 686 val = 0; 687 break; 688 case 0x0d: 689 val = s->log_dest << 24; 690 break; 691 case 0x0e: 692 val = (s->dest_mode << 28) | 0xfffffff; 693 break; 694 case 0x0f: 695 val = s->spurious_vec; 696 break; 697 case 0x10 ... 0x17: 698 val = s->isr[index & 7]; 699 break; 700 case 0x18 ... 0x1f: 701 val = s->tmr[index & 7]; 702 break; 703 case 0x20 ... 0x27: 704 val = s->irr[index & 7]; 705 break; 706 case 0x28: 707 val = s->esr; 708 break; 709 case 0x30: 710 case 0x31: 711 val = s->icr[index & 1]; 712 break; 713 case 0x32 ... 0x37: 714 val = s->lvt[index - 0x32]; 715 break; 716 case 0x38: 717 val = s->initial_count; 718 break; 719 case 0x39: 720 val = apic_get_current_count(s); 721 break; 722 case 0x3e: 723 val = s->divide_conf; 724 break; 725 default: 726 s->esr |= APIC_ESR_ILLEGAL_ADDRESS; 727 val = 0; 728 break; 729 } 730 trace_apic_mem_readl(addr, val); 731 return val; 732 } 733 734 static void apic_send_msi(hwaddr addr, uint32_t data) 735 { 736 uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; 737 uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; 738 uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; 739 uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; 740 uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; 741 /* XXX: Ignore redirection hint. */ 742 apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); 743 } 744 745 static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val) 746 { 747 DeviceState *dev; 748 APICCommonState *s; 749 int index = (addr >> 4) & 0xff; 750 if (addr > 0xfff || !index) { 751 /* MSI and MMIO APIC are at the same memory location, 752 * but actually not on the global bus: MSI is on PCI bus 753 * APIC is connected directly to the CPU. 754 * Mapping them on the global bus happens to work because 755 * MSI registers are reserved in APIC MMIO and vice versa. */ 756 apic_send_msi(addr, val); 757 return; 758 } 759 760 dev = cpu_get_current_apic(); 761 if (!dev) { 762 return; 763 } 764 s = APIC_COMMON(dev); 765 766 trace_apic_mem_writel(addr, val); 767 768 switch(index) { 769 case 0x02: 770 s->id = (val >> 24); 771 break; 772 case 0x03: 773 break; 774 case 0x08: 775 if (apic_report_tpr_access) { 776 cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE); 777 } 778 s->tpr = val; 779 apic_sync_vapic(s, SYNC_TO_VAPIC); 780 apic_update_irq(s); 781 break; 782 case 0x09: 783 case 0x0a: 784 break; 785 case 0x0b: /* EOI */ 786 apic_eoi(s); 787 break; 788 case 0x0d: 789 s->log_dest = val >> 24; 790 break; 791 case 0x0e: 792 s->dest_mode = val >> 28; 793 break; 794 case 0x0f: 795 s->spurious_vec = val & 0x1ff; 796 apic_update_irq(s); 797 break; 798 case 0x10 ... 0x17: 799 case 0x18 ... 0x1f: 800 case 0x20 ... 0x27: 801 case 0x28: 802 break; 803 case 0x30: 804 s->icr[0] = val; 805 apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1, 806 (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), 807 (s->icr[0] >> 15) & 1); 808 break; 809 case 0x31: 810 s->icr[1] = val; 811 break; 812 case 0x32 ... 0x37: 813 { 814 int n = index - 0x32; 815 s->lvt[n] = val; 816 if (n == APIC_LVT_TIMER) { 817 apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 818 } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) { 819 apic_update_irq(s); 820 } 821 } 822 break; 823 case 0x38: 824 s->initial_count = val; 825 s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 826 apic_timer_update(s, s->initial_count_load_time); 827 break; 828 case 0x39: 829 break; 830 case 0x3e: 831 { 832 int v; 833 s->divide_conf = val & 0xb; 834 v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4); 835 s->count_shift = (v + 1) & 7; 836 } 837 break; 838 default: 839 s->esr |= APIC_ESR_ILLEGAL_ADDRESS; 840 break; 841 } 842 } 843 844 static void apic_pre_save(APICCommonState *s) 845 { 846 apic_sync_vapic(s, SYNC_FROM_VAPIC); 847 } 848 849 static void apic_post_load(APICCommonState *s) 850 { 851 if (s->timer_expiry != -1) { 852 timer_mod(s->timer, s->timer_expiry); 853 } else { 854 timer_del(s->timer); 855 } 856 } 857 858 static const MemoryRegionOps apic_io_ops = { 859 .old_mmio = { 860 .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, }, 861 .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, }, 862 }, 863 .endianness = DEVICE_NATIVE_ENDIAN, 864 }; 865 866 static void apic_realize(DeviceState *dev, Error **errp) 867 { 868 APICCommonState *s = APIC_COMMON(dev); 869 870 memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", 871 APIC_SPACE_SIZE); 872 873 s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); 874 local_apics[s->idx] = s; 875 876 msi_supported = true; 877 } 878 879 static void apic_class_init(ObjectClass *klass, void *data) 880 { 881 APICCommonClass *k = APIC_COMMON_CLASS(klass); 882 883 k->realize = apic_realize; 884 k->set_base = apic_set_base; 885 k->set_tpr = apic_set_tpr; 886 k->get_tpr = apic_get_tpr; 887 k->vapic_base_update = apic_vapic_base_update; 888 k->external_nmi = apic_external_nmi; 889 k->pre_save = apic_pre_save; 890 k->post_load = apic_post_load; 891 } 892 893 static const TypeInfo apic_info = { 894 .name = "apic", 895 .instance_size = sizeof(APICCommonState), 896 .parent = TYPE_APIC_COMMON, 897 .class_init = apic_class_init, 898 }; 899 900 static void apic_register_types(void) 901 { 902 type_register_static(&apic_info); 903 } 904 905 type_init(apic_register_types) 906