/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/intc/ioapic.h"
#include "hw/intc/i8259.h"
#include "hw/intc/kvm_irqcount.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"
#include "qapi/error.h"
#include "qom/object.h"

#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4

static APICCommonState **local_apics;
static uint32_t max_apics;
static uint32_t max_apic_words;

#define TYPE_APIC "apic"
/* This reuses the APICCommonState typedef from APIC_COMMON */
DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
                         TYPE_APIC)

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode);

void apic_set_max_apic_id(uint32_t max_apic_id)
{
    int word_size = 32;

    /* round up the max apic id to the next multiple of the word size */
    max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1);

    local_apics = g_malloc0(sizeof(*local_apics) * max_apics);
    max_apic_words = max_apics >> 5;
}


/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int apic_ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void apic_reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + apic_fls_bit(tab[i]);
        }
    }
    return -1;
}

static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        address_space_write_rom(&address_space_memory,
                                s->vapic_paddr + start,
                                MEMTXATTRS_UNSPECIFIED,
                                ((void *)&vapic_state) + start, length);
    }
}
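/*
 * Note on apic_sync_vapic() above: the partial update used for
 * SYNC_ISR_IRR_TO_VAPIC writes only the byte range from the isr field up
 * to (but not including) the enabled field of VAPICState, so it relies on
 * isr, zero and irr being laid out contiguously in that structure; a full
 * SYNC_TO_VAPIC rewrites the whole structure, including tpr and enabled.
 */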
static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *dev, int level)
{
    APICCommonState *s = APIC(dev);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            apic_reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            apic_update_irq(s);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j;\
    for (__i = 0; __i < max_apic_words; __i++) {\
        uint32_t __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for (__j = 0; __j < 32; __j++) {\
                if (__mask & (1U << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for (i = 0; i < max_apic_words; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(CPU(apic_iter->cpu),
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}
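/*
 * The delivery bitmask used throughout this file has one bit per possible
 * APIC: bit (id % 32) of word (id / 32) corresponds to local_apics[id],
 * which is indexed by the initial APIC ID (see apic_realize()).  The
 * bitmask is max_apic_words words long, as sized by apic_set_max_apic_id().
 */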
static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    uint32_t *deliver_bitmask = g_malloc(max_apic_words * sizeof(uint32_t));

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
    g_free(deliver_bitmask);
}

bool is_x2apic_mode(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    return s->apicbase & MSR_IA32_APICBASE_EXTD;
}

static int apic_set_base_check(APICCommonState *s, uint64_t val)
{
    /* Enable x2apic when x2apic is not supported by CPU */
    if (!cpu_has_x2apic_feature(&s->cpu->env) &&
        val & MSR_IA32_APICBASE_EXTD) {
        return -1;
    }

    /*
     * Transition into invalid state
     * (s->apicbase & MSR_IA32_APICBASE_ENABLE == 0) &&
     * (s->apicbase & MSR_IA32_APICBASE_EXTD) == 1
     */
    if (!(val & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    /* Invalid transition from disabled mode to x2APIC */
    if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    /* Invalid transition from x2APIC to xAPIC */
    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        (s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_ENABLE) &&
        !(val & MSR_IA32_APICBASE_EXTD)) {
        return -1;
    }

    return 0;
}

static int apic_set_base(APICCommonState *s, uint64_t val)
{
    if (apic_set_base_check(s, val) < 0) {
        return -1;
    }

    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }

    /* Transition from disabled mode to xAPIC */
    if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
        (val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase |= MSR_IA32_APICBASE_ENABLE;
        cpu_set_apic_feature(&s->cpu->env);
    }

    /* Transition from xAPIC to x2APIC */
    if (cpu_has_x2apic_feature(&s->cpu->env) &&
        !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
        (val & MSR_IA32_APICBASE_EXTD)) {
        s->apicbase |= MSR_IA32_APICBASE_EXTD;

        s->log_dest = ((s->initial_apic_id & 0xffff0) << 16) |
                      (1 << (s->initial_apic_id & 0xf));
    }

    return 0;
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

int apic_get_highest_priority_irr(DeviceState *dev)
{
    APICCommonState *s;

    if (!dev) {
        /* no interrupts */
        return -1;
    }
    s = APIC_COMMON(dev);
    return get_highest_priority_int(s->irr);
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}
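/*
 * A worked example for apic_get_ppr() above: with TPR = 0x3f (priority
 * class 3) and vector 0x52 as the highest in-service interrupt (class 5),
 * the ISR class wins and PPR = 0x50; with TPR = 0x6f the TPR wins and
 * PPR = 0x6f.  Only the upper nibble (the priority class) takes part in
 * the comparison.
 */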
static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;

    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return 0;
    }

    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    CPUState *cpu;
    DeviceState *dev = (DeviceState *)s;

    cpu = CPU(s->cpu);
    if (!qemu_cpu_is_self(cpu)) {
        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    kvm_report_irq_delivered(!apic_get_bit(s->irr, vector_num));

    apic_set_bit(s->irr, vector_num);
    if (trigger_mode)
        apic_set_bit(s->tmr, vector_num);
    else
        apic_reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    apic_reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static bool apic_match_dest(APICCommonState *apic, uint32_t dest)
{
    if (is_x2apic_mode(&apic->parent_obj)) {
        return apic->initial_apic_id == dest;
    } else {
        return apic->id == (uint8_t)dest;
    }
}

static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest)
{
    APICCommonState *apic = NULL;
    int i;

    for (i = 0; i < max_apics; i++) {
        apic = local_apics[i];
        if (apic && apic_match_dest(apic, dest)) {
            apic_set_bit(deliver_bitmask, i);
        }
    }
}
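/*
 * Note: in physical destination mode apic_match_dest() compares the full
 * 32-bit initial APIC ID when the target CPU is in x2APIC mode, but only
 * the low 8 bits of the destination against the software-writable 8-bit
 * APIC ID when it is in xAPIC mode.
 */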
/*
 * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast.
 * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC
 * broadcast.
 */
static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask,
                                       bool is_x2apic_broadcast)
{
    int i;
    APICCommonState *apic_iter;

    for (i = 0; i < max_apics; i++) {
        apic_iter = local_apics[i];
        if (apic_iter) {
            bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj);

            if (is_x2apic_broadcast && apic_in_x2apic) {
                apic_set_bit(deliver_bitmask, i);
            } else if (!is_x2apic_broadcast && !apic_in_x2apic) {
                apic_set_bit(deliver_bitmask, i);
            }
        }
    }
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint32_t dest, uint8_t dest_mode)
{
    APICCommonState *apic;
    int i;

    memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t));

    /*
     * x2APIC broadcast is delivered to all x2APIC CPUs regardless of
     * destination mode. In case the destination mode is physical, it is
     * broadcast to all xAPIC CPUs too. Otherwise, if the destination
     * mode is logical, we need to continue checking whether the xAPIC
     * CPUs accept the interrupt.
     */
    if (dest == 0xffffffff) {
        if (dest_mode == APIC_DESTMODE_PHYSICAL) {
            memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t));
            return;
        } else {
            apic_get_broadcast_bitmask(deliver_bitmask, true);
        }
    }

    if (dest_mode == APIC_DESTMODE_PHYSICAL) {
        apic_find_dest(deliver_bitmask, dest);
        /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */
        if (dest == 0xff) {
            apic_get_broadcast_bitmask(deliver_bitmask, false);
        }
    } else {
        /* XXX: logical mode */
        for (i = 0; i < max_apics; i++) {
            apic = local_apics[i];
            if (apic) {
                /* x2APIC logical mode */
                if (apic->apicbase & MSR_IA32_APICBASE_EXTD) {
                    if ((dest >> 16) == (apic->extended_log_dest >> 16) &&
                        (dest & apic->extended_log_dest & 0xffff)) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                    continue;
                }

                /* xAPIC logical mode */
                dest = (uint8_t)dest;
                if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) {
                    if (dest & apic->log_dest) {
                        apic_set_bit(deliver_bitmask, i);
                    }
                } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) {
                    /*
                     * In the cluster model of xAPIC logical mode IPI, the 4
                     * higher bits are used as the cluster address, and the 4
                     * lower bits are the bitmask for local APICs in the
                     * cluster. The IPI is delivered to an APIC if the
                     * cluster address matches and the APIC's address bit in
                     * the cluster is set in the bitmask of the destination
                     * ID in the IPI.
                     *
                     * The cluster address ranges from 0 - 14; the cluster
                     * address 15 (0xf) is the broadcast address to all
                     * clusters.
                     */
                    if ((dest & 0xf0) == 0xf0 ||
                        (dest & 0xf0) == (apic->log_dest & 0xf0)) {
                        if (dest & apic->log_dest & 0x0f) {
                            apic_set_bit(deliver_bitmask, i);
                        }
                    }
                }
            }
        }
    }
}
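/*
 * Cluster-mode example for apic_get_delivery_bitmask() above: a logical
 * destination of 0x2c selects cluster 2 with member mask 0b1100, so an
 * APIC whose LDR is 0x24 (cluster 2, member bit 0b0100) is selected,
 * while one with LDR 0x21 (member bit 0b0001) is not.  A destination with
 * cluster field 0xf addresses the listed members in every cluster.
 */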
static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode, uint8_t dest_shorthand)
{
    APICCommonState *s = APIC(dev);
    APICCommonState *apic_iter;
    uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t);
    uint32_t *deliver_bitmask = g_malloc(deliver_bitmask_size);
    uint32_t current_apic_id;

    if (is_x2apic_mode(dev)) {
        current_apic_id = s->initial_apic_id;
    } else {
        current_apic_id = s->id;
    }

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, deliver_bitmask_size);
        apic_set_bit(deliver_bitmask, current_apic_id);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        break;
    case 3:
        memset(deliver_bitmask, 0xff, deliver_bitmask_size);
        apic_reset_bit(deliver_bitmask, current_apic_id);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
    g_free(deliver_bitmask);
}

static bool apic_check_pic(APICCommonState *s)
{
    DeviceState *dev = (DeviceState *)s;

    if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(dev, 1);
    return true;
}
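/*
 * apic_get_interrupt() below returns -1 both when no interrupt is
 * deliverable and when the pending interrupt should instead be taken from
 * the 8259 (APIC not enabled, or apic_check_pic() reports a raised PIC
 * output, which bypasses the local APIC priority logic); the caller is
 * expected to query the PIC in that case.
 */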
int apic_get_interrupt(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    /* if there is an interrupt from the 8259, let the caller handle
     * that first since ExtINT interrupts ignore the priority.
     */
    if (intno == 0 || apic_check_pic(s)) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    apic_reset_bit(s->irr, intno);
    apic_set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return isa_pic != NULL;

    return 0;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        timer_mod(s->timer, s->next_time);
    } else {
        timer_del(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static int apic_register_read(int index, uint64_t *value)
{
    DeviceState *dev;
    APICCommonState *s;
    uint32_t val;
    int ret = 0;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    switch(index) {
    case 0x02: /* id */
        if (is_x2apic_mode(dev)) {
            val = s->initial_apic_id;
        } else {
            val = s->id << 24;
        }
        break;
    case 0x03: /* version */
        val = s->version | ((APIC_LVT_NB - 1) << 16);
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        if (is_x2apic_mode(dev)) {
            val = s->extended_log_dest;
        } else {
            val = s->log_dest << 24;
        }
        break;
    case 0x0e:
        if (is_x2apic_mode(dev)) {
            val = 0;
            ret = -1;
        } else {
            val = (s->dest_mode << 28) | 0xfffffff;
        }
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        val = 0;
        ret = -1;
        break;
    }

    trace_apic_register_read(index, val);
    *value = val;
    return ret;
}
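/*
 * The register indices above are the xAPIC MMIO offset divided by 16 (the
 * APIC page spaces its registers at 16-byte intervals).  Architecturally
 * the same register appears as MSR 0x800 + index in x2APIC mode;
 * apic_msr_read() and apic_msr_write() below are presumably handed that
 * index by the MSR emulation code.
 */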
static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val;
    int index;

    if (size < 4) {
        return 0;
    }

    index = (addr >> 4) & 0xff;
    apic_register_read(index, &val);

    return val;
}

int apic_msr_read(int index, uint64_t *val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_read(index, val);
}

static void apic_send_msi(MSIMessage *msi)
{
    uint64_t addr = msi->address;
    uint32_t data = msi->data;
    uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    /*
     * The higher 3 bytes of the destination id are stored in the upper
     * word of the MSI address. See x86_iommu_irq_to_msi_message()
     */
    dest = dest | (addr >> 32);
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static int apic_register_write(int index, uint64_t val)
{
    DeviceState *dev;
    APICCommonState *s;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }
    s = APIC(dev);

    trace_apic_register_write(index, val);

    switch(index) {
    case 0x02:
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->log_dest = val >> 24;
        break;
    case 0x0e:
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30: {
        uint32_t dest;

        s->icr[0] = val;
        if (is_x2apic_mode(dev)) {
            s->icr[1] = val >> 32;
            dest = s->icr[1];
        } else {
            dest = (s->icr[1] >> 24) & 0xff;
        }

        apic_deliver(dev, dest, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3);
        break;
    }
    case 0x31:
        if (is_x2apic_mode(dev)) {
            return -1;
        }

        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    case 0x3f: {
        int vector = val & 0xff;

        if (!is_x2apic_mode(dev)) {
            return -1;
        }

        /*
         * Self IPI is identical to IPI with
         * - Destination shorthand: 1 (Self)
         * - Trigger mode: 0 (Edge)
         * - Delivery mode: 0 (Fixed)
         */
        apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1);

        break;
    }
    default:
        s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
        return -1;
    }

    return 0;
}
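/*
 * The ICR write above (index 0x30) decodes the fields it passes to
 * apic_deliver() as follows: vector in bits 7:0, delivery mode in bits
 * 10:8, destination mode in bit 11, level in bit 14, trigger mode in bit
 * 15 and destination shorthand in bits 19:18.  The destination itself
 * comes from ICR bits 63:56 in xAPIC mode and from the full upper 32 bits
 * in x2APIC mode.
 */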
static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    int index = (addr >> 4) & 0xff;

    if (size < 4) {
        return;
    }

    if (addr > 0xfff || !index) {
        /*
         * MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * while the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa.
         */
        MSIMessage msi = { .address = addr, .data = val };
        apic_send_msi(&msi);
        return;
    }

    apic_register_write(index, val);
}

int apic_msr_write(int index, uint64_t val)
{
    DeviceState *dev;

    dev = cpu_get_current_apic();
    if (!dev) {
        return -1;
    }

    if (!is_x2apic_mode(dev)) {
        return -1;
    }

    return apic_register_write(index, val);
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        timer_mod(s->timer, s->timer_expiry);
    } else {
        timer_del(s->timer);
    }
}
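/*
 * apic_io_ops below accepts accesses of 1 to 4 bytes, but apic_mem_read()
 * and apic_mem_write() silently discard anything narrower than 32 bits:
 * narrow reads return 0 and narrow writes are dropped.
 */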
static const MemoryRegionOps apic_io_ops = {
    .read = apic_mem_read,
    .write = apic_mem_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC(dev);

    if (kvm_enabled()) {
        warn_report("Userspace local APIC is deprecated for KVM.");
        warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
    }

    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
                          APIC_SPACE_SIZE);

    /*
     * apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
     * write back to apic-msi. As such mark the apic-msi region re-entrancy
     * safe.
     */
    s->io_memory.disable_reentrancy_guard = true;

    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);

    /*
     * The 'none' machine does not call apic_set_max_apic_id before creating
     * the APIC, so call it here with 1, which is the maximum number of CPUs
     * on that machine.
     */
    if (!local_apics) {
        apic_set_max_apic_id(1);
    }
    local_apics[s->initial_apic_id] = s;

    msi_nonbroken = true;
}

static void apic_unrealize(DeviceState *dev)
{
    APICCommonState *s = APIC(dev);

    timer_free(s->timer);
    local_apics[s->initial_apic_id] = NULL;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->realize = apic_realize;
    k->unrealize = apic_unrealize;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
    k->send_msi = apic_send_msi;
}

static const TypeInfo apic_info = {
    .name = TYPE_APIC,
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)