1 2 /* 3 * Local APIC virtualization 4 * 5 * Copyright (C) 2006 Qumranet, Inc. 6 * Copyright (C) 2007 Novell 7 * Copyright (C) 2007 Intel 8 * Copyright 2009 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Dor Laor <dor.laor@qumranet.com> 12 * Gregory Haskins <ghaskins@novell.com> 13 * Yaozu (Eddie) Dong <eddie.dong@intel.com> 14 * 15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation. 16 * 17 * This work is licensed under the terms of the GNU GPL, version 2. See 18 * the COPYING file in the top-level directory. 19 */ 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/mm.h> 24 #include <linux/highmem.h> 25 #include <linux/smp.h> 26 #include <linux/hrtimer.h> 27 #include <linux/io.h> 28 #include <linux/export.h> 29 #include <linux/math64.h> 30 #include <linux/slab.h> 31 #include <asm/processor.h> 32 #include <asm/msr.h> 33 #include <asm/page.h> 34 #include <asm/current.h> 35 #include <asm/apicdef.h> 36 #include <asm/delay.h> 37 #include <linux/atomic.h> 38 #include <linux/jump_label.h> 39 #include "kvm_cache_regs.h" 40 #include "irq.h" 41 #include "trace.h" 42 #include "x86.h" 43 #include "cpuid.h" 44 #include "hyperv.h" 45 46 #ifndef CONFIG_X86_64 47 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) 48 #else 49 #define mod_64(x, y) ((x) % (y)) 50 #endif 51 52 #define PRId64 "d" 53 #define PRIx64 "llx" 54 #define PRIu64 "u" 55 #define PRIo64 "o" 56 57 #define APIC_BUS_CYCLE_NS 1 58 59 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ 60 #define apic_debug(fmt, arg...) 61 62 /* 14 is the version for Xeon and Pentium 8.4.8*/ 63 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) 64 #define LAPIC_MMIO_LENGTH (1 << 12) 65 /* followed define is not in apicdef.h */ 66 #define APIC_SHORT_MASK 0xc0000 67 #define APIC_DEST_NOSHORT 0x0 68 #define APIC_DEST_MASK 0x800 69 #define MAX_APIC_VECTOR 256 70 #define APIC_VECTORS_PER_REG 32 71 72 #define APIC_BROADCAST 0xFF 73 #define X2APIC_BROADCAST 0xFFFFFFFFul 74 75 static inline int apic_test_vector(int vec, void *bitmap) 76 { 77 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 78 } 79 80 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) 81 { 82 struct kvm_lapic *apic = vcpu->arch.apic; 83 84 return apic_test_vector(vector, apic->regs + APIC_ISR) || 85 apic_test_vector(vector, apic->regs + APIC_IRR); 86 } 87 88 static inline void apic_clear_vector(int vec, void *bitmap) 89 { 90 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 91 } 92 93 static inline int __apic_test_and_set_vector(int vec, void *bitmap) 94 { 95 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 96 } 97 98 static inline int __apic_test_and_clear_vector(int vec, void *bitmap) 99 { 100 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 101 } 102 103 struct static_key_deferred apic_hw_disabled __read_mostly; 104 struct static_key_deferred apic_sw_disabled __read_mostly; 105 106 static inline int apic_enabled(struct kvm_lapic *apic) 107 { 108 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic); 109 } 110 111 #define LVT_MASK \ 112 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) 113 114 #define LINT_MASK \ 115 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \ 116 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) 117 118 static inline u8 kvm_xapic_id(struct kvm_lapic *apic) 119 { 120 return kvm_lapic_get_reg(apic, APIC_ID) >> 24; 121 } 122 123 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic) 124 { 125 return apic->vcpu->vcpu_id; 126 } 127 128 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, 129 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) { 130 switch (map->mode) { 131 case KVM_APIC_MODE_X2APIC: { 132 u32 offset = (dest_id >> 16) * 16; 133 u32 max_apic_id = map->max_apic_id; 134 135 if (offset <= max_apic_id) { 136 u8 cluster_size = min(max_apic_id - offset + 1, 16U); 137 138 *cluster = &map->phys_map[offset]; 139 *mask = dest_id & (0xffff >> (16 - cluster_size)); 140 } else { 141 *mask = 0; 142 } 143 144 return true; 145 } 146 case KVM_APIC_MODE_XAPIC_FLAT: 147 *cluster = map->xapic_flat_map; 148 *mask = dest_id & 0xff; 149 return true; 150 case KVM_APIC_MODE_XAPIC_CLUSTER: 151 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; 152 *mask = dest_id & 0xf; 153 return true; 154 default: 155 /* Not optimized. */ 156 return false; 157 } 158 } 159 160 static void kvm_apic_map_free(struct rcu_head *rcu) 161 { 162 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu); 163 164 kvfree(map); 165 } 166 167 static void recalculate_apic_map(struct kvm *kvm) 168 { 169 struct kvm_apic_map *new, *old = NULL; 170 struct kvm_vcpu *vcpu; 171 int i; 172 u32 max_id = 255; /* enough space for any xAPIC ID */ 173 174 mutex_lock(&kvm->arch.apic_map_lock); 175 176 kvm_for_each_vcpu(i, vcpu, kvm) 177 if (kvm_apic_present(vcpu)) 178 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); 179 180 new = kvzalloc(sizeof(struct kvm_apic_map) + 181 sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL); 182 183 if (!new) 184 goto out; 185 186 new->max_apic_id = max_id; 187 188 kvm_for_each_vcpu(i, vcpu, kvm) { 189 struct kvm_lapic *apic = vcpu->arch.apic; 190 struct kvm_lapic **cluster; 191 u16 mask; 192 u32 ldr; 193 u8 xapic_id; 194 u32 x2apic_id; 195 196 if (!kvm_apic_present(vcpu)) 197 continue; 198 199 xapic_id = kvm_xapic_id(apic); 200 x2apic_id = kvm_x2apic_id(apic); 201 202 /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */ 203 if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) && 204 x2apic_id <= new->max_apic_id) 205 new->phys_map[x2apic_id] = apic; 206 /* 207 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around, 208 * prevent them from masking VCPUs with APIC ID <= 0xff. 209 */ 210 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) 211 new->phys_map[xapic_id] = apic; 212 213 ldr = kvm_lapic_get_reg(apic, APIC_LDR); 214 215 if (apic_x2apic_mode(apic)) { 216 new->mode |= KVM_APIC_MODE_X2APIC; 217 } else if (ldr) { 218 ldr = GET_APIC_LOGICAL_ID(ldr); 219 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT) 220 new->mode |= KVM_APIC_MODE_XAPIC_FLAT; 221 else 222 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER; 223 } 224 225 if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask)) 226 continue; 227 228 if (mask) 229 cluster[ffs(mask) - 1] = apic; 230 } 231 out: 232 old = rcu_dereference_protected(kvm->arch.apic_map, 233 lockdep_is_held(&kvm->arch.apic_map_lock)); 234 rcu_assign_pointer(kvm->arch.apic_map, new); 235 mutex_unlock(&kvm->arch.apic_map_lock); 236 237 if (old) 238 call_rcu(&old->rcu, kvm_apic_map_free); 239 240 kvm_make_scan_ioapic_request(kvm); 241 } 242 243 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) 244 { 245 bool enabled = val & APIC_SPIV_APIC_ENABLED; 246 247 kvm_lapic_set_reg(apic, APIC_SPIV, val); 248 249 if (enabled != apic->sw_enabled) { 250 apic->sw_enabled = enabled; 251 if (enabled) { 252 static_key_slow_dec_deferred(&apic_sw_disabled); 253 recalculate_apic_map(apic->vcpu->kvm); 254 } else 255 static_key_slow_inc(&apic_sw_disabled.key); 256 } 257 } 258 259 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) 260 { 261 kvm_lapic_set_reg(apic, APIC_ID, id << 24); 262 recalculate_apic_map(apic->vcpu->kvm); 263 } 264 265 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) 266 { 267 kvm_lapic_set_reg(apic, APIC_LDR, id); 268 recalculate_apic_map(apic->vcpu->kvm); 269 } 270 271 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id) 272 { 273 u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); 274 275 WARN_ON_ONCE(id != apic->vcpu->vcpu_id); 276 277 kvm_lapic_set_reg(apic, APIC_ID, id); 278 kvm_lapic_set_reg(apic, APIC_LDR, ldr); 279 recalculate_apic_map(apic->vcpu->kvm); 280 } 281 282 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) 283 { 284 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED); 285 } 286 287 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type) 288 { 289 return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK; 290 } 291 292 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic) 293 { 294 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT; 295 } 296 297 static inline int apic_lvtt_period(struct kvm_lapic *apic) 298 { 299 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC; 300 } 301 302 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic) 303 { 304 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE; 305 } 306 307 static inline int apic_lvt_nmi_mode(u32 lvt_val) 308 { 309 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; 310 } 311 312 void kvm_apic_set_version(struct kvm_vcpu *vcpu) 313 { 314 struct kvm_lapic *apic = vcpu->arch.apic; 315 struct kvm_cpuid_entry2 *feat; 316 u32 v = APIC_VERSION; 317 318 if (!lapic_in_kernel(vcpu)) 319 return; 320 321 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); 322 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) 323 v |= APIC_LVR_DIRECTED_EOI; 324 kvm_lapic_set_reg(apic, APIC_LVR, v); 325 } 326 327 static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = { 328 LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */ 329 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ 330 LVT_MASK | APIC_MODE_MASK, /* LVTPC */ 331 LINT_MASK, LINT_MASK, /* LVT0-1 */ 332 LVT_MASK /* LVTERR */ 333 }; 334 335 static int find_highest_vector(void *bitmap) 336 { 337 int vec; 338 u32 *reg; 339 340 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; 341 vec >= 0; vec -= APIC_VECTORS_PER_REG) { 342 reg = bitmap + REG_POS(vec); 343 if (*reg) 344 return __fls(*reg) + vec; 345 } 346 347 return -1; 348 } 349 350 static u8 count_vectors(void *bitmap) 351 { 352 int vec; 353 u32 *reg; 354 u8 count = 0; 355 356 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) { 357 reg = bitmap + REG_POS(vec); 358 count += hweight32(*reg); 359 } 360 361 return count; 362 } 363 364 int __kvm_apic_update_irr(u32 *pir, void *regs) 365 { 366 u32 i, vec; 367 u32 pir_val, irr_val; 368 int max_irr = -1; 369 370 for (i = vec = 0; i <= 7; i++, vec += 32) { 371 pir_val = READ_ONCE(pir[i]); 372 irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10)); 373 if (pir_val) { 374 irr_val |= xchg(&pir[i], 0); 375 *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val; 376 } 377 if (irr_val) 378 max_irr = __fls(irr_val) + vec; 379 } 380 381 return max_irr; 382 } 383 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr); 384 385 int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) 386 { 387 struct kvm_lapic *apic = vcpu->arch.apic; 388 389 return __kvm_apic_update_irr(pir, apic->regs); 390 } 391 EXPORT_SYMBOL_GPL(kvm_apic_update_irr); 392 393 static inline int apic_search_irr(struct kvm_lapic *apic) 394 { 395 return find_highest_vector(apic->regs + APIC_IRR); 396 } 397 398 static inline int apic_find_highest_irr(struct kvm_lapic *apic) 399 { 400 int result; 401 402 /* 403 * Note that irr_pending is just a hint. It will be always 404 * true with virtual interrupt delivery enabled. 405 */ 406 if (!apic->irr_pending) 407 return -1; 408 409 result = apic_search_irr(apic); 410 ASSERT(result == -1 || result >= 16); 411 412 return result; 413 } 414 415 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) 416 { 417 struct kvm_vcpu *vcpu; 418 419 vcpu = apic->vcpu; 420 421 if (unlikely(vcpu->arch.apicv_active)) { 422 /* need to update RVI */ 423 apic_clear_vector(vec, apic->regs + APIC_IRR); 424 kvm_x86_ops->hwapic_irr_update(vcpu, 425 apic_find_highest_irr(apic)); 426 } else { 427 apic->irr_pending = false; 428 apic_clear_vector(vec, apic->regs + APIC_IRR); 429 if (apic_search_irr(apic) != -1) 430 apic->irr_pending = true; 431 } 432 } 433 434 static inline void apic_set_isr(int vec, struct kvm_lapic *apic) 435 { 436 struct kvm_vcpu *vcpu; 437 438 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) 439 return; 440 441 vcpu = apic->vcpu; 442 443 /* 444 * With APIC virtualization enabled, all caching is disabled 445 * because the processor can modify ISR under the hood. Instead 446 * just set SVI. 447 */ 448 if (unlikely(vcpu->arch.apicv_active)) 449 kvm_x86_ops->hwapic_isr_update(vcpu, vec); 450 else { 451 ++apic->isr_count; 452 BUG_ON(apic->isr_count > MAX_APIC_VECTOR); 453 /* 454 * ISR (in service register) bit is set when injecting an interrupt. 455 * The highest vector is injected. Thus the latest bit set matches 456 * the highest bit in ISR. 457 */ 458 apic->highest_isr_cache = vec; 459 } 460 } 461 462 static inline int apic_find_highest_isr(struct kvm_lapic *apic) 463 { 464 int result; 465 466 /* 467 * Note that isr_count is always 1, and highest_isr_cache 468 * is always -1, with APIC virtualization enabled. 469 */ 470 if (!apic->isr_count) 471 return -1; 472 if (likely(apic->highest_isr_cache != -1)) 473 return apic->highest_isr_cache; 474 475 result = find_highest_vector(apic->regs + APIC_ISR); 476 ASSERT(result == -1 || result >= 16); 477 478 return result; 479 } 480 481 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) 482 { 483 struct kvm_vcpu *vcpu; 484 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) 485 return; 486 487 vcpu = apic->vcpu; 488 489 /* 490 * We do get here for APIC virtualization enabled if the guest 491 * uses the Hyper-V APIC enlightenment. In this case we may need 492 * to trigger a new interrupt delivery by writing the SVI field; 493 * on the other hand isr_count and highest_isr_cache are unused 494 * and must be left alone. 495 */ 496 if (unlikely(vcpu->arch.apicv_active)) 497 kvm_x86_ops->hwapic_isr_update(vcpu, 498 apic_find_highest_isr(apic)); 499 else { 500 --apic->isr_count; 501 BUG_ON(apic->isr_count < 0); 502 apic->highest_isr_cache = -1; 503 } 504 } 505 506 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) 507 { 508 /* This may race with setting of irr in __apic_accept_irq() and 509 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq 510 * will cause vmexit immediately and the value will be recalculated 511 * on the next vmentry. 512 */ 513 return apic_find_highest_irr(vcpu->arch.apic); 514 } 515 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr); 516 517 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, 518 int vector, int level, int trig_mode, 519 struct dest_map *dest_map); 520 521 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, 522 struct dest_map *dest_map) 523 { 524 struct kvm_lapic *apic = vcpu->arch.apic; 525 526 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, 527 irq->level, irq->trig_mode, dest_map); 528 } 529 530 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val) 531 { 532 533 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, 534 sizeof(val)); 535 } 536 537 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val) 538 { 539 540 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, 541 sizeof(*val)); 542 } 543 544 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) 545 { 546 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; 547 } 548 549 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) 550 { 551 u8 val; 552 if (pv_eoi_get_user(vcpu, &val) < 0) 553 apic_debug("Can't read EOI MSR value: 0x%llx\n", 554 (unsigned long long)vcpu->arch.pv_eoi.msr_val); 555 return val & 0x1; 556 } 557 558 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) 559 { 560 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { 561 apic_debug("Can't set EOI MSR value: 0x%llx\n", 562 (unsigned long long)vcpu->arch.pv_eoi.msr_val); 563 return; 564 } 565 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); 566 } 567 568 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) 569 { 570 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { 571 apic_debug("Can't clear EOI MSR value: 0x%llx\n", 572 (unsigned long long)vcpu->arch.pv_eoi.msr_val); 573 return; 574 } 575 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); 576 } 577 578 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) 579 { 580 int highest_irr; 581 if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active) 582 highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu); 583 else 584 highest_irr = apic_find_highest_irr(apic); 585 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) 586 return -1; 587 return highest_irr; 588 } 589 590 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr) 591 { 592 u32 tpr, isrv, ppr, old_ppr; 593 int isr; 594 595 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI); 596 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI); 597 isr = apic_find_highest_isr(apic); 598 isrv = (isr != -1) ? isr : 0; 599 600 if ((tpr & 0xf0) >= (isrv & 0xf0)) 601 ppr = tpr & 0xff; 602 else 603 ppr = isrv & 0xf0; 604 605 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x", 606 apic, ppr, isr, isrv); 607 608 *new_ppr = ppr; 609 if (old_ppr != ppr) 610 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr); 611 612 return ppr < old_ppr; 613 } 614 615 static void apic_update_ppr(struct kvm_lapic *apic) 616 { 617 u32 ppr; 618 619 if (__apic_update_ppr(apic, &ppr) && 620 apic_has_interrupt_for_ppr(apic, ppr) != -1) 621 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); 622 } 623 624 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu) 625 { 626 apic_update_ppr(vcpu->arch.apic); 627 } 628 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr); 629 630 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) 631 { 632 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr); 633 apic_update_ppr(apic); 634 } 635 636 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda) 637 { 638 return mda == (apic_x2apic_mode(apic) ? 639 X2APIC_BROADCAST : APIC_BROADCAST); 640 } 641 642 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda) 643 { 644 if (kvm_apic_broadcast(apic, mda)) 645 return true; 646 647 if (apic_x2apic_mode(apic)) 648 return mda == kvm_x2apic_id(apic); 649 650 /* 651 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if 652 * it were in x2APIC mode. Hotplugged VCPUs start in xAPIC mode and 653 * this allows unique addressing of VCPUs with APIC ID over 0xff. 654 * The 0xff condition is needed because writeable xAPIC ID. 655 */ 656 if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic)) 657 return true; 658 659 return mda == kvm_xapic_id(apic); 660 } 661 662 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) 663 { 664 u32 logical_id; 665 666 if (kvm_apic_broadcast(apic, mda)) 667 return true; 668 669 logical_id = kvm_lapic_get_reg(apic, APIC_LDR); 670 671 if (apic_x2apic_mode(apic)) 672 return ((logical_id >> 16) == (mda >> 16)) 673 && (logical_id & mda & 0xffff) != 0; 674 675 logical_id = GET_APIC_LOGICAL_ID(logical_id); 676 677 switch (kvm_lapic_get_reg(apic, APIC_DFR)) { 678 case APIC_DFR_FLAT: 679 return (logical_id & mda) != 0; 680 case APIC_DFR_CLUSTER: 681 return ((logical_id >> 4) == (mda >> 4)) 682 && (logical_id & mda & 0xf) != 0; 683 default: 684 apic_debug("Bad DFR vcpu %d: %08x\n", 685 apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR)); 686 return false; 687 } 688 } 689 690 /* The KVM local APIC implementation has two quirks: 691 * 692 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs 693 * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID. 694 * KVM doesn't do that aliasing. 695 * 696 * - in-kernel IOAPIC messages have to be delivered directly to 697 * x2APIC, because the kernel does not support interrupt remapping. 698 * In order to support broadcast without interrupt remapping, x2APIC 699 * rewrites the destination of non-IPI messages from APIC_BROADCAST 700 * to X2APIC_BROADCAST. 701 * 702 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is 703 * important when userspace wants to use x2APIC-format MSIs, because 704 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7". 705 */ 706 static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id, 707 struct kvm_lapic *source, struct kvm_lapic *target) 708 { 709 bool ipi = source != NULL; 710 711 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled && 712 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target)) 713 return X2APIC_BROADCAST; 714 715 return dest_id; 716 } 717 718 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, 719 int short_hand, unsigned int dest, int dest_mode) 720 { 721 struct kvm_lapic *target = vcpu->arch.apic; 722 u32 mda = kvm_apic_mda(vcpu, dest, source, target); 723 724 apic_debug("target %p, source %p, dest 0x%x, " 725 "dest_mode 0x%x, short_hand 0x%x\n", 726 target, source, dest, dest_mode, short_hand); 727 728 ASSERT(target); 729 switch (short_hand) { 730 case APIC_DEST_NOSHORT: 731 if (dest_mode == APIC_DEST_PHYSICAL) 732 return kvm_apic_match_physical_addr(target, mda); 733 else 734 return kvm_apic_match_logical_addr(target, mda); 735 case APIC_DEST_SELF: 736 return target == source; 737 case APIC_DEST_ALLINC: 738 return true; 739 case APIC_DEST_ALLBUT: 740 return target != source; 741 default: 742 apic_debug("kvm: apic: Bad dest shorthand value %x\n", 743 short_hand); 744 return false; 745 } 746 } 747 EXPORT_SYMBOL_GPL(kvm_apic_match_dest); 748 749 int kvm_vector_to_index(u32 vector, u32 dest_vcpus, 750 const unsigned long *bitmap, u32 bitmap_size) 751 { 752 u32 mod; 753 int i, idx = -1; 754 755 mod = vector % dest_vcpus; 756 757 for (i = 0; i <= mod; i++) { 758 idx = find_next_bit(bitmap, bitmap_size, idx + 1); 759 BUG_ON(idx == bitmap_size); 760 } 761 762 return idx; 763 } 764 765 static void kvm_apic_disabled_lapic_found(struct kvm *kvm) 766 { 767 if (!kvm->arch.disabled_lapic_found) { 768 kvm->arch.disabled_lapic_found = true; 769 printk(KERN_INFO 770 "Disabled LAPIC found during irq injection\n"); 771 } 772 } 773 774 static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src, 775 struct kvm_lapic_irq *irq, struct kvm_apic_map *map) 776 { 777 if (kvm->arch.x2apic_broadcast_quirk_disabled) { 778 if ((irq->dest_id == APIC_BROADCAST && 779 map->mode != KVM_APIC_MODE_X2APIC)) 780 return true; 781 if (irq->dest_id == X2APIC_BROADCAST) 782 return true; 783 } else { 784 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src); 785 if (irq->dest_id == (x2apic_ipi ? 786 X2APIC_BROADCAST : APIC_BROADCAST)) 787 return true; 788 } 789 790 return false; 791 } 792 793 /* Return true if the interrupt can be handled by using *bitmap as index mask 794 * for valid destinations in *dst array. 795 * Return false if kvm_apic_map_get_dest_lapic did nothing useful. 796 * Note: we may have zero kvm_lapic destinations when we return true, which 797 * means that the interrupt should be dropped. In this case, *bitmap would be 798 * zero and *dst undefined. 799 */ 800 static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, 801 struct kvm_lapic **src, struct kvm_lapic_irq *irq, 802 struct kvm_apic_map *map, struct kvm_lapic ***dst, 803 unsigned long *bitmap) 804 { 805 int i, lowest; 806 807 if (irq->shorthand == APIC_DEST_SELF && src) { 808 *dst = src; 809 *bitmap = 1; 810 return true; 811 } else if (irq->shorthand) 812 return false; 813 814 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map)) 815 return false; 816 817 if (irq->dest_mode == APIC_DEST_PHYSICAL) { 818 if (irq->dest_id > map->max_apic_id) { 819 *bitmap = 0; 820 } else { 821 *dst = &map->phys_map[irq->dest_id]; 822 *bitmap = 1; 823 } 824 return true; 825 } 826 827 *bitmap = 0; 828 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst, 829 (u16 *)bitmap)) 830 return false; 831 832 if (!kvm_lowest_prio_delivery(irq)) 833 return true; 834 835 if (!kvm_vector_hashing_enabled()) { 836 lowest = -1; 837 for_each_set_bit(i, bitmap, 16) { 838 if (!(*dst)[i]) 839 continue; 840 if (lowest < 0) 841 lowest = i; 842 else if (kvm_apic_compare_prio((*dst)[i]->vcpu, 843 (*dst)[lowest]->vcpu) < 0) 844 lowest = i; 845 } 846 } else { 847 if (!*bitmap) 848 return true; 849 850 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap), 851 bitmap, 16); 852 853 if (!(*dst)[lowest]) { 854 kvm_apic_disabled_lapic_found(kvm); 855 *bitmap = 0; 856 return true; 857 } 858 } 859 860 *bitmap = (lowest >= 0) ? 1 << lowest : 0; 861 862 return true; 863 } 864 865 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, 866 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map) 867 { 868 struct kvm_apic_map *map; 869 unsigned long bitmap; 870 struct kvm_lapic **dst = NULL; 871 int i; 872 bool ret; 873 874 *r = -1; 875 876 if (irq->shorthand == APIC_DEST_SELF) { 877 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); 878 return true; 879 } 880 881 rcu_read_lock(); 882 map = rcu_dereference(kvm->arch.apic_map); 883 884 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap); 885 if (ret) 886 for_each_set_bit(i, &bitmap, 16) { 887 if (!dst[i]) 888 continue; 889 if (*r < 0) 890 *r = 0; 891 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); 892 } 893 894 rcu_read_unlock(); 895 return ret; 896 } 897 898 /* 899 * This routine tries to handler interrupts in posted mode, here is how 900 * it deals with different cases: 901 * - For single-destination interrupts, handle it in posted mode 902 * - Else if vector hashing is enabled and it is a lowest-priority 903 * interrupt, handle it in posted mode and use the following mechanism 904 * to find the destinaiton vCPU. 905 * 1. For lowest-priority interrupts, store all the possible 906 * destination vCPUs in an array. 907 * 2. Use "guest vector % max number of destination vCPUs" to find 908 * the right destination vCPU in the array for the lowest-priority 909 * interrupt. 910 * - Otherwise, use remapped mode to inject the interrupt. 911 */ 912 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq, 913 struct kvm_vcpu **dest_vcpu) 914 { 915 struct kvm_apic_map *map; 916 unsigned long bitmap; 917 struct kvm_lapic **dst = NULL; 918 bool ret = false; 919 920 if (irq->shorthand) 921 return false; 922 923 rcu_read_lock(); 924 map = rcu_dereference(kvm->arch.apic_map); 925 926 if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) && 927 hweight16(bitmap) == 1) { 928 unsigned long i = find_first_bit(&bitmap, 16); 929 930 if (dst[i]) { 931 *dest_vcpu = dst[i]->vcpu; 932 ret = true; 933 } 934 } 935 936 rcu_read_unlock(); 937 return ret; 938 } 939 940 /* 941 * Add a pending IRQ into lapic. 942 * Return 1 if successfully added and 0 if discarded. 943 */ 944 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, 945 int vector, int level, int trig_mode, 946 struct dest_map *dest_map) 947 { 948 int result = 0; 949 struct kvm_vcpu *vcpu = apic->vcpu; 950 951 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, 952 trig_mode, vector); 953 switch (delivery_mode) { 954 case APIC_DM_LOWEST: 955 vcpu->arch.apic_arb_prio++; 956 case APIC_DM_FIXED: 957 if (unlikely(trig_mode && !level)) 958 break; 959 960 /* FIXME add logic for vcpu on reset */ 961 if (unlikely(!apic_enabled(apic))) 962 break; 963 964 result = 1; 965 966 if (dest_map) { 967 __set_bit(vcpu->vcpu_id, dest_map->map); 968 dest_map->vectors[vcpu->vcpu_id] = vector; 969 } 970 971 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { 972 if (trig_mode) 973 kvm_lapic_set_vector(vector, apic->regs + APIC_TMR); 974 else 975 apic_clear_vector(vector, apic->regs + APIC_TMR); 976 } 977 978 if (vcpu->arch.apicv_active) 979 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); 980 else { 981 kvm_lapic_set_irr(vector, apic); 982 983 kvm_make_request(KVM_REQ_EVENT, vcpu); 984 kvm_vcpu_kick(vcpu); 985 } 986 break; 987 988 case APIC_DM_REMRD: 989 result = 1; 990 vcpu->arch.pv.pv_unhalted = 1; 991 kvm_make_request(KVM_REQ_EVENT, vcpu); 992 kvm_vcpu_kick(vcpu); 993 break; 994 995 case APIC_DM_SMI: 996 result = 1; 997 kvm_make_request(KVM_REQ_SMI, vcpu); 998 kvm_vcpu_kick(vcpu); 999 break; 1000 1001 case APIC_DM_NMI: 1002 result = 1; 1003 kvm_inject_nmi(vcpu); 1004 kvm_vcpu_kick(vcpu); 1005 break; 1006 1007 case APIC_DM_INIT: 1008 if (!trig_mode || level) { 1009 result = 1; 1010 /* assumes that there are only KVM_APIC_INIT/SIPI */ 1011 apic->pending_events = (1UL << KVM_APIC_INIT); 1012 /* make sure pending_events is visible before sending 1013 * the request */ 1014 smp_wmb(); 1015 kvm_make_request(KVM_REQ_EVENT, vcpu); 1016 kvm_vcpu_kick(vcpu); 1017 } else { 1018 apic_debug("Ignoring de-assert INIT to vcpu %d\n", 1019 vcpu->vcpu_id); 1020 } 1021 break; 1022 1023 case APIC_DM_STARTUP: 1024 apic_debug("SIPI to vcpu %d vector 0x%02x\n", 1025 vcpu->vcpu_id, vector); 1026 result = 1; 1027 apic->sipi_vector = vector; 1028 /* make sure sipi_vector is visible for the receiver */ 1029 smp_wmb(); 1030 set_bit(KVM_APIC_SIPI, &apic->pending_events); 1031 kvm_make_request(KVM_REQ_EVENT, vcpu); 1032 kvm_vcpu_kick(vcpu); 1033 break; 1034 1035 case APIC_DM_EXTINT: 1036 /* 1037 * Should only be called by kvm_apic_local_deliver() with LVT0, 1038 * before NMI watchdog was enabled. Already handled by 1039 * kvm_apic_accept_pic_intr(). 1040 */ 1041 break; 1042 1043 default: 1044 printk(KERN_ERR "TODO: unsupported delivery mode %x\n", 1045 delivery_mode); 1046 break; 1047 } 1048 return result; 1049 } 1050 1051 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) 1052 { 1053 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; 1054 } 1055 1056 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector) 1057 { 1058 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors); 1059 } 1060 1061 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) 1062 { 1063 int trigger_mode; 1064 1065 /* Eoi the ioapic only if the ioapic doesn't own the vector. */ 1066 if (!kvm_ioapic_handles_vector(apic, vector)) 1067 return; 1068 1069 /* Request a KVM exit to inform the userspace IOAPIC. */ 1070 if (irqchip_split(apic->vcpu->kvm)) { 1071 apic->vcpu->arch.pending_ioapic_eoi = vector; 1072 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu); 1073 return; 1074 } 1075 1076 if (apic_test_vector(vector, apic->regs + APIC_TMR)) 1077 trigger_mode = IOAPIC_LEVEL_TRIG; 1078 else 1079 trigger_mode = IOAPIC_EDGE_TRIG; 1080 1081 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); 1082 } 1083 1084 static int apic_set_eoi(struct kvm_lapic *apic) 1085 { 1086 int vector = apic_find_highest_isr(apic); 1087 1088 trace_kvm_eoi(apic, vector); 1089 1090 /* 1091 * Not every write EOI will has corresponding ISR, 1092 * one example is when Kernel check timer on setup_IO_APIC 1093 */ 1094 if (vector == -1) 1095 return vector; 1096 1097 apic_clear_isr(vector, apic); 1098 apic_update_ppr(apic); 1099 1100 if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap)) 1101 kvm_hv_synic_send_eoi(apic->vcpu, vector); 1102 1103 kvm_ioapic_send_eoi(apic, vector); 1104 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); 1105 return vector; 1106 } 1107 1108 /* 1109 * this interface assumes a trap-like exit, which has already finished 1110 * desired side effect including vISR and vPPR update. 1111 */ 1112 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector) 1113 { 1114 struct kvm_lapic *apic = vcpu->arch.apic; 1115 1116 trace_kvm_eoi(apic, vector); 1117 1118 kvm_ioapic_send_eoi(apic, vector); 1119 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); 1120 } 1121 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated); 1122 1123 static void apic_send_ipi(struct kvm_lapic *apic) 1124 { 1125 u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR); 1126 u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2); 1127 struct kvm_lapic_irq irq; 1128 1129 irq.vector = icr_low & APIC_VECTOR_MASK; 1130 irq.delivery_mode = icr_low & APIC_MODE_MASK; 1131 irq.dest_mode = icr_low & APIC_DEST_MASK; 1132 irq.level = (icr_low & APIC_INT_ASSERT) != 0; 1133 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG; 1134 irq.shorthand = icr_low & APIC_SHORT_MASK; 1135 irq.msi_redir_hint = false; 1136 if (apic_x2apic_mode(apic)) 1137 irq.dest_id = icr_high; 1138 else 1139 irq.dest_id = GET_APIC_DEST_FIELD(icr_high); 1140 1141 trace_kvm_apic_ipi(icr_low, irq.dest_id); 1142 1143 apic_debug("icr_high 0x%x, icr_low 0x%x, " 1144 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, " 1145 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, " 1146 "msi_redir_hint 0x%x\n", 1147 icr_high, icr_low, irq.shorthand, irq.dest_id, 1148 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, 1149 irq.vector, irq.msi_redir_hint); 1150 1151 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); 1152 } 1153 1154 static u32 apic_get_tmcct(struct kvm_lapic *apic) 1155 { 1156 ktime_t remaining, now; 1157 s64 ns; 1158 u32 tmcct; 1159 1160 ASSERT(apic != NULL); 1161 1162 /* if initial count is 0, current count should also be 0 */ 1163 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 || 1164 apic->lapic_timer.period == 0) 1165 return 0; 1166 1167 now = ktime_get(); 1168 remaining = ktime_sub(apic->lapic_timer.target_expiration, now); 1169 if (ktime_to_ns(remaining) < 0) 1170 remaining = 0; 1171 1172 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); 1173 tmcct = div64_u64(ns, 1174 (APIC_BUS_CYCLE_NS * apic->divide_count)); 1175 1176 return tmcct; 1177 } 1178 1179 static void __report_tpr_access(struct kvm_lapic *apic, bool write) 1180 { 1181 struct kvm_vcpu *vcpu = apic->vcpu; 1182 struct kvm_run *run = vcpu->run; 1183 1184 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu); 1185 run->tpr_access.rip = kvm_rip_read(vcpu); 1186 run->tpr_access.is_write = write; 1187 } 1188 1189 static inline void report_tpr_access(struct kvm_lapic *apic, bool write) 1190 { 1191 if (apic->vcpu->arch.tpr_access_reporting) 1192 __report_tpr_access(apic, write); 1193 } 1194 1195 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) 1196 { 1197 u32 val = 0; 1198 1199 if (offset >= LAPIC_MMIO_LENGTH) 1200 return 0; 1201 1202 switch (offset) { 1203 case APIC_ARBPRI: 1204 apic_debug("Access APIC ARBPRI register which is for P6\n"); 1205 break; 1206 1207 case APIC_TMCCT: /* Timer CCR */ 1208 if (apic_lvtt_tscdeadline(apic)) 1209 return 0; 1210 1211 val = apic_get_tmcct(apic); 1212 break; 1213 case APIC_PROCPRI: 1214 apic_update_ppr(apic); 1215 val = kvm_lapic_get_reg(apic, offset); 1216 break; 1217 case APIC_TASKPRI: 1218 report_tpr_access(apic, false); 1219 /* fall thru */ 1220 default: 1221 val = kvm_lapic_get_reg(apic, offset); 1222 break; 1223 } 1224 1225 return val; 1226 } 1227 1228 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev) 1229 { 1230 return container_of(dev, struct kvm_lapic, dev); 1231 } 1232 1233 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, 1234 void *data) 1235 { 1236 unsigned char alignment = offset & 0xf; 1237 u32 result; 1238 /* this bitmask has a bit cleared for each reserved register */ 1239 static const u64 rmask = 0x43ff01ffffffe70cULL; 1240 1241 if ((alignment + len) > 4) { 1242 apic_debug("KVM_APIC_READ: alignment error %x %d\n", 1243 offset, len); 1244 return 1; 1245 } 1246 1247 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) { 1248 apic_debug("KVM_APIC_READ: read reserved register %x\n", 1249 offset); 1250 return 1; 1251 } 1252 1253 result = __apic_read(apic, offset & ~0xf); 1254 1255 trace_kvm_apic_read(offset, result); 1256 1257 switch (len) { 1258 case 1: 1259 case 2: 1260 case 4: 1261 memcpy(data, (char *)&result + alignment, len); 1262 break; 1263 default: 1264 printk(KERN_ERR "Local APIC read with len = %x, " 1265 "should be 1,2, or 4 instead\n", len); 1266 break; 1267 } 1268 return 0; 1269 } 1270 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read); 1271 1272 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) 1273 { 1274 return kvm_apic_hw_enabled(apic) && 1275 addr >= apic->base_address && 1276 addr < apic->base_address + LAPIC_MMIO_LENGTH; 1277 } 1278 1279 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, 1280 gpa_t address, int len, void *data) 1281 { 1282 struct kvm_lapic *apic = to_lapic(this); 1283 u32 offset = address - apic->base_address; 1284 1285 if (!apic_mmio_in_range(apic, address)) 1286 return -EOPNOTSUPP; 1287 1288 kvm_lapic_reg_read(apic, offset, len, data); 1289 1290 return 0; 1291 } 1292 1293 static void update_divide_count(struct kvm_lapic *apic) 1294 { 1295 u32 tmp1, tmp2, tdcr; 1296 1297 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR); 1298 tmp1 = tdcr & 0xf; 1299 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1; 1300 apic->divide_count = 0x1 << (tmp2 & 0x7); 1301 1302 apic_debug("timer divide count is 0x%x\n", 1303 apic->divide_count); 1304 } 1305 1306 static void apic_update_lvtt(struct kvm_lapic *apic) 1307 { 1308 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) & 1309 apic->lapic_timer.timer_mode_mask; 1310 1311 if (apic->lapic_timer.timer_mode != timer_mode) { 1312 apic->lapic_timer.timer_mode = timer_mode; 1313 hrtimer_cancel(&apic->lapic_timer.timer); 1314 } 1315 } 1316 1317 static void apic_timer_expired(struct kvm_lapic *apic) 1318 { 1319 struct kvm_vcpu *vcpu = apic->vcpu; 1320 struct swait_queue_head *q = &vcpu->wq; 1321 struct kvm_timer *ktimer = &apic->lapic_timer; 1322 1323 if (atomic_read(&apic->lapic_timer.pending)) 1324 return; 1325 1326 atomic_inc(&apic->lapic_timer.pending); 1327 kvm_set_pending_timer(vcpu); 1328 1329 if (swait_active(q)) 1330 swake_up(q); 1331 1332 if (apic_lvtt_tscdeadline(apic)) 1333 ktimer->expired_tscdeadline = ktimer->tscdeadline; 1334 } 1335 1336 /* 1337 * On APICv, this test will cause a busy wait 1338 * during a higher-priority task. 1339 */ 1340 1341 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) 1342 { 1343 struct kvm_lapic *apic = vcpu->arch.apic; 1344 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT); 1345 1346 if (kvm_apic_hw_enabled(apic)) { 1347 int vec = reg & APIC_VECTOR_MASK; 1348 void *bitmap = apic->regs + APIC_ISR; 1349 1350 if (vcpu->arch.apicv_active) 1351 bitmap = apic->regs + APIC_IRR; 1352 1353 if (apic_test_vector(vec, bitmap)) 1354 return true; 1355 } 1356 return false; 1357 } 1358 1359 void wait_lapic_expire(struct kvm_vcpu *vcpu) 1360 { 1361 struct kvm_lapic *apic = vcpu->arch.apic; 1362 u64 guest_tsc, tsc_deadline; 1363 1364 if (!lapic_in_kernel(vcpu)) 1365 return; 1366 1367 if (apic->lapic_timer.expired_tscdeadline == 0) 1368 return; 1369 1370 if (!lapic_timer_int_injected(vcpu)) 1371 return; 1372 1373 tsc_deadline = apic->lapic_timer.expired_tscdeadline; 1374 apic->lapic_timer.expired_tscdeadline = 0; 1375 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 1376 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); 1377 1378 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ 1379 if (guest_tsc < tsc_deadline) 1380 __delay(min(tsc_deadline - guest_tsc, 1381 nsec_to_cycles(vcpu, lapic_timer_advance_ns))); 1382 } 1383 1384 static void start_sw_tscdeadline(struct kvm_lapic *apic) 1385 { 1386 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; 1387 u64 ns = 0; 1388 ktime_t expire; 1389 struct kvm_vcpu *vcpu = apic->vcpu; 1390 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; 1391 unsigned long flags; 1392 ktime_t now; 1393 1394 if (unlikely(!tscdeadline || !this_tsc_khz)) 1395 return; 1396 1397 local_irq_save(flags); 1398 1399 now = ktime_get(); 1400 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 1401 if (likely(tscdeadline > guest_tsc)) { 1402 ns = (tscdeadline - guest_tsc) * 1000000ULL; 1403 do_div(ns, this_tsc_khz); 1404 expire = ktime_add_ns(now, ns); 1405 expire = ktime_sub_ns(expire, lapic_timer_advance_ns); 1406 hrtimer_start(&apic->lapic_timer.timer, 1407 expire, HRTIMER_MODE_ABS_PINNED); 1408 } else 1409 apic_timer_expired(apic); 1410 1411 local_irq_restore(flags); 1412 } 1413 1414 static void start_sw_period(struct kvm_lapic *apic) 1415 { 1416 if (!apic->lapic_timer.period) 1417 return; 1418 1419 if (apic_lvtt_oneshot(apic) && 1420 ktime_after(ktime_get(), 1421 apic->lapic_timer.target_expiration)) { 1422 apic_timer_expired(apic); 1423 return; 1424 } 1425 1426 hrtimer_start(&apic->lapic_timer.timer, 1427 apic->lapic_timer.target_expiration, 1428 HRTIMER_MODE_ABS_PINNED); 1429 } 1430 1431 static bool set_target_expiration(struct kvm_lapic *apic) 1432 { 1433 ktime_t now; 1434 u64 tscl = rdtsc(); 1435 1436 now = ktime_get(); 1437 apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT) 1438 * APIC_BUS_CYCLE_NS * apic->divide_count; 1439 1440 if (!apic->lapic_timer.period) 1441 return false; 1442 1443 /* 1444 * Do not allow the guest to program periodic timers with small 1445 * interval, since the hrtimers are not throttled by the host 1446 * scheduler. 1447 */ 1448 if (apic_lvtt_period(apic)) { 1449 s64 min_period = min_timer_period_us * 1000LL; 1450 1451 if (apic->lapic_timer.period < min_period) { 1452 pr_info_ratelimited( 1453 "kvm: vcpu %i: requested %lld ns " 1454 "lapic timer period limited to %lld ns\n", 1455 apic->vcpu->vcpu_id, 1456 apic->lapic_timer.period, min_period); 1457 apic->lapic_timer.period = min_period; 1458 } 1459 } 1460 1461 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" 1462 PRIx64 ", " 1463 "timer initial count 0x%x, period %lldns, " 1464 "expire @ 0x%016" PRIx64 ".\n", __func__, 1465 APIC_BUS_CYCLE_NS, ktime_to_ns(now), 1466 kvm_lapic_get_reg(apic, APIC_TMICT), 1467 apic->lapic_timer.period, 1468 ktime_to_ns(ktime_add_ns(now, 1469 apic->lapic_timer.period))); 1470 1471 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + 1472 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1473 apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period); 1474 1475 return true; 1476 } 1477 1478 static void advance_periodic_target_expiration(struct kvm_lapic *apic) 1479 { 1480 apic->lapic_timer.tscdeadline += 1481 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1482 apic->lapic_timer.target_expiration = 1483 ktime_add_ns(apic->lapic_timer.target_expiration, 1484 apic->lapic_timer.period); 1485 } 1486 1487 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) 1488 { 1489 if (!lapic_in_kernel(vcpu)) 1490 return false; 1491 1492 return vcpu->arch.apic->lapic_timer.hv_timer_in_use; 1493 } 1494 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); 1495 1496 static void cancel_hv_timer(struct kvm_lapic *apic) 1497 { 1498 WARN_ON(preemptible()); 1499 WARN_ON(!apic->lapic_timer.hv_timer_in_use); 1500 kvm_x86_ops->cancel_hv_timer(apic->vcpu); 1501 apic->lapic_timer.hv_timer_in_use = false; 1502 } 1503 1504 static bool start_hv_timer(struct kvm_lapic *apic) 1505 { 1506 struct kvm_timer *ktimer = &apic->lapic_timer; 1507 int r; 1508 1509 WARN_ON(preemptible()); 1510 if (!kvm_x86_ops->set_hv_timer) 1511 return false; 1512 1513 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) 1514 return false; 1515 1516 r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline); 1517 if (r < 0) 1518 return false; 1519 1520 ktimer->hv_timer_in_use = true; 1521 hrtimer_cancel(&ktimer->timer); 1522 1523 /* 1524 * Also recheck ktimer->pending, in case the sw timer triggered in 1525 * the window. For periodic timer, leave the hv timer running for 1526 * simplicity, and the deadline will be recomputed on the next vmexit. 1527 */ 1528 if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) { 1529 if (r) 1530 apic_timer_expired(apic); 1531 return false; 1532 } 1533 1534 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true); 1535 return true; 1536 } 1537 1538 static void start_sw_timer(struct kvm_lapic *apic) 1539 { 1540 struct kvm_timer *ktimer = &apic->lapic_timer; 1541 1542 WARN_ON(preemptible()); 1543 if (apic->lapic_timer.hv_timer_in_use) 1544 cancel_hv_timer(apic); 1545 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) 1546 return; 1547 1548 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) 1549 start_sw_period(apic); 1550 else if (apic_lvtt_tscdeadline(apic)) 1551 start_sw_tscdeadline(apic); 1552 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false); 1553 } 1554 1555 static void restart_apic_timer(struct kvm_lapic *apic) 1556 { 1557 preempt_disable(); 1558 if (!start_hv_timer(apic)) 1559 start_sw_timer(apic); 1560 preempt_enable(); 1561 } 1562 1563 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) 1564 { 1565 struct kvm_lapic *apic = vcpu->arch.apic; 1566 1567 preempt_disable(); 1568 /* If the preempt notifier has already run, it also called apic_timer_expired */ 1569 if (!apic->lapic_timer.hv_timer_in_use) 1570 goto out; 1571 WARN_ON(swait_active(&vcpu->wq)); 1572 cancel_hv_timer(apic); 1573 apic_timer_expired(apic); 1574 1575 if (apic_lvtt_period(apic) && apic->lapic_timer.period) { 1576 advance_periodic_target_expiration(apic); 1577 restart_apic_timer(apic); 1578 } 1579 out: 1580 preempt_enable(); 1581 } 1582 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); 1583 1584 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu) 1585 { 1586 restart_apic_timer(vcpu->arch.apic); 1587 } 1588 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer); 1589 1590 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu) 1591 { 1592 struct kvm_lapic *apic = vcpu->arch.apic; 1593 1594 preempt_disable(); 1595 /* Possibly the TSC deadline timer is not enabled yet */ 1596 if (apic->lapic_timer.hv_timer_in_use) 1597 start_sw_timer(apic); 1598 preempt_enable(); 1599 } 1600 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer); 1601 1602 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu) 1603 { 1604 struct kvm_lapic *apic = vcpu->arch.apic; 1605 1606 WARN_ON(!apic->lapic_timer.hv_timer_in_use); 1607 restart_apic_timer(apic); 1608 } 1609 1610 static void start_apic_timer(struct kvm_lapic *apic) 1611 { 1612 atomic_set(&apic->lapic_timer.pending, 0); 1613 1614 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) 1615 && !set_target_expiration(apic)) 1616 return; 1617 1618 restart_apic_timer(apic); 1619 } 1620 1621 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) 1622 { 1623 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val); 1624 1625 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) { 1626 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode; 1627 if (lvt0_in_nmi_mode) { 1628 apic_debug("Receive NMI setting on APIC_LVT0 " 1629 "for cpu %d\n", apic->vcpu->vcpu_id); 1630 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); 1631 } else 1632 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); 1633 } 1634 } 1635 1636 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) 1637 { 1638 int ret = 0; 1639 1640 trace_kvm_apic_write(reg, val); 1641 1642 switch (reg) { 1643 case APIC_ID: /* Local APIC ID */ 1644 if (!apic_x2apic_mode(apic)) 1645 kvm_apic_set_xapic_id(apic, val >> 24); 1646 else 1647 ret = 1; 1648 break; 1649 1650 case APIC_TASKPRI: 1651 report_tpr_access(apic, true); 1652 apic_set_tpr(apic, val & 0xff); 1653 break; 1654 1655 case APIC_EOI: 1656 apic_set_eoi(apic); 1657 break; 1658 1659 case APIC_LDR: 1660 if (!apic_x2apic_mode(apic)) 1661 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); 1662 else 1663 ret = 1; 1664 break; 1665 1666 case APIC_DFR: 1667 if (!apic_x2apic_mode(apic)) { 1668 kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); 1669 recalculate_apic_map(apic->vcpu->kvm); 1670 } else 1671 ret = 1; 1672 break; 1673 1674 case APIC_SPIV: { 1675 u32 mask = 0x3ff; 1676 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) 1677 mask |= APIC_SPIV_DIRECTED_EOI; 1678 apic_set_spiv(apic, val & mask); 1679 if (!(val & APIC_SPIV_APIC_ENABLED)) { 1680 int i; 1681 u32 lvt_val; 1682 1683 for (i = 0; i < KVM_APIC_LVT_NUM; i++) { 1684 lvt_val = kvm_lapic_get_reg(apic, 1685 APIC_LVTT + 0x10 * i); 1686 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, 1687 lvt_val | APIC_LVT_MASKED); 1688 } 1689 apic_update_lvtt(apic); 1690 atomic_set(&apic->lapic_timer.pending, 0); 1691 1692 } 1693 break; 1694 } 1695 case APIC_ICR: 1696 /* No delay here, so we always clear the pending bit */ 1697 kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); 1698 apic_send_ipi(apic); 1699 break; 1700 1701 case APIC_ICR2: 1702 if (!apic_x2apic_mode(apic)) 1703 val &= 0xff000000; 1704 kvm_lapic_set_reg(apic, APIC_ICR2, val); 1705 break; 1706 1707 case APIC_LVT0: 1708 apic_manage_nmi_watchdog(apic, val); 1709 case APIC_LVTTHMR: 1710 case APIC_LVTPC: 1711 case APIC_LVT1: 1712 case APIC_LVTERR: 1713 /* TODO: Check vector */ 1714 if (!kvm_apic_sw_enabled(apic)) 1715 val |= APIC_LVT_MASKED; 1716 1717 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; 1718 kvm_lapic_set_reg(apic, reg, val); 1719 1720 break; 1721 1722 case APIC_LVTT: 1723 if (!kvm_apic_sw_enabled(apic)) 1724 val |= APIC_LVT_MASKED; 1725 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); 1726 kvm_lapic_set_reg(apic, APIC_LVTT, val); 1727 apic_update_lvtt(apic); 1728 break; 1729 1730 case APIC_TMICT: 1731 if (apic_lvtt_tscdeadline(apic)) 1732 break; 1733 1734 hrtimer_cancel(&apic->lapic_timer.timer); 1735 kvm_lapic_set_reg(apic, APIC_TMICT, val); 1736 start_apic_timer(apic); 1737 break; 1738 1739 case APIC_TDCR: 1740 if (val & 4) 1741 apic_debug("KVM_WRITE:TDCR %x\n", val); 1742 kvm_lapic_set_reg(apic, APIC_TDCR, val); 1743 update_divide_count(apic); 1744 break; 1745 1746 case APIC_ESR: 1747 if (apic_x2apic_mode(apic) && val != 0) { 1748 apic_debug("KVM_WRITE:ESR not zero %x\n", val); 1749 ret = 1; 1750 } 1751 break; 1752 1753 case APIC_SELF_IPI: 1754 if (apic_x2apic_mode(apic)) { 1755 kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); 1756 } else 1757 ret = 1; 1758 break; 1759 default: 1760 ret = 1; 1761 break; 1762 } 1763 if (ret) 1764 apic_debug("Local APIC Write to read-only register %x\n", reg); 1765 return ret; 1766 } 1767 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write); 1768 1769 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, 1770 gpa_t address, int len, const void *data) 1771 { 1772 struct kvm_lapic *apic = to_lapic(this); 1773 unsigned int offset = address - apic->base_address; 1774 u32 val; 1775 1776 if (!apic_mmio_in_range(apic, address)) 1777 return -EOPNOTSUPP; 1778 1779 /* 1780 * APIC register must be aligned on 128-bits boundary. 1781 * 32/64/128 bits registers must be accessed thru 32 bits. 1782 * Refer SDM 8.4.1 1783 */ 1784 if (len != 4 || (offset & 0xf)) { 1785 /* Don't shout loud, $infamous_os would cause only noise. */ 1786 apic_debug("apic write: bad size=%d %lx\n", len, (long)address); 1787 return 0; 1788 } 1789 1790 val = *(u32*)data; 1791 1792 /* too common printing */ 1793 if (offset != APIC_EOI) 1794 apic_debug("%s: offset 0x%x with length 0x%x, and value is " 1795 "0x%x\n", __func__, offset, len, val); 1796 1797 kvm_lapic_reg_write(apic, offset & 0xff0, val); 1798 1799 return 0; 1800 } 1801 1802 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) 1803 { 1804 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0); 1805 } 1806 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); 1807 1808 /* emulate APIC access in a trap manner */ 1809 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) 1810 { 1811 u32 val = 0; 1812 1813 /* hw has done the conditional check and inst decode */ 1814 offset &= 0xff0; 1815 1816 kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val); 1817 1818 /* TODO: optimize to just emulate side effect w/o one more write */ 1819 kvm_lapic_reg_write(vcpu->arch.apic, offset, val); 1820 } 1821 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); 1822 1823 void kvm_free_lapic(struct kvm_vcpu *vcpu) 1824 { 1825 struct kvm_lapic *apic = vcpu->arch.apic; 1826 1827 if (!vcpu->arch.apic) 1828 return; 1829 1830 hrtimer_cancel(&apic->lapic_timer.timer); 1831 1832 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) 1833 static_key_slow_dec_deferred(&apic_hw_disabled); 1834 1835 if (!apic->sw_enabled) 1836 static_key_slow_dec_deferred(&apic_sw_disabled); 1837 1838 if (apic->regs) 1839 free_page((unsigned long)apic->regs); 1840 1841 kfree(apic); 1842 } 1843 1844 /* 1845 *---------------------------------------------------------------------- 1846 * LAPIC interface 1847 *---------------------------------------------------------------------- 1848 */ 1849 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) 1850 { 1851 struct kvm_lapic *apic = vcpu->arch.apic; 1852 1853 if (!lapic_in_kernel(vcpu) || 1854 !apic_lvtt_tscdeadline(apic)) 1855 return 0; 1856 1857 return apic->lapic_timer.tscdeadline; 1858 } 1859 1860 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) 1861 { 1862 struct kvm_lapic *apic = vcpu->arch.apic; 1863 1864 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || 1865 apic_lvtt_period(apic)) 1866 return; 1867 1868 hrtimer_cancel(&apic->lapic_timer.timer); 1869 apic->lapic_timer.tscdeadline = data; 1870 start_apic_timer(apic); 1871 } 1872 1873 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) 1874 { 1875 struct kvm_lapic *apic = vcpu->arch.apic; 1876 1877 apic_set_tpr(apic, ((cr8 & 0x0f) << 4) 1878 | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4)); 1879 } 1880 1881 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) 1882 { 1883 u64 tpr; 1884 1885 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI); 1886 1887 return (tpr & 0xf0) >> 4; 1888 } 1889 1890 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) 1891 { 1892 u64 old_value = vcpu->arch.apic_base; 1893 struct kvm_lapic *apic = vcpu->arch.apic; 1894 1895 if (!apic) 1896 value |= MSR_IA32_APICBASE_BSP; 1897 1898 vcpu->arch.apic_base = value; 1899 1900 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) 1901 kvm_update_cpuid(vcpu); 1902 1903 if (!apic) 1904 return; 1905 1906 /* update jump label if enable bit changes */ 1907 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { 1908 if (value & MSR_IA32_APICBASE_ENABLE) { 1909 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); 1910 static_key_slow_dec_deferred(&apic_hw_disabled); 1911 } else { 1912 static_key_slow_inc(&apic_hw_disabled.key); 1913 recalculate_apic_map(vcpu->kvm); 1914 } 1915 } 1916 1917 if ((old_value ^ value) & X2APIC_ENABLE) { 1918 if (value & X2APIC_ENABLE) { 1919 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); 1920 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); 1921 } else 1922 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); 1923 } 1924 1925 apic->base_address = apic->vcpu->arch.apic_base & 1926 MSR_IA32_APICBASE_BASE; 1927 1928 if ((value & MSR_IA32_APICBASE_ENABLE) && 1929 apic->base_address != APIC_DEFAULT_PHYS_BASE) 1930 pr_warn_once("APIC base relocation is unsupported by KVM"); 1931 1932 /* with FSB delivery interrupt, we can restart APIC functionality */ 1933 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is " 1934 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); 1935 1936 } 1937 1938 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) 1939 { 1940 struct kvm_lapic *apic; 1941 int i; 1942 1943 apic_debug("%s\n", __func__); 1944 1945 ASSERT(vcpu); 1946 apic = vcpu->arch.apic; 1947 ASSERT(apic != NULL); 1948 1949 /* Stop the timer in case it's a reset to an active apic */ 1950 hrtimer_cancel(&apic->lapic_timer.timer); 1951 1952 if (!init_event) { 1953 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE | 1954 MSR_IA32_APICBASE_ENABLE); 1955 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); 1956 } 1957 kvm_apic_set_version(apic->vcpu); 1958 1959 for (i = 0; i < KVM_APIC_LVT_NUM; i++) 1960 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1961 apic_update_lvtt(apic); 1962 if (kvm_vcpu_is_reset_bsp(vcpu) && 1963 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) 1964 kvm_lapic_set_reg(apic, APIC_LVT0, 1965 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1966 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); 1967 1968 kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU); 1969 apic_set_spiv(apic, 0xff); 1970 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0); 1971 if (!apic_x2apic_mode(apic)) 1972 kvm_apic_set_ldr(apic, 0); 1973 kvm_lapic_set_reg(apic, APIC_ESR, 0); 1974 kvm_lapic_set_reg(apic, APIC_ICR, 0); 1975 kvm_lapic_set_reg(apic, APIC_ICR2, 0); 1976 kvm_lapic_set_reg(apic, APIC_TDCR, 0); 1977 kvm_lapic_set_reg(apic, APIC_TMICT, 0); 1978 for (i = 0; i < 8; i++) { 1979 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0); 1980 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0); 1981 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0); 1982 } 1983 apic->irr_pending = vcpu->arch.apicv_active; 1984 apic->isr_count = vcpu->arch.apicv_active ? 1 : 0; 1985 apic->highest_isr_cache = -1; 1986 update_divide_count(apic); 1987 atomic_set(&apic->lapic_timer.pending, 0); 1988 if (kvm_vcpu_is_bsp(vcpu)) 1989 kvm_lapic_set_base(vcpu, 1990 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); 1991 vcpu->arch.pv_eoi.msr_val = 0; 1992 apic_update_ppr(apic); 1993 1994 vcpu->arch.apic_arb_prio = 0; 1995 vcpu->arch.apic_attention = 0; 1996 1997 apic_debug("%s: vcpu=%p, id=0x%x, base_msr=" 1998 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, 1999 vcpu, kvm_lapic_get_reg(apic, APIC_ID), 2000 vcpu->arch.apic_base, apic->base_address); 2001 } 2002 2003 /* 2004 *---------------------------------------------------------------------- 2005 * timer interface 2006 *---------------------------------------------------------------------- 2007 */ 2008 2009 static bool lapic_is_periodic(struct kvm_lapic *apic) 2010 { 2011 return apic_lvtt_period(apic); 2012 } 2013 2014 int apic_has_pending_timer(struct kvm_vcpu *vcpu) 2015 { 2016 struct kvm_lapic *apic = vcpu->arch.apic; 2017 2018 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT)) 2019 return atomic_read(&apic->lapic_timer.pending); 2020 2021 return 0; 2022 } 2023 2024 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) 2025 { 2026 u32 reg = kvm_lapic_get_reg(apic, lvt_type); 2027 int vector, mode, trig_mode; 2028 2029 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { 2030 vector = reg & APIC_VECTOR_MASK; 2031 mode = reg & APIC_MODE_MASK; 2032 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; 2033 return __apic_accept_irq(apic, mode, vector, 1, trig_mode, 2034 NULL); 2035 } 2036 return 0; 2037 } 2038 2039 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) 2040 { 2041 struct kvm_lapic *apic = vcpu->arch.apic; 2042 2043 if (apic) 2044 kvm_apic_local_deliver(apic, APIC_LVT0); 2045 } 2046 2047 static const struct kvm_io_device_ops apic_mmio_ops = { 2048 .read = apic_mmio_read, 2049 .write = apic_mmio_write, 2050 }; 2051 2052 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) 2053 { 2054 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); 2055 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); 2056 2057 apic_timer_expired(apic); 2058 2059 if (lapic_is_periodic(apic)) { 2060 advance_periodic_target_expiration(apic); 2061 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); 2062 return HRTIMER_RESTART; 2063 } else 2064 return HRTIMER_NORESTART; 2065 } 2066 2067 int kvm_create_lapic(struct kvm_vcpu *vcpu) 2068 { 2069 struct kvm_lapic *apic; 2070 2071 ASSERT(vcpu != NULL); 2072 apic_debug("apic_init %d\n", vcpu->vcpu_id); 2073 2074 apic = kzalloc(sizeof(*apic), GFP_KERNEL); 2075 if (!apic) 2076 goto nomem; 2077 2078 vcpu->arch.apic = apic; 2079 2080 apic->regs = (void *)get_zeroed_page(GFP_KERNEL); 2081 if (!apic->regs) { 2082 printk(KERN_ERR "malloc apic regs error for vcpu %x\n", 2083 vcpu->vcpu_id); 2084 goto nomem_free_apic; 2085 } 2086 apic->vcpu = vcpu; 2087 2088 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 2089 HRTIMER_MODE_ABS_PINNED); 2090 apic->lapic_timer.timer.function = apic_timer_fn; 2091 2092 /* 2093 * APIC is created enabled. This will prevent kvm_lapic_set_base from 2094 * thinking that APIC satet has changed. 2095 */ 2096 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; 2097 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ 2098 kvm_lapic_reset(vcpu, false); 2099 kvm_iodevice_init(&apic->dev, &apic_mmio_ops); 2100 2101 return 0; 2102 nomem_free_apic: 2103 kfree(apic); 2104 nomem: 2105 return -ENOMEM; 2106 } 2107 2108 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) 2109 { 2110 struct kvm_lapic *apic = vcpu->arch.apic; 2111 u32 ppr; 2112 2113 if (!apic_enabled(apic)) 2114 return -1; 2115 2116 __apic_update_ppr(apic, &ppr); 2117 return apic_has_interrupt_for_ppr(apic, ppr); 2118 } 2119 2120 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) 2121 { 2122 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0); 2123 int r = 0; 2124 2125 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 2126 r = 1; 2127 if ((lvt0 & APIC_LVT_MASKED) == 0 && 2128 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT) 2129 r = 1; 2130 return r; 2131 } 2132 2133 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) 2134 { 2135 struct kvm_lapic *apic = vcpu->arch.apic; 2136 2137 if (atomic_read(&apic->lapic_timer.pending) > 0) { 2138 kvm_apic_local_deliver(apic, APIC_LVTT); 2139 if (apic_lvtt_tscdeadline(apic)) 2140 apic->lapic_timer.tscdeadline = 0; 2141 if (apic_lvtt_oneshot(apic)) { 2142 apic->lapic_timer.tscdeadline = 0; 2143 apic->lapic_timer.target_expiration = 0; 2144 } 2145 atomic_set(&apic->lapic_timer.pending, 0); 2146 } 2147 } 2148 2149 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) 2150 { 2151 int vector = kvm_apic_has_interrupt(vcpu); 2152 struct kvm_lapic *apic = vcpu->arch.apic; 2153 u32 ppr; 2154 2155 if (vector == -1) 2156 return -1; 2157 2158 /* 2159 * We get here even with APIC virtualization enabled, if doing 2160 * nested virtualization and L1 runs with the "acknowledge interrupt 2161 * on exit" mode. Then we cannot inject the interrupt via RVI, 2162 * because the process would deliver it through the IDT. 2163 */ 2164 2165 apic_clear_irr(vector, apic); 2166 if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) { 2167 /* 2168 * For auto-EOI interrupts, there might be another pending 2169 * interrupt above PPR, so check whether to raise another 2170 * KVM_REQ_EVENT. 2171 */ 2172 apic_update_ppr(apic); 2173 } else { 2174 /* 2175 * For normal interrupts, PPR has been raised and there cannot 2176 * be a higher-priority pending interrupt---except if there was 2177 * a concurrent interrupt injection, but that would have 2178 * triggered KVM_REQ_EVENT already. 2179 */ 2180 apic_set_isr(vector, apic); 2181 __apic_update_ppr(apic, &ppr); 2182 } 2183 2184 return vector; 2185 } 2186 2187 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, 2188 struct kvm_lapic_state *s, bool set) 2189 { 2190 if (apic_x2apic_mode(vcpu->arch.apic)) { 2191 u32 *id = (u32 *)(s->regs + APIC_ID); 2192 2193 if (vcpu->kvm->arch.x2apic_format) { 2194 if (*id != vcpu->vcpu_id) 2195 return -EINVAL; 2196 } else { 2197 if (set) 2198 *id >>= 24; 2199 else 2200 *id <<= 24; 2201 } 2202 } 2203 2204 return 0; 2205 } 2206 2207 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) 2208 { 2209 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s)); 2210 return kvm_apic_state_fixup(vcpu, s, false); 2211 } 2212 2213 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) 2214 { 2215 struct kvm_lapic *apic = vcpu->arch.apic; 2216 int r; 2217 2218 2219 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); 2220 /* set SPIV separately to get count of SW disabled APICs right */ 2221 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); 2222 2223 r = kvm_apic_state_fixup(vcpu, s, true); 2224 if (r) 2225 return r; 2226 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); 2227 2228 recalculate_apic_map(vcpu->kvm); 2229 kvm_apic_set_version(vcpu); 2230 2231 apic_update_ppr(apic); 2232 hrtimer_cancel(&apic->lapic_timer.timer); 2233 apic_update_lvtt(apic); 2234 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); 2235 update_divide_count(apic); 2236 start_apic_timer(apic); 2237 apic->irr_pending = true; 2238 apic->isr_count = vcpu->arch.apicv_active ? 2239 1 : count_vectors(apic->regs + APIC_ISR); 2240 apic->highest_isr_cache = -1; 2241 if (vcpu->arch.apicv_active) { 2242 kvm_x86_ops->apicv_post_state_restore(vcpu); 2243 kvm_x86_ops->hwapic_irr_update(vcpu, 2244 apic_find_highest_irr(apic)); 2245 kvm_x86_ops->hwapic_isr_update(vcpu, 2246 apic_find_highest_isr(apic)); 2247 } 2248 kvm_make_request(KVM_REQ_EVENT, vcpu); 2249 if (ioapic_in_kernel(vcpu->kvm)) 2250 kvm_rtc_eoi_tracking_restore_one(vcpu); 2251 2252 vcpu->arch.apic_arb_prio = 0; 2253 2254 return 0; 2255 } 2256 2257 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) 2258 { 2259 struct hrtimer *timer; 2260 2261 if (!lapic_in_kernel(vcpu)) 2262 return; 2263 2264 timer = &vcpu->arch.apic->lapic_timer.timer; 2265 if (hrtimer_cancel(timer)) 2266 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 2267 } 2268 2269 /* 2270 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt 2271 * 2272 * Detect whether guest triggered PV EOI since the 2273 * last entry. If yes, set EOI on guests's behalf. 2274 * Clear PV EOI in guest memory in any case. 2275 */ 2276 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, 2277 struct kvm_lapic *apic) 2278 { 2279 bool pending; 2280 int vector; 2281 /* 2282 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host 2283 * and KVM_PV_EOI_ENABLED in guest memory as follows: 2284 * 2285 * KVM_APIC_PV_EOI_PENDING is unset: 2286 * -> host disabled PV EOI. 2287 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set: 2288 * -> host enabled PV EOI, guest did not execute EOI yet. 2289 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset: 2290 * -> host enabled PV EOI, guest executed EOI. 2291 */ 2292 BUG_ON(!pv_eoi_enabled(vcpu)); 2293 pending = pv_eoi_get_pending(vcpu); 2294 /* 2295 * Clear pending bit in any case: it will be set again on vmentry. 2296 * While this might not be ideal from performance point of view, 2297 * this makes sure pv eoi is only enabled when we know it's safe. 2298 */ 2299 pv_eoi_clr_pending(vcpu); 2300 if (pending) 2301 return; 2302 vector = apic_set_eoi(apic); 2303 trace_kvm_pv_eoi(apic, vector); 2304 } 2305 2306 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) 2307 { 2308 u32 data; 2309 2310 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) 2311 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); 2312 2313 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 2314 return; 2315 2316 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, 2317 sizeof(u32))) 2318 return; 2319 2320 apic_set_tpr(vcpu->arch.apic, data & 0xff); 2321 } 2322 2323 /* 2324 * apic_sync_pv_eoi_to_guest - called before vmentry 2325 * 2326 * Detect whether it's safe to enable PV EOI and 2327 * if yes do so. 2328 */ 2329 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, 2330 struct kvm_lapic *apic) 2331 { 2332 if (!pv_eoi_enabled(vcpu) || 2333 /* IRR set or many bits in ISR: could be nested. */ 2334 apic->irr_pending || 2335 /* Cache not set: could be safe but we don't bother. */ 2336 apic->highest_isr_cache == -1 || 2337 /* Need EOI to update ioapic. */ 2338 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) { 2339 /* 2340 * PV EOI was disabled by apic_sync_pv_eoi_from_guest 2341 * so we need not do anything here. 2342 */ 2343 return; 2344 } 2345 2346 pv_eoi_set_pending(apic->vcpu); 2347 } 2348 2349 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) 2350 { 2351 u32 data, tpr; 2352 int max_irr, max_isr; 2353 struct kvm_lapic *apic = vcpu->arch.apic; 2354 2355 apic_sync_pv_eoi_to_guest(vcpu, apic); 2356 2357 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 2358 return; 2359 2360 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff; 2361 max_irr = apic_find_highest_irr(apic); 2362 if (max_irr < 0) 2363 max_irr = 0; 2364 max_isr = apic_find_highest_isr(apic); 2365 if (max_isr < 0) 2366 max_isr = 0; 2367 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); 2368 2369 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, 2370 sizeof(u32)); 2371 } 2372 2373 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 2374 { 2375 if (vapic_addr) { 2376 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 2377 &vcpu->arch.apic->vapic_cache, 2378 vapic_addr, sizeof(u32))) 2379 return -EINVAL; 2380 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 2381 } else { 2382 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 2383 } 2384 2385 vcpu->arch.apic->vapic_addr = vapic_addr; 2386 return 0; 2387 } 2388 2389 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) 2390 { 2391 struct kvm_lapic *apic = vcpu->arch.apic; 2392 u32 reg = (msr - APIC_BASE_MSR) << 4; 2393 2394 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) 2395 return 1; 2396 2397 if (reg == APIC_ICR2) 2398 return 1; 2399 2400 /* if this is ICR write vector before command */ 2401 if (reg == APIC_ICR) 2402 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); 2403 return kvm_lapic_reg_write(apic, reg, (u32)data); 2404 } 2405 2406 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) 2407 { 2408 struct kvm_lapic *apic = vcpu->arch.apic; 2409 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0; 2410 2411 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) 2412 return 1; 2413 2414 if (reg == APIC_DFR || reg == APIC_ICR2) { 2415 apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n", 2416 reg); 2417 return 1; 2418 } 2419 2420 if (kvm_lapic_reg_read(apic, reg, 4, &low)) 2421 return 1; 2422 if (reg == APIC_ICR) 2423 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high); 2424 2425 *data = (((u64)high) << 32) | low; 2426 2427 return 0; 2428 } 2429 2430 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) 2431 { 2432 struct kvm_lapic *apic = vcpu->arch.apic; 2433 2434 if (!lapic_in_kernel(vcpu)) 2435 return 1; 2436 2437 /* if this is ICR write vector before command */ 2438 if (reg == APIC_ICR) 2439 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); 2440 return kvm_lapic_reg_write(apic, reg, (u32)data); 2441 } 2442 2443 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) 2444 { 2445 struct kvm_lapic *apic = vcpu->arch.apic; 2446 u32 low, high = 0; 2447 2448 if (!lapic_in_kernel(vcpu)) 2449 return 1; 2450 2451 if (kvm_lapic_reg_read(apic, reg, 4, &low)) 2452 return 1; 2453 if (reg == APIC_ICR) 2454 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high); 2455 2456 *data = (((u64)high) << 32) | low; 2457 2458 return 0; 2459 } 2460 2461 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) 2462 { 2463 u64 addr = data & ~KVM_MSR_ENABLED; 2464 if (!IS_ALIGNED(addr, 4)) 2465 return 1; 2466 2467 vcpu->arch.pv_eoi.msr_val = data; 2468 if (!pv_eoi_enabled(vcpu)) 2469 return 0; 2470 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, 2471 addr, sizeof(u8)); 2472 } 2473 2474 void kvm_apic_accept_events(struct kvm_vcpu *vcpu) 2475 { 2476 struct kvm_lapic *apic = vcpu->arch.apic; 2477 u8 sipi_vector; 2478 unsigned long pe; 2479 2480 if (!lapic_in_kernel(vcpu) || !apic->pending_events) 2481 return; 2482 2483 /* 2484 * INITs are latched while in SMM. Because an SMM CPU cannot 2485 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs 2486 * and delay processing of INIT until the next RSM. 2487 */ 2488 if (is_smm(vcpu)) { 2489 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); 2490 if (test_bit(KVM_APIC_SIPI, &apic->pending_events)) 2491 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 2492 return; 2493 } 2494 2495 pe = xchg(&apic->pending_events, 0); 2496 if (test_bit(KVM_APIC_INIT, &pe)) { 2497 kvm_lapic_reset(vcpu, true); 2498 kvm_vcpu_reset(vcpu, true); 2499 if (kvm_vcpu_is_bsp(apic->vcpu)) 2500 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 2501 else 2502 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 2503 } 2504 if (test_bit(KVM_APIC_SIPI, &pe) && 2505 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 2506 /* evaluate pending_events before reading the vector */ 2507 smp_rmb(); 2508 sipi_vector = apic->sipi_vector; 2509 apic_debug("vcpu %d received sipi with vector # %x\n", 2510 vcpu->vcpu_id, sipi_vector); 2511 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); 2512 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 2513 } 2514 } 2515 2516 void kvm_lapic_init(void) 2517 { 2518 /* do not patch jump label more than once per second */ 2519 jump_label_rate_limit(&apic_hw_disabled, HZ); 2520 jump_label_rate_limit(&apic_sw_disabled, HZ); 2521 } 2522 2523 void kvm_lapic_exit(void) 2524 { 2525 static_key_deferred_flush(&apic_hw_disabled); 2526 static_key_deferred_flush(&apic_sw_disabled); 2527 } 2528