/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

#define APIC_LVT_NUM			6
/* 0x14 is the APIC version for Xeon and Pentium; see SDM 8.4.8. */
#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define APIC_BROADCAST			0xFF
#define X2APIC_BROADCAST		0xFFFFFFFFul

#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
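/*
 * Note on the layout the two macros above encode: the 256 vectors of the
 * IRR/ISR/TMR are stored as eight 32-bit registers, each aligned on a
 * 16-byte boundary, so VEC_POS() is the bit index within a register and
 * REG_POS() is the byte offset of the register holding that vector.
 * E.g. vector 0x31 lives at bit VEC_POS(0x31) = 17 of the register at
 * offset REG_POS(0x31) = 0x10.
 */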
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	*((u32 *) (apic->regs + reg_off)) = val;
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_set_vector(int vec, void *bitmap)
{
	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

/* The logical map is definitely wrong if we have multiple
 * modes at the same time.  (Physical map is always right.)
 */
static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
{
	return !(map->mode & (map->mode - 1));
}

static inline void
apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
{
	unsigned lid_bits;

	BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER !=  4);
	BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT    !=  8);
	BUILD_BUG_ON(KVM_APIC_MODE_X2APIC        != 16);
	lid_bits = map->mode;

	*cid = dest_id >> lid_bits;
	*lid = dest_id & ((1 << lid_bits) - 1);
}

static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	mutex_lock(&kvm->arch.apic_map_lock);

	if (!new)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr, aid;

		if (!kvm_apic_present(vcpu))
			continue;

		aid = kvm_apic_id(apic);
		ldr = kvm_apic_get_reg(apic, APIC_LDR);

		if (aid < ARRAY_SIZE(new->phys_map))
			new->phys_map[aid] = apic;

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_logical_map_valid(new))
			continue;

		apic_logical_id(new, ldr, &cid, &lid);

		if (lid && cid < ARRAY_SIZE(new->logical_map))
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	apic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled) {
			static_key_slow_dec_deferred(&apic_sw_disabled);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}

static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
	apic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	apic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}
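/*
 * In x2APIC mode the LDR is derived from the APIC ID: the upper 16 bits
 * hold the cluster (id >> 4) and the lower 16 bits hold a one-hot bit for
 * the position within the cluster (id & 0xf).  E.g. id 0x13 yields
 * ldr = (1 << 16) | (1 << 3) = 0x10008.
 */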
static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u8 id)
{
	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));

	apic_set_reg(apic, APIC_ID, id << 24);
	apic_set_reg(apic, APIC_LDR, ldr);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	apic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
	LVT_MASK,		/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}
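/*
 * The posted-interrupt request array mirrors the IRR layout: eight 32-bit
 * words, one per IRR register.  Each word is atomically fetched and cleared
 * with xchg() and then ORed into the corresponding IRR register (spaced
 * 0x10 bytes apart), so bits posted concurrently by other CPUs are not lost.
 */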
void __kvm_apic_update_irr(u32 *pir, void *regs)
{
	u32 i, pir_val;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
	}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	__kvm_apic_update_irr(pir, apic->regs);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
	apic_set_vector(vec, apic->regs + APIC_IRR);
	/*
	 * irr_pending must be true if any interrupt is pending; set it after
	 * APIC_IRR to avoid a race with apic_clear_irr.
	 */
	apic->irr_pending = true;
}

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint.  It is always
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	if (apic->vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* try to update RVI */
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else {
		apic->irr_pending = false;
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * An ISR (in-service register) bit is set when an interrupt
		 * is injected.  The highest-priority vector is always the
		 * one injected, so the most recently set bit matches the
		 * highest bit in the ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here with APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * the value returned may be wrong, but kvm_vcpu_kick() in
	 * __apic_accept_irq() will cause a vmexit immediately and the
	 * value will be recalculated on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
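/*
 * PPR is the maximum of TPR and the priority class of the highest vector
 * in service: if TPR's priority class (bits 7:4) is at least that of the
 * ISRV, the full TPR is used, otherwise only the ISRV class with a zero
 * sub-class.  E.g. tpr = 0x35 and isrv = 0x58 give ppr = 0x50, while
 * tpr = 0x65 and isrv = 0x58 give ppr = 0x65.
 */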
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	if (old_ppr != ppr) {
		apic_set_reg(apic, APIC_PROCPRI, ppr);
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	}
}

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	apic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	if (apic_x2apic_mode(apic))
		return mda == X2APIC_BROADCAST;

	return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_apic_id(apic);

	return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_apic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);
	mda = GET_APIC_DEST_FIELD(mda);

	switch (kvm_apic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
		return false;
	}
}

/* The KVM APIC implementation has two quirks:
 *  - dest always begins at 0 while xAPIC MDA has offset 24,
 *  - IOxAPIC messages have to be delivered (directly) to x2APIC.
 */
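/*
 * Because of the first quirk, an xAPIC MDA is built by shifting the
 * destination into bits 31:24, e.g. dest_id 0x3 becomes the MDA
 * 0x03000000, while an x2APIC MDA is the 32-bit destination as-is.
 */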
static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
			struct kvm_lapic *target)
{
	bool ipi = source != NULL;
	bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);

	if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
		return X2APIC_BROADCAST;

	return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int short_hand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(dest, source, target);

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	ASSERT(target);
	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		return false;
	}
}
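/*
 * Select the mod-th set bit of the bitmap, where mod = vector % dest_vcpus;
 * this is the vector-hashing rule used for lowest-priority delivery.
 * E.g. with bitmap 0b1011 (three candidates) and vector 0x31,
 * mod = 49 % 3 = 1, so the second set bit is chosen and the function
 * returns index 1.
 */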
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap = 1;
	struct kvm_lapic **dst;
	int i;
	bool ret, x2apic_ipi;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	if (irq->shorthand)
		return false;

	x2apic_ipi = src && apic_x2apic_mode(src);
	if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
		return false;

	ret = true;
	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (!map) {
		ret = false;
		goto out;
	}

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
			goto out;

		dst = &map->phys_map[irq->dest_id];
	} else {
		u16 cid;

		if (!kvm_apic_logical_map_valid(map)) {
			ret = false;
			goto out;
		}

		apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);

		if (cid >= ARRAY_SIZE(map->logical_map))
			goto out;

		dst = map->logical_map[cid];

		if (!kvm_lowest_prio_delivery(irq))
			goto set_irq;

		if (!kvm_vector_hashing_enabled()) {
			int l = -1;
			for_each_set_bit(i, &bitmap, 16) {
				if (!dst[i])
					continue;
				if (l < 0)
					l = i;
				else if (kvm_apic_compare_prio(dst[i]->vcpu,
							dst[l]->vcpu) < 0)
					l = i;
			}
			bitmap = (l >= 0) ? 1 << l : 0;
		} else {
			int idx;
			unsigned int dest_vcpus;

			dest_vcpus = hweight16(bitmap);
			if (dest_vcpus == 0)
				goto out;

			idx = kvm_vector_to_index(irq->vector,
						  dest_vcpus, &bitmap, 16);

			if (!dst[idx]) {
				kvm_apic_disabled_lapic_found(kvm);
				goto out;
			}

			bitmap = (idx >= 0) ? 1 << idx : 0;
		}
	}

set_irq:
	for_each_set_bit(i, &bitmap, 16) {
		if (!dst[i])
			continue;
		if (*r < 0)
			*r = 0;
		*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
	}
out:
	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	bool ret = false;
	struct kvm_lapic *dst = NULL;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (!map)
		goto out;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id == 0xFF)
			goto out;

		if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
			goto out;

		dst = map->phys_map[irq->dest_id];
		if (dst && kvm_apic_present(dst->vcpu))
			*dest_vcpu = dst->vcpu;
		else
			goto out;
	} else {
		u16 cid;
		unsigned long bitmap = 1;
		int i, r = 0;

		if (!kvm_apic_logical_map_valid(map))
			goto out;

		apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);

		if (cid >= ARRAY_SIZE(map->logical_map))
			goto out;

		if (kvm_vector_hashing_enabled() &&
				kvm_lowest_prio_delivery(irq)) {
			int idx;
			unsigned int dest_vcpus;

			dest_vcpus = hweight16(bitmap);
			if (dest_vcpus == 0)
				goto out;

			idx = kvm_vector_to_index(irq->vector, dest_vcpus,
						  &bitmap, 16);

			dst = map->logical_map[cid][idx];
			if (!dst) {
				kvm_apic_disabled_lapic_found(kvm);
				goto out;
			}

			*dest_vcpu = dst->vcpu;
		} else {
			for_each_set_bit(i, &bitmap, 16) {
				dst = map->logical_map[cid][i];
				if (++r == 2)
					goto out;
			}

			if (dst && kvm_apic_present(dst->vcpu))
				*dest_vcpu = dst->vcpu;
			else
				goto out;
		}
	}

	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
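/*
 * Note that in the switch below APIC_DM_LOWEST deliberately falls through
 * to APIC_DM_FIXED: by this point the callers have already arbitrated
 * among the candidate vCPUs, so delivery proceeds like a fixed interrupt;
 * the increment of apic_arb_prio only records that this vCPU received a
 * lowest-priority interrupt (see kvm_apic_compare_prio()).
 */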
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				apic_set_vector(vector, apic->regs + APIC_TMR);
			else
				apic_clear_vector(vector, apic->regs + APIC_TMR);
		}

		if (vcpu->arch.apicv_active)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			apic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled.  Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every write to the EOI register has a corresponding bit in
	 * the ISR; one example is when the kernel checks the timer in
	 * setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
		   "msi_redir_hint 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector, irq.msi_redir_hint);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
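/*
 * The current count is derived from the hrtimer: with a bus cycle of
 * APIC_BUS_CYCLE_NS (1 ns), TMCCT = remaining_ns / (1 * divide_count).
 * E.g. with 2,000,000 ns left and a divide count of 2, the guest reads
 * back a TMCCT of 1,000,000.
 */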
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ID:
		if (apic_x2apic_mode(apic))
			val = kvm_apic_id(apic);
		else
			val = kvm_apic_id(apic) << 24;
		break;
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_apic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_apic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	static const u64 rmask = 0x43ff01ffffffe70cULL;

	if ((alignment + len) > 4) {
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
		return 1;
	}

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
		return 1;
	}

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return kvm_apic_hw_enabled(apic) &&
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			  gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	apic_reg_read(apic, offset, len, data);

	return 0;
}
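/*
 * The divide configuration register uses bits 0, 1 and 3; folding bit 3
 * down to bit 2 gives a 3-bit value n, and the divisor is 2^(n+1), with
 * the all-ones pattern wrapping around to 1.  E.g. TDCR 0x0 selects
 * divide-by-2 and TDCR 0xb (0b1011) selects divide-by-1.
 */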
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);

	apic_debug("timer divide count is 0x%x\n",
		   apic->divide_count);
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		apic->lapic_timer.timer_mode = timer_mode;
		hrtimer_cancel(&apic->lapic_timer.timer);
	}
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct swait_queue_head *q = &vcpu->wq;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	atomic_inc(&apic->lapic_timer.pending);
	kvm_set_pending_timer(vcpu);

	if (swait_active(q))
		swake_up(q);

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (vcpu->arch.apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	if (!lapic_in_kernel(vcpu))
		return;

	if (apic->lapic_timer.expired_tscdeadline == 0)
		return;

	if (!lapic_timer_int_injected(vcpu))
		return;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
	if (guest_tsc < tsc_deadline)
		__delay(tsc_deadline - guest_tsc);
}
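/*
 * In oneshot/periodic mode the period below is
 * TMICT * APIC_BUS_CYCLE_NS * divide_count; e.g. TMICT = 1,000,000 with a
 * divide count of 2 programs the hrtimer for 2,000,000 ns.  In TSC-deadline
 * mode the deadline is converted to nanoseconds with
 * ns = (tscdeadline - guest_tsc) * 1,000,000 / virtual_tsc_khz.
 */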
static void start_apic_timer(struct kvm_lapic *apic)
{
	ktime_t now;

	atomic_set(&apic->lapic_timer.pending, 0);

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		/* lapic timer in oneshot or periodic mode */
		now = apic->lapic_timer.timer.base->get_time();
		apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with small
		 * interval, since the hrtimers are not throttled by the host
		 * scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
		}

		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
			      HRTIMER_MODE_ABS_PINNED);

		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
			   kvm_apic_get_reg(apic, APIC_TMICT),
			   apic->lapic_timer.period,
			   ktime_to_ns(ktime_add_ns(now,
					apic->lapic_timer.period)));
	} else if (apic_lvtt_tscdeadline(apic)) {
		/* lapic timer in tsc deadline mode */
		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
		u64 ns = 0;
		ktime_t expire;
		struct kvm_vcpu *vcpu = apic->vcpu;
		unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
		unsigned long flags;

		if (unlikely(!tscdeadline || !this_tsc_khz))
			return;

		local_irq_save(flags);

		now = apic->lapic_timer.timer.base->get_time();
		guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
		if (likely(tscdeadline > guest_tsc)) {
			ns = (tscdeadline - guest_tsc) * 1000000ULL;
			do_div(ns, this_tsc_khz);
			expire = ktime_add_ns(now, ns);
			expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
			hrtimer_start(&apic->lapic_timer.timer,
				      expire, HRTIMER_MODE_ABS_PINNED);
		} else
			apic_timer_expired(apic);

		local_irq_restore(flags);
	}
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}
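/*
 * Note that the APIC_LVT0 case in the register-write dispatch below
 * intentionally falls through to the common LVT handling: after updating
 * the NMI-watchdog mode it still needs the same masking and write-back
 * as the other LVT registers.
 */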
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < APIC_LVT_NUM; i++) {
				lvt_val = kvm_apic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);

		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		apic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
		apic_set_reg(apic, reg, val);

		break;

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		apic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		apic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR:
		if (val & 4)
			apic_debug("KVM_WRITE:TDCR %x\n", val);
		apic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		break;

	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	/*
	 * APIC register must be aligned on 128-bits boundary.
	 * 32/64/128 bits registers must be accessed thru 32 bits.
	 * Refer SDM 8.4.1
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32*)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	apic_reg_write(apic, offset & 0xff0, val);

	return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	apic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}
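/*
 * CR8 holds only the priority class, i.e. TPR bits 7:4: writing CR8
 * places the value in the upper nibble of the TPR (bit 2 of the old TPR
 * is preserved) and reading CR8 extracts bits 7:4 again.  E.g. a CR8
 * value of 9 yields a TPR of 0x90.
 */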
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
		vcpu->arch.apic_base = value;
		return;
	}

	vcpu->arch.apic_base = value;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE)
			static_key_slow_dec_deferred(&apic_hw_disabled);
		else
			static_key_slow_inc(&apic_hw_disabled.key);
		recalculate_apic_map(vcpu->kvm);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __func__);

	ASSERT(vcpu);
	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event)
		kvm_apic_set_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < APIC_LVT_NUM; i++)
		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		apic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));

	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	apic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	apic_set_reg(apic, APIC_ESR, 0);
	apic_set_reg(apic, APIC_ICR, 0);
	apic_set_reg(apic, APIC_ICR2, 0);
	apic_set_reg(apic, APIC_TDCR, 0);
	apic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_apic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};
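/*
 * hrtimer callback: marks the timer as expired and, for a periodic timer,
 * pushes the expiry forward by one period and asks the hrtimer core to
 * restart it; a oneshot or tsc-deadline timer is not re-armed.
 */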
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);

	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * The APIC is created enabled.  This will prevent kvm_lapic_set_base
	 * from thinking that the APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	kvm_lapic_set_base(vcpu,
			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);

	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu, false);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!apic_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);

	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		apic_clear_isr(vector, apic);
		apic_update_ppr(apic);
	}

	return vector;
}
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	/* call kvm_apic_set_id() to put apic into apic_map */
	kvm_apic_set_id(apic, kvm_apic_id(apic));
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry.  If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
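/*
 * x2APIC MSRs map linearly onto the MMIO register layout: each register
 * is 16 bytes apart, so reg = (msr - APIC_BASE_MSR) << 4.  E.g. MSR 0x808
 * (TPR) maps to offset 0x80 (APIC_TASKPRI).  ICR is the one 64-bit
 * register; its high half is stored in ICR2, which is therefore not
 * separately addressable in x2APIC mode.
 */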
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is an ICR write, set the vector (ICR2) before the command */
	if (reg == APIC_ICR)
		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return apic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (apic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		apic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is an ICR write, set the vector (ICR2) before the command */
	if (reg == APIC_ICR)
		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return apic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (apic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		apic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu, true);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}