/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

#define APIC_LVT_NUM			6
/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
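
/*
 * For illustration: vector 0x61 lives in bit VEC_POS(0x61) = 1 of the
 * 32-bit word at offset REG_POS(0x61) = 0x30 within IRR/ISR/TMR, i.e.
 * each of these registers is laid out as eight 32-bit words spaced
 * 0x10 apart.
 */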

static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	*((u32 *) (apic->regs + reg_off)) = val;
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_set_vector(int vec, void *bitmap)
{
	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
		if (val & APIC_SPIV_APIC_ENABLED)
			static_key_slow_dec_deferred(&apic_sw_disabled);
		else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
	apic_set_reg(apic, APIC_SPIV, val);
}

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline int kvm_apic_id(struct kvm_lapic *apic)
{
	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}

#define KVM_X2APIC_CID_BITS 0

static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	mutex_lock(&kvm->arch.apic_map_lock);

	if (!new)
		goto out;

	new->ldr_bits = 8;
	/* flat mode is default */
	new->cid_shift = 8;
	new->cid_mask = 0;
	new->lid_mask = 0xff;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr;

		if (!kvm_apic_present(vcpu))
			continue;

		/*
		 * All APICs have to be configured in the same mode by an OS.
		 * We take advantage of this while building the logical id
		 * lookup table. After reset, APICs are in xapic/flat mode,
		 * so if we find an apic with a different setting we assume
		 * this is the mode the OS wants all apics to be in; build
		 * the lookup table accordingly.
		 */
		if (apic_x2apic_mode(apic)) {
			new->ldr_bits = 32;
			new->cid_shift = 16;
			new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
			new->lid_mask = 0xffff;
		} else if (kvm_apic_sw_enabled(apic) &&
				!new->cid_mask /* flat mode */ &&
				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
			new->cid_shift = 4;
			new->cid_mask = 0xf;
			new->lid_mask = 0xf;
		}

		new->phys_map[kvm_apic_id(apic)] = apic;

		ldr = kvm_apic_get_reg(apic, APIC_LDR);
		cid = apic_cluster_id(new, ldr);
		lid = apic_logical_id(new, ldr);

		if (lid)
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_vcpu_request_scan_ioapic(kvm);
}

static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
	apic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	apic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
		apic->lapic_timer.timer_mode_mask) ==
			APIC_LVT_TIMER_TSCDEADLINE);
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	apic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
	LVT_MASK,		/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	u32 i, pir_val;
	struct kvm_lapic *apic = vcpu->arch.apic;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
	apic->irr_pending = true;
	apic_set_vector(vec, apic->regs + APIC_IRR);
}

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	apic->irr_pending = false;
	apic_clear_vector(vec, apic->regs + APIC_IRR);
	if (apic_search_irr(apic) != -1)
		apic->irr_pending = true;
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	/* Note that we never get here with APIC virtualization enabled. */

	if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		++apic->isr_count;
	BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
	/*
	 * ISR (in service register) bit is set when injecting an interrupt.
	 * The highest vector is injected. Thus the latest bit set matches
	 * the highest bit in ISR.
	 */
	apic->highest_isr_cache = vec;
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	int highest_irr;

	/* This may race with setting of irr in __apic_accept_irq() and
	 * the value returned may be wrong, but kvm_vcpu_kick() in
	 * __apic_accept_irq will cause a vmexit immediately and the value
	 * will be recalculated on the next vmentry.
	 */
	if (!kvm_vcpu_has_lapic(vcpu))
		return 0;
	highest_irr = apic_find_highest_irr(vcpu->arch.apic);

	return highest_irr;
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     unsigned long *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		unsigned long *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;
	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	for (i = 0; i < 8; i++)
		apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	if (old_ppr != ppr) {
		apic_set_reg(apic, APIC_PROCPRI, ppr);
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	}
}
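
/*
 * For illustration: with TPR = 0x40 and highest in-service vector 0x61,
 * the ISR priority class (0x60) exceeds the TPR class (0x40), so
 * PPR = 0x60; with TPR = 0x80 the TPR class wins and PPR = 0x80.
 */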

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	apic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return dest == 0xff || kvm_apic_id(apic) == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	int result = 0;
	u32 logical_id;

	if (apic_x2apic_mode(apic)) {
		logical_id = kvm_apic_get_reg(apic, APIC_LDR);
		return logical_id & mda;
	}

	logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));

	switch (kvm_apic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		if (logical_id & mda)
			result = 1;
		break;
	case APIC_DFR_CLUSTER:
		if (((logical_id >> 4) == (mda >> 0x4))
		    && (logical_id & mda & 0xf))
			result = 1;
		break;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
		break;
	}

	return result;
}

int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			   int short_hand, int dest, int dest_mode)
{
	int result = 0;
	struct kvm_lapic *target = vcpu->arch.apic;

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	ASSERT(target);
	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == 0)
			/* Physical mode. */
			result = kvm_apic_match_physical_addr(target, dest);
		else
			/* Logical mode. */
			result = kvm_apic_match_logical_addr(target, dest);
		break;
	case APIC_DEST_SELF:
		result = (target == source);
		break;
	case APIC_DEST_ALLINC:
		result = 1;
		break;
	case APIC_DEST_ALLBUT:
		result = (target != source);
		break;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		break;
	}

	return result;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap = 1;
	struct kvm_lapic **dst;
	int i;
	bool ret = false;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (!map)
		goto out;

	if (irq->dest_mode == 0) { /* physical mode */
		if (irq->delivery_mode == APIC_DM_LOWEST ||
				irq->dest_id == 0xff)
			goto out;
		dst = &map->phys_map[irq->dest_id & 0xff];
	} else {
		u32 mda = irq->dest_id << (32 - map->ldr_bits);

		dst = map->logical_map[apic_cluster_id(map, mda)];

		bitmap = apic_logical_id(map, mda);

		if (irq->delivery_mode == APIC_DM_LOWEST) {
			int l = -1;
			for_each_set_bit(i, &bitmap, 16) {
				if (!dst[i])
					continue;
				if (l < 0)
					l = i;
				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
					l = i;
			}

			bitmap = (l >= 0) ? 1 << l : 0;
		}
	}

	for_each_set_bit(i, &bitmap, 16) {
		if (!dst[i])
			continue;
		if (*r < 0)
			*r = 0;
		*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
	}

	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     unsigned long *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		/* fall through */
	case APIC_DM_FIXED:
		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map)
			__set_bit(vcpu->vcpu_id, dest_map);

		if (kvm_x86_ops->deliver_posted_interrupt)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			apic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
					  trig_mode, vector, false);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		apic_debug("Ignoring guest SMI\n");
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
		int trigger_mode;
		if (apic_test_vector(vector, apic->regs + APIC_TMR))
			trigger_mode = IOAPIC_LEVEL_TRIG;
		else
			trigger_mode = IOAPIC_EDGE_TRIG;
		kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
	}
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit set; one example
	 * is when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * this interface assumes a trap-like exit, which has already finished
 * desired side effect including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = icr_low & APIC_INT_ASSERT;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}
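
/*
 * For illustration: with APIC_BUS_CYCLE_NS = 1 and divide_count = 16,
 * 8,000,000 ns remaining on the hrtimer (assuming this is less than one
 * full period) reads back as TMCCT = 8,000,000 / (1 * 16) = 500,000
 * bus-cycle ticks.
 */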

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ID:
		if (apic_x2apic_mode(apic))
			val = kvm_apic_id(apic);
		else
			val = kvm_apic_id(apic) << 24;
		break;
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_apic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_apic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	static const u64 rmask = 0x43ff01ffffffe70cULL;

	if ((alignment + len) > 4) {
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
		return 1;
	}

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
		return 1;
	}

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return kvm_apic_hw_enabled(apic) &&
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	apic_reg_read(apic, offset, len, data);

	return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);

	apic_debug("timer divide count is 0x%x\n",
				   apic->divide_count);
}
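
/*
 * For illustration: TDCR = 0x3 gives tmp2 = (0x3 | 0x0) + 1 = 4, so
 * divide_count = 1 << 4 = 16 (divide by 16); TDCR = 0xb gives
 * tmp2 = (0x3 | 0x4) + 1 = 8, so divide_count = 1 << (8 & 7) = 1
 * (divide by 1), matching the SDM's divide configuration encoding.
 */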

static void start_apic_timer(struct kvm_lapic *apic)
{
	ktime_t now;
	atomic_set(&apic->lapic_timer.pending, 0);

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		/* lapic timer in oneshot or periodic mode */
		now = apic->lapic_timer.timer.base->get_time();
		apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with a
		 * small interval, since the hrtimers are not throttled by
		 * the host scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
		}

		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
			      HRTIMER_MODE_ABS);

		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
			   kvm_apic_get_reg(apic, APIC_TMICT),
			   apic->lapic_timer.period,
			   ktime_to_ns(ktime_add_ns(now,
					apic->lapic_timer.period)));
	} else if (apic_lvtt_tscdeadline(apic)) {
		/* lapic timer in tsc deadline mode */
		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
		u64 ns = 0;
		struct kvm_vcpu *vcpu = apic->vcpu;
		unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
		unsigned long flags;

		if (unlikely(!tscdeadline || !this_tsc_khz))
			return;

		local_irq_save(flags);

		now = apic->lapic_timer.timer.base->get_time();
		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
		if (likely(tscdeadline > guest_tsc)) {
			ns = (tscdeadline - guest_tsc) * 1000000ULL;
			do_div(ns, this_tsc_khz);
		}
		hrtimer_start(&apic->lapic_timer.timer,
			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);

		local_irq_restore(flags);
	}
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));

	if (apic_lvt_nmi_mode(lvt0_val)) {
		if (!nmi_wd_enabled) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
		}
	} else if (nmi_wd_enabled)
		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}

static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < APIC_LVT_NUM; i++) {
				lvt_val = kvm_apic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			atomic_set(&apic->lapic_timer.pending, 0);

		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		apic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		/* fall through */
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
		apic_set_reg(apic, reg, val);

		break;

	case APIC_LVTT:
		if ((kvm_apic_get_reg(apic, APIC_LVTT) &
		    apic->lapic_timer.timer_mode_mask) !=
		   (val & apic->lapic_timer.timer_mode_mask))
			hrtimer_cancel(&apic->lapic_timer.timer);

		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		apic_set_reg(apic, APIC_LVTT, val);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		apic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR:
		if (val & 4)
			apic_debug("KVM_WRITE:TDCR %x\n", val);
		apic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		break;

	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}

static int apic_mmio_write(struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	/*
	 * APIC registers must be aligned on a 128-bit boundary.
	 * 32/64/128-bit registers must be accessed through 32-bit
	 * loads and stores. Refer SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32*)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	apic_reg_write(apic, offset & 0xff0, val);

	return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_lapic(vcpu))
		apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	apic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 0;

	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
		vcpu->arch.apic_base = value;
		return;
	}

	if (!kvm_vcpu_is_bsp(apic->vcpu))
		value &= ~MSR_IA32_APICBASE_BSP;
	vcpu->arch.apic_base = value;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE)
			static_key_slow_dec_deferred(&apic_hw_disabled);
		else
			static_key_slow_inc(&apic_hw_disabled.key);
		recalculate_apic_map(vcpu->kvm);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			u32 id = kvm_apic_id(apic);
			u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
			kvm_apic_set_ldr(apic, ldr);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}
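
/*
 * For illustration: when x2APIC mode is enabled above, APIC id 0x13 maps
 * to LDR 0x00010008 (cluster 0x13 >> 4 = 1 in bits 31:16, logical bit
 * 1 << (0x13 & 0xf) = bit 3 in bits 15:0).
 */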

void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __func__);

	ASSERT(vcpu);
	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	kvm_apic_set_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < APIC_LVT_NUM; i++)
		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_set_reg(apic, APIC_LVT0,
		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));

	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	apic_set_reg(apic, APIC_TASKPRI, 0);
	kvm_apic_set_ldr(apic, 0);
	apic_set_reg(apic, APIC_ESR, 0);
	apic_set_reg(apic, APIC_ICR, 0);
	apic_set_reg(apic, APIC_ICR2, 0);
	apic_set_reg(apic, APIC_TDCR, 0);
	apic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
			apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_apic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
	struct kvm_vcpu *vcpu = apic->vcpu;
	wait_queue_head_t *q = &vcpu->wq;

	/*
	 * There is a race window between reading and incrementing, but we do
	 * not care about potentially losing timer events in the !reinject
	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
	 * in vcpu_enter_guest.
	 */
	if (!atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		/* FIXME: this code should not know anything about vcpus */
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	if (waitqueue_active(q))
		wake_up_interruptible(q);

	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	kvm_lapic_set_base(vcpu,
			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);

	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Note that we never get here with APIC virtualization enabled. */

	if (vector == -1)
		return -1;

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);
	return vector;
}

void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	/* call kvm_apic_set_id() to put apic into apic_map */
	kvm_apic_set_id(apic, kvm_apic_id(apic));
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_rtc_eoi_tracking_restore_one(vcpu);
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!kvm_vcpu_has_lapic(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
		return 1;

	/* if this is an ICR write, set ICR2 (destination) before the command */
	if (msr == 0x830)
		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return apic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
		return 1;

	if (apic_reg_read(apic, reg, 4, &low))
		return 1;
	if (msr == 0x830)
		apic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
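
/*
 * For illustration: MSR 0x830 (the x2APIC ICR MSR) maps to register
 * offset (0x830 - APIC_BASE_MSR) << 4 = 0x30 << 4 = 0x300 = APIC_ICR,
 * which is why the 64-bit ICR is special-cased above.
 */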

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 1;

	/* if this is an ICR write, set ICR2 (destination) before the command */
	if (reg == APIC_ICR)
		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return apic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!kvm_vcpu_has_lapic(vcpu))
		return 1;

	if (apic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		apic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	unsigned int sipi_vector;
	unsigned long pe;

	if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
		return;

	pe = xchg(&apic->pending_events, 0);

	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu);
		kvm_vcpu_reset(vcpu);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}