xref: /openbmc/linux/arch/x86/kvm/lapic.c (revision d2a266fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  * Local APIC virtualization
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2007 Novell
8  * Copyright (C) 2007 Intel
9  * Copyright 2009 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Dor Laor <dor.laor@qumranet.com>
13  *   Gregory Haskins <ghaskins@novell.com>
14  *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
15  *
16  * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17  */
18 
19 #include <linux/kvm_host.h>
20 #include <linux/kvm.h>
21 #include <linux/mm.h>
22 #include <linux/highmem.h>
23 #include <linux/smp.h>
24 #include <linux/hrtimer.h>
25 #include <linux/io.h>
26 #include <linux/export.h>
27 #include <linux/math64.h>
28 #include <linux/slab.h>
29 #include <asm/processor.h>
30 #include <asm/msr.h>
31 #include <asm/page.h>
32 #include <asm/current.h>
33 #include <asm/apicdef.h>
34 #include <asm/delay.h>
35 #include <linux/atomic.h>
36 #include <linux/jump_label.h>
37 #include "kvm_cache_regs.h"
38 #include "irq.h"
39 #include "ioapic.h"
40 #include "trace.h"
41 #include "x86.h"
42 #include "cpuid.h"
43 #include "hyperv.h"
44 
45 #ifndef CONFIG_X86_64
46 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
47 #else
48 #define mod_64(x, y) ((x) % (y))
49 #endif
50 
51 #define PRId64 "d"
52 #define PRIx64 "llx"
53 #define PRIu64 "u"
54 #define PRIo64 "o"
55 
56 /* 14 is the version for Xeon and Pentium 8.4.8 */
57 #define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
58 #define LAPIC_MMIO_LENGTH		(1 << 12)
59 /* The following defines are not in apicdef.h */
60 #define MAX_APIC_VECTOR			256
61 #define APIC_VECTORS_PER_REG		32
62 
63 static bool lapic_timer_advance_dynamic __read_mostly;
64 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
65 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
66 #define LAPIC_TIMER_ADVANCE_NS_INIT	1000
67 #define LAPIC_TIMER_ADVANCE_NS_MAX	5000
68 /* step-by-step approximation to mitigate fluctuation */
69 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
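
/*
 * The advance is tuned at runtime by adjust_lapic_timer_advance(): each
 * observed error (in guest TSC cycles) is converted to nanoseconds and
 * 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP of it is folded into timer_advance_ns,
 * so a single outlier moves the advance by at most an eighth of its own
 * magnitude.
 */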
70 
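/*
 * The IRR, ISR and TMR are 256 bits each, stored as eight 32-bit registers
 * spaced 0x10 bytes apart.  VEC_POS() and REG_POS() (defined in lapic.h)
 * split a vector accordingly: REG_POS(vec) is the register offset
 * ((vec >> 5) << 4) and VEC_POS(vec) the bit (vec & 31) within it, e.g.
 * vector 0x61 lands at offset +0x30, bit 1.
 */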
71 static inline int apic_test_vector(int vec, void *bitmap)
72 {
73 	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
74 }
75 
76 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
77 {
78 	struct kvm_lapic *apic = vcpu->arch.apic;
79 
80 	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
81 		apic_test_vector(vector, apic->regs + APIC_IRR);
82 }
83 
84 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
85 {
86 	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
87 }
88 
89 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
90 {
91 	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
92 }
93 
94 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
95 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
96 
97 static inline int apic_enabled(struct kvm_lapic *apic)
98 {
99 	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
100 }
101 
102 #define LVT_MASK	\
103 	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
104 
105 #define LINT_MASK	\
106 	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
107 	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
108 
109 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
110 {
111 	return apic->vcpu->vcpu_id;
112 }
113 
114 static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
115 {
116 	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
117 }
118 
119 bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
120 {
121 	return kvm_x86_ops.set_hv_timer
122 	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
123 		    kvm_can_post_timer_interrupt(vcpu));
124 }
125 EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);
126 
127 static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
128 {
129 	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
130 }
131 
132 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
133 		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
134 	switch (map->mode) {
135 	case KVM_APIC_MODE_X2APIC: {
136 		u32 offset = (dest_id >> 16) * 16;
137 		u32 max_apic_id = map->max_apic_id;
138 
139 		if (offset <= max_apic_id) {
140 			u8 cluster_size = min(max_apic_id - offset + 1, 16U);
141 
142 			offset = array_index_nospec(offset, map->max_apic_id + 1);
143 			*cluster = &map->phys_map[offset];
144 			*mask = dest_id & (0xffff >> (16 - cluster_size));
145 		} else {
146 			*mask = 0;
147 		}
148 
149 		return true;
150 		}
151 	case KVM_APIC_MODE_XAPIC_FLAT:
152 		*cluster = map->xapic_flat_map;
153 		*mask = dest_id & 0xff;
154 		return true;
155 	case KVM_APIC_MODE_XAPIC_CLUSTER:
156 		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
157 		*mask = dest_id & 0xf;
158 		return true;
159 	default:
160 		/* Not optimized. */
161 		return false;
162 	}
163 }
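
/*
 * In x2APIC mode the 32-bit logical destination encodes a cluster number in
 * bits 31:16 and a 16-CPU bitmask in bits 15:0; the lookup above therefore
 * indexes phys_map at cluster * 16 and returns the low word as the mask,
 * e.g. dest_id 0x00020008 selects bit 3 of cluster 2.
 */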
164 
165 static void kvm_apic_map_free(struct rcu_head *rcu)
166 {
167 	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
168 
169 	kvfree(map);
170 }
171 
172 /*
173  * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
174  *
175  * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
176  * kvm->arch.apic_map_lock held.
177  */
178 enum {
179 	CLEAN,
180 	UPDATE_IN_PROGRESS,
181 	DIRTY
182 };
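
/*
 * The setters below (kvm_apic_set_xapic_id() and friends) move the state to
 * DIRTY with atomic_set_release() and no lock.  kvm_recalculate_apic_map()
 * moves DIRTY -> UPDATE_IN_PROGRESS under apic_map_lock, rebuilds the map,
 * and releases CLEAN only if no new DIRTY write raced in; a racing write
 * leaves the state DIRTY so the next caller rebuilds again.
 */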
183 
184 void kvm_recalculate_apic_map(struct kvm *kvm)
185 {
186 	struct kvm_apic_map *new, *old = NULL;
187 	struct kvm_vcpu *vcpu;
188 	int i;
189 	u32 max_id = 255; /* enough space for any xAPIC ID */
190 
191 	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
192 	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
193 		return;
194 
195 	mutex_lock(&kvm->arch.apic_map_lock);
196 	/*
197 	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
198 	 * (if clean) or the APIC registers (if dirty).
199 	 */
200 	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
201 				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
202 		/* Someone else has updated the map. */
203 		mutex_unlock(&kvm->arch.apic_map_lock);
204 		return;
205 	}
206 
207 	kvm_for_each_vcpu(i, vcpu, kvm)
208 		if (kvm_apic_present(vcpu))
209 			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
210 
211 	new = kvzalloc(sizeof(struct kvm_apic_map) +
212 		       sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
213 		       GFP_KERNEL_ACCOUNT);
214 
215 	if (!new)
216 		goto out;
217 
218 	new->max_apic_id = max_id;
219 
220 	kvm_for_each_vcpu(i, vcpu, kvm) {
221 		struct kvm_lapic *apic = vcpu->arch.apic;
222 		struct kvm_lapic **cluster;
223 		u16 mask;
224 		u32 ldr;
225 		u8 xapic_id;
226 		u32 x2apic_id;
227 
228 		if (!kvm_apic_present(vcpu))
229 			continue;
230 
231 		xapic_id = kvm_xapic_id(apic);
232 		x2apic_id = kvm_x2apic_id(apic);
233 
234 		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
235 		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
236 				x2apic_id <= new->max_apic_id)
237 			new->phys_map[x2apic_id] = apic;
238 		/*
239 		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
240 		 * prevent them from masking VCPUs with APIC ID <= 0xff.
241 		 */
242 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
243 			new->phys_map[xapic_id] = apic;
244 
245 		if (!kvm_apic_sw_enabled(apic))
246 			continue;
247 
248 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
249 
250 		if (apic_x2apic_mode(apic)) {
251 			new->mode |= KVM_APIC_MODE_X2APIC;
252 		} else if (ldr) {
253 			ldr = GET_APIC_LOGICAL_ID(ldr);
254 			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
255 				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
256 			else
257 				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
258 		}
259 
260 		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
261 			continue;
262 
263 		if (mask)
264 			cluster[ffs(mask) - 1] = apic;
265 	}
266 out:
267 	old = rcu_dereference_protected(kvm->arch.apic_map,
268 			lockdep_is_held(&kvm->arch.apic_map_lock));
269 	rcu_assign_pointer(kvm->arch.apic_map, new);
270 	/*
271 	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
272 	 * If another update has come in, leave it DIRTY.
273 	 */
274 	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
275 			       UPDATE_IN_PROGRESS, CLEAN);
276 	mutex_unlock(&kvm->arch.apic_map_lock);
277 
278 	if (old)
279 		call_rcu(&old->rcu, kvm_apic_map_free);
280 
281 	kvm_make_scan_ioapic_request(kvm);
282 }
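
/*
 * The map is published with rcu_assign_pointer() and read under
 * rcu_read_lock() via rcu_dereference() (see
 * kvm_irq_delivery_to_apic_fast()); the old map is freed through call_rcu()
 * only after all readers have drained.
 */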
283 
284 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
285 {
286 	bool enabled = val & APIC_SPIV_APIC_ENABLED;
287 
288 	kvm_lapic_set_reg(apic, APIC_SPIV, val);
289 
290 	if (enabled != apic->sw_enabled) {
291 		apic->sw_enabled = enabled;
292 		if (enabled)
293 			static_branch_slow_dec_deferred(&apic_sw_disabled);
294 		else
295 			static_branch_inc(&apic_sw_disabled.key);
296 
297 		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
298 	}
299 
300 	/* Check if there are APF page ready requests pending */
301 	if (enabled)
302 		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
303 }
304 
305 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
306 {
307 	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
308 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
309 }
310 
311 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
312 {
313 	kvm_lapic_set_reg(apic, APIC_LDR, id);
314 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
315 }
316 
317 static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
318 {
319 	kvm_lapic_set_reg(apic, APIC_DFR, val);
320 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
321 }
322 
323 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
324 {
325 	return ((id >> 4) << 16) | (1 << (id & 0xf));
326 }
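
/*
 * In x2APIC mode the logical ID is derived from the APIC ID: the cluster
 * (id >> 4) goes in bits 31:16 and bit (id & 0xf) is set in the low word,
 * e.g. id 0x23 yields LDR 0x00020008 (cluster 2, bit 3).
 */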
327 
328 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
329 {
330 	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
331 
332 	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
333 
334 	kvm_lapic_set_reg(apic, APIC_ID, id);
335 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
336 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
337 }
338 
339 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
340 {
341 	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
342 }
343 
344 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
345 {
346 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
347 }
348 
349 static inline int apic_lvtt_period(struct kvm_lapic *apic)
350 {
351 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
352 }
353 
354 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
355 {
356 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
357 }
358 
359 static inline int apic_lvt_nmi_mode(u32 lvt_val)
360 {
361 	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
362 }
363 
364 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
365 {
366 	struct kvm_lapic *apic = vcpu->arch.apic;
367 	u32 v = APIC_VERSION;
368 
369 	if (!lapic_in_kernel(vcpu))
370 		return;
371 
372 	/*
373 	 * KVM's in-kernel IOAPIC emulates the 82093AA datasheet, which has no
374 	 * EOI register.  Some buggy OSes (e.g. Windows with the Hyper-V role)
375 	 * disable EOI broadcast in the LAPIC without first checking the
376 	 * IOAPIC version, so level-triggered interrupts would never get
377 	 * EOIed in the IOAPIC.
378 	 */
379 	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
380 	    !ioapic_in_kernel(vcpu->kvm))
381 		v |= APIC_LVR_DIRECTED_EOI;
382 	kvm_lapic_set_reg(apic, APIC_LVR, v);
383 }
384 
385 static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
386 	LVT_MASK,	/* part LVTT mask, timer mode mask added at runtime */
387 	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
388 	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
389 	LINT_MASK, LINT_MASK,	/* LVT0-1 */
390 	LVT_MASK		/* LVTERR */
391 };
392 
393 static int find_highest_vector(void *bitmap)
394 {
395 	int vec;
396 	u32 *reg;
397 
398 	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
399 	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
400 		reg = bitmap + REG_POS(vec);
401 		if (*reg)
402 			return __fls(*reg) + vec;
403 	}
404 
405 	return -1;
406 }
407 
408 static u8 count_vectors(void *bitmap)
409 {
410 	int vec;
411 	u32 *reg;
412 	u8 count = 0;
413 
414 	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
415 		reg = bitmap + REG_POS(vec);
416 		count += hweight32(*reg);
417 	}
418 
419 	return count;
420 }
421 
422 bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
423 {
424 	u32 i, vec;
425 	u32 pir_val, irr_val, prev_irr_val;
426 	int max_updated_irr;
427 
428 	max_updated_irr = -1;
429 	*max_irr = -1;
430 
431 	for (i = vec = 0; i <= 7; i++, vec += 32) {
432 		pir_val = READ_ONCE(pir[i]);
433 		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
434 		if (pir_val) {
435 			prev_irr_val = irr_val;
436 			irr_val |= xchg(&pir[i], 0);
437 			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
438 			if (prev_irr_val != irr_val) {
439 				max_updated_irr =
440 					__fls(irr_val ^ prev_irr_val) + vec;
441 			}
442 		}
443 		if (irr_val)
444 			*max_irr = __fls(irr_val) + vec;
445 	}
446 
447 	return ((max_updated_irr != -1) &&
448 		(max_updated_irr == *max_irr));
449 }
450 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
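
/*
 * The xchg() above atomically claims each PIR word, so a bit set by a
 * concurrent sender is either folded into this IRR update or left in the
 * PIR for the next sync.  The function returns true only when the overall
 * max IRR is one of the bits just transferred from the PIR, i.e. the
 * highest pending vector is newly posted.
 */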
451 
452 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
453 {
454 	struct kvm_lapic *apic = vcpu->arch.apic;
455 
456 	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
457 }
458 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
459 
460 static inline int apic_search_irr(struct kvm_lapic *apic)
461 {
462 	return find_highest_vector(apic->regs + APIC_IRR);
463 }
464 
465 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
466 {
467 	int result;
468 
469 	/*
470 	 * Note that irr_pending is just a hint. It is always
471 	 * true when virtual interrupt delivery is enabled.
472 	 */
473 	if (!apic->irr_pending)
474 		return -1;
475 
476 	result = apic_search_irr(apic);
477 	ASSERT(result == -1 || result >= 16);
478 
479 	return result;
480 }
481 
482 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
483 {
484 	struct kvm_vcpu *vcpu;
485 
486 	vcpu = apic->vcpu;
487 
488 	if (unlikely(vcpu->arch.apicv_active)) {
489 		/* need to update RVI */
490 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
491 		static_call(kvm_x86_hwapic_irr_update)(vcpu,
492 				apic_find_highest_irr(apic));
493 	} else {
494 		apic->irr_pending = false;
495 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
496 		if (apic_search_irr(apic) != -1)
497 			apic->irr_pending = true;
498 	}
499 }
500 
501 void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
502 {
503 	apic_clear_irr(vec, vcpu->arch.apic);
504 }
505 EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
506 
507 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
508 {
509 	struct kvm_vcpu *vcpu;
510 
511 	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
512 		return;
513 
514 	vcpu = apic->vcpu;
515 
516 	/*
517 	 * With APIC virtualization enabled, all caching is disabled
518 	 * because the processor can modify ISR under the hood.  Instead
519 	 * just set SVI.
520 	 */
521 	if (unlikely(vcpu->arch.apicv_active))
522 		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
523 	else {
524 		++apic->isr_count;
525 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
526 		/*
527 		 * An ISR (in-service register) bit is set when an interrupt is
528 		 * injected.  Only the highest-priority vector is injected, so
529 		 * the most recently set bit matches the highest bit in the ISR.
530 		 */
531 		apic->highest_isr_cache = vec;
532 	}
533 }
534 
535 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
536 {
537 	int result;
538 
539 	/*
540 	 * Note that isr_count is always 1, and highest_isr_cache
541 	 * is always -1, with APIC virtualization enabled.
542 	 */
543 	if (!apic->isr_count)
544 		return -1;
545 	if (likely(apic->highest_isr_cache != -1))
546 		return apic->highest_isr_cache;
547 
548 	result = find_highest_vector(apic->regs + APIC_ISR);
549 	ASSERT(result == -1 || result >= 16);
550 
551 	return result;
552 }
553 
554 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
555 {
556 	struct kvm_vcpu *vcpu;
557 	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
558 		return;
559 
560 	vcpu = apic->vcpu;
561 
562 	/*
563 	 * We do get here for APIC virtualization enabled if the guest
564 	 * uses the Hyper-V APIC enlightenment.  In this case we may need
565 	 * to trigger a new interrupt delivery by writing the SVI field;
566 	 * on the other hand isr_count and highest_isr_cache are unused
567 	 * and must be left alone.
568 	 */
569 	if (unlikely(vcpu->arch.apicv_active))
570 		static_call(kvm_x86_hwapic_isr_update)(vcpu,
571 						apic_find_highest_isr(apic));
572 	else {
573 		--apic->isr_count;
574 		BUG_ON(apic->isr_count < 0);
575 		apic->highest_isr_cache = -1;
576 	}
577 }
578 
579 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
580 {
581 	/* This may race with the setting of IRR in __apic_accept_irq() and
582 	 * the value returned may be stale, but kvm_vcpu_kick() in
583 	 * __apic_accept_irq() causes an immediate vmexit and the value is
584 	 * recalculated on the next vmentry.
585 	 */
586 	return apic_find_highest_irr(vcpu->arch.apic);
587 }
588 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
589 
590 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
591 			     int vector, int level, int trig_mode,
592 			     struct dest_map *dest_map);
593 
594 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
595 		     struct dest_map *dest_map)
596 {
597 	struct kvm_lapic *apic = vcpu->arch.apic;
598 
599 	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
600 			irq->level, irq->trig_mode, dest_map);
601 }
602 
603 static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
604 			 struct kvm_lapic_irq *irq, u32 min)
605 {
606 	int i, count = 0;
607 	struct kvm_vcpu *vcpu;
608 
609 	if (min > map->max_apic_id)
610 		return 0;
611 
612 	for_each_set_bit(i, ipi_bitmap,
613 		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
614 		if (map->phys_map[min + i]) {
615 			vcpu = map->phys_map[min + i]->vcpu;
616 			count += kvm_apic_set_irq(vcpu, irq, NULL);
617 		}
618 	}
619 
620 	return count;
621 }
622 
623 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
624 		    unsigned long ipi_bitmap_high, u32 min,
625 		    unsigned long icr, int op_64_bit)
626 {
627 	struct kvm_apic_map *map;
628 	struct kvm_lapic_irq irq = {0};
629 	int cluster_size = op_64_bit ? 64 : 32;
630 	int count;
631 
632 	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
633 		return -KVM_EINVAL;
634 
635 	irq.vector = icr & APIC_VECTOR_MASK;
636 	irq.delivery_mode = icr & APIC_MODE_MASK;
637 	irq.level = (icr & APIC_INT_ASSERT) != 0;
638 	irq.trig_mode = icr & APIC_INT_LEVELTRIG;
639 
640 	rcu_read_lock();
641 	map = rcu_dereference(kvm->arch.apic_map);
642 
643 	count = -EOPNOTSUPP;
644 	if (likely(map)) {
645 		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
646 		min += cluster_size;
647 		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
648 	}
649 
650 	rcu_read_unlock();
651 	return count;
652 }
653 
654 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
655 {
656 
657 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
658 				      sizeof(val));
659 }
660 
661 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
662 {
663 
664 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
665 				      sizeof(*val));
666 }
667 
668 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
669 {
670 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
671 }
672 
673 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
674 {
675 	u8 val;
676 	if (pv_eoi_get_user(vcpu, &val) < 0) {
677 		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
678 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
679 		return false;
680 	}
681 	return val & KVM_PV_EOI_ENABLED;
682 }
683 
684 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
685 {
686 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
687 		printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
688 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
689 		return;
690 	}
691 	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
692 }
693 
694 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
695 {
696 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
697 		printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
698 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
699 		return;
700 	}
701 	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
702 }
703 
704 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
705 {
706 	int highest_irr;
707 	if (apic->vcpu->arch.apicv_active)
708 		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
709 	else
710 		highest_irr = apic_find_highest_irr(apic);
711 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
712 		return -1;
713 	return highest_irr;
714 }
715 
716 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
717 {
718 	u32 tpr, isrv, ppr, old_ppr;
719 	int isr;
720 
721 	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
722 	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
723 	isr = apic_find_highest_isr(apic);
724 	isrv = (isr != -1) ? isr : 0;
725 
726 	if ((tpr & 0xf0) >= (isrv & 0xf0))
727 		ppr = tpr & 0xff;
728 	else
729 		ppr = isrv & 0xf0;
730 
731 	*new_ppr = ppr;
732 	if (old_ppr != ppr)
733 		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
734 
735 	return ppr < old_ppr;
736 }
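
/*
 * PPR is TPR when TPR's priority class is at least the in-service class,
 * else the in-service class: with TPR = 0x30 and highest ISR vector 0x52,
 * the ISR class (0x50) wins and PPR becomes 0x50, so pending vectors up to
 * 0x5f are held off (see apic_has_interrupt_for_ppr()).
 */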
737 
738 static void apic_update_ppr(struct kvm_lapic *apic)
739 {
740 	u32 ppr;
741 
742 	if (__apic_update_ppr(apic, &ppr) &&
743 	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
744 		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
745 }
746 
747 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
748 {
749 	apic_update_ppr(vcpu->arch.apic);
750 }
751 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
752 
753 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
754 {
755 	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
756 	apic_update_ppr(apic);
757 }
758 
759 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
760 {
761 	return mda == (apic_x2apic_mode(apic) ?
762 			X2APIC_BROADCAST : APIC_BROADCAST);
763 }
764 
765 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
766 {
767 	if (kvm_apic_broadcast(apic, mda))
768 		return true;
769 
770 	if (apic_x2apic_mode(apic))
771 		return mda == kvm_x2apic_id(apic);
772 
773 	/*
774 	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
775 	 * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
776 	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
777 	 * The 0xff condition is needed because the xAPIC ID is writeable.
778 	 */
779 	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
780 		return true;
781 
782 	return mda == kvm_xapic_id(apic);
783 }
784 
785 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
786 {
787 	u32 logical_id;
788 
789 	if (kvm_apic_broadcast(apic, mda))
790 		return true;
791 
792 	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
793 
794 	if (apic_x2apic_mode(apic))
795 		return ((logical_id >> 16) == (mda >> 16))
796 		       && (logical_id & mda & 0xffff) != 0;
797 
798 	logical_id = GET_APIC_LOGICAL_ID(logical_id);
799 
800 	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
801 	case APIC_DFR_FLAT:
802 		return (logical_id & mda) != 0;
803 	case APIC_DFR_CLUSTER:
804 		return ((logical_id >> 4) == (mda >> 4))
805 		       && (logical_id & mda & 0xf) != 0;
806 	default:
807 		return false;
808 	}
809 }
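
/*
 * xAPIC logical addressing: in flat mode the 8-bit LDR is a plain bitmask,
 * so LDR 0x04 matches MDA 0x06 because bit 2 overlaps; in cluster mode the
 * high nibble is a cluster number that must match exactly and the low
 * nibble is a 4-CPU bitmask.
 */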
810 
811 /* The KVM local APIC implementation has two quirks:
812  *
813  *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
814  *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
815  *    KVM doesn't do that aliasing.
816  *
817  *  - in-kernel IOAPIC messages have to be delivered directly to
818  *    x2APIC, because the kernel does not support interrupt remapping.
819  *    In order to support broadcast without interrupt remapping, x2APIC
820  *    rewrites the destination of non-IPI messages from APIC_BROADCAST
821  *    to X2APIC_BROADCAST.
822  *
823  * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
824  * important when userspace wants to use x2APIC-format MSIs, because
825  * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
826  */
827 static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
828 		struct kvm_lapic *source, struct kvm_lapic *target)
829 {
830 	bool ipi = source != NULL;
831 
832 	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
833 	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
834 		return X2APIC_BROADCAST;
835 
836 	return dest_id;
837 }
838 
839 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
840 			   int shorthand, unsigned int dest, int dest_mode)
841 {
842 	struct kvm_lapic *target = vcpu->arch.apic;
843 	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
844 
845 	ASSERT(target);
846 	switch (shorthand) {
847 	case APIC_DEST_NOSHORT:
848 		if (dest_mode == APIC_DEST_PHYSICAL)
849 			return kvm_apic_match_physical_addr(target, mda);
850 		else
851 			return kvm_apic_match_logical_addr(target, mda);
852 	case APIC_DEST_SELF:
853 		return target == source;
854 	case APIC_DEST_ALLINC:
855 		return true;
856 	case APIC_DEST_ALLBUT:
857 		return target != source;
858 	default:
859 		return false;
860 	}
861 }
862 EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
863 
864 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
865 		       const unsigned long *bitmap, u32 bitmap_size)
866 {
867 	u32 mod;
868 	int i, idx = -1;
869 
870 	mod = vector % dest_vcpus;
871 
872 	for (i = 0; i <= mod; i++) {
873 		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
874 		BUG_ON(idx == bitmap_size);
875 	}
876 
877 	return idx;
878 }
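
/*
 * Vector hashing: the vector modulo the number of candidate vCPUs selects
 * the (mod + 1)-th set bit of the bitmap, e.g. vector 0x42 (66) with three
 * candidates gives 66 % 3 == 0, i.e. the first set bit.
 */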
879 
880 static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
881 {
882 	if (!kvm->arch.disabled_lapic_found) {
883 		kvm->arch.disabled_lapic_found = true;
884 		printk(KERN_INFO
885 		       "Disabled LAPIC found during irq injection\n");
886 	}
887 }
888 
889 static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
890 		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
891 {
892 	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
893 		if ((irq->dest_id == APIC_BROADCAST &&
894 				map->mode != KVM_APIC_MODE_X2APIC))
895 			return true;
896 		if (irq->dest_id == X2APIC_BROADCAST)
897 			return true;
898 	} else {
899 		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
900 		if (irq->dest_id == (x2apic_ipi ?
901 		                     X2APIC_BROADCAST : APIC_BROADCAST))
902 			return true;
903 	}
904 
905 	return false;
906 }
907 
908 /* Return true if the interrupt can be handled by using *bitmap as index mask
909  * for valid destinations in *dst array.
910  * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
911  * Note: we may have zero kvm_lapic destinations when we return true, which
912  * means that the interrupt should be dropped.  In this case, *bitmap would be
913  * zero and *dst undefined.
914  */
915 static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
916 		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
917 		struct kvm_apic_map *map, struct kvm_lapic ***dst,
918 		unsigned long *bitmap)
919 {
920 	int i, lowest;
921 
922 	if (irq->shorthand == APIC_DEST_SELF && src) {
923 		*dst = src;
924 		*bitmap = 1;
925 		return true;
926 	} else if (irq->shorthand)
927 		return false;
928 
929 	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
930 		return false;
931 
932 	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
933 		if (irq->dest_id > map->max_apic_id) {
934 			*bitmap = 0;
935 		} else {
936 			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
937 			*dst = &map->phys_map[dest_id];
938 			*bitmap = 1;
939 		}
940 		return true;
941 	}
942 
943 	*bitmap = 0;
944 	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
945 				(u16 *)bitmap))
946 		return false;
947 
948 	if (!kvm_lowest_prio_delivery(irq))
949 		return true;
950 
951 	if (!kvm_vector_hashing_enabled()) {
952 		lowest = -1;
953 		for_each_set_bit(i, bitmap, 16) {
954 			if (!(*dst)[i])
955 				continue;
956 			if (lowest < 0)
957 				lowest = i;
958 			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
959 						(*dst)[lowest]->vcpu) < 0)
960 				lowest = i;
961 		}
962 	} else {
963 		if (!*bitmap)
964 			return true;
965 
966 		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
967 				bitmap, 16);
968 
969 		if (!(*dst)[lowest]) {
970 			kvm_apic_disabled_lapic_found(kvm);
971 			*bitmap = 0;
972 			return true;
973 		}
974 	}
975 
976 	*bitmap = (lowest >= 0) ? 1 << lowest : 0;
977 
978 	return true;
979 }
980 
981 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
982 		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
983 {
984 	struct kvm_apic_map *map;
985 	unsigned long bitmap;
986 	struct kvm_lapic **dst = NULL;
987 	int i;
988 	bool ret;
989 
990 	*r = -1;
991 
992 	if (irq->shorthand == APIC_DEST_SELF) {
993 		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
994 		return true;
995 	}
996 
997 	rcu_read_lock();
998 	map = rcu_dereference(kvm->arch.apic_map);
999 
1000 	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1001 	if (ret) {
1002 		*r = 0;
1003 		for_each_set_bit(i, &bitmap, 16) {
1004 			if (!dst[i])
1005 				continue;
1006 			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1007 		}
1008 	}
1009 
1010 	rcu_read_unlock();
1011 	return ret;
1012 }
1013 
1014 /*
1015  * This routine tries to handle interrupts in posted mode, here is how
1016  * it deals with different cases:
1017  * - For single-destination interrupts, handle it in posted mode
1018  * - Else if vector hashing is enabled and it is a lowest-priority
1019  *   interrupt, handle it in posted mode and use the following mechanism
1020  *   to find the destination vCPU.
1021  *	1. For lowest-priority interrupts, store all the possible
1022  *	   destination vCPUs in an array.
1023  *	2. Use "guest vector % max number of destination vCPUs" to find
1024  *	   the right destination vCPU in the array for the lowest-priority
1025  *	   interrupt.
1026  * - Otherwise, use remapped mode to inject the interrupt.
1027  */
1028 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1029 			struct kvm_vcpu **dest_vcpu)
1030 {
1031 	struct kvm_apic_map *map;
1032 	unsigned long bitmap;
1033 	struct kvm_lapic **dst = NULL;
1034 	bool ret = false;
1035 
1036 	if (irq->shorthand)
1037 		return false;
1038 
1039 	rcu_read_lock();
1040 	map = rcu_dereference(kvm->arch.apic_map);
1041 
1042 	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1043 			hweight16(bitmap) == 1) {
1044 		unsigned long i = find_first_bit(&bitmap, 16);
1045 
1046 		if (dst[i]) {
1047 			*dest_vcpu = dst[i]->vcpu;
1048 			ret = true;
1049 		}
1050 	}
1051 
1052 	rcu_read_unlock();
1053 	return ret;
1054 }
1055 
1056 /*
1057  * Add a pending IRQ into lapic.
1058  * Return 1 if successfully added and 0 if discarded.
1059  */
1060 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1061 			     int vector, int level, int trig_mode,
1062 			     struct dest_map *dest_map)
1063 {
1064 	int result = 0;
1065 	struct kvm_vcpu *vcpu = apic->vcpu;
1066 
1067 	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1068 				  trig_mode, vector);
1069 	switch (delivery_mode) {
1070 	case APIC_DM_LOWEST:
1071 		vcpu->arch.apic_arb_prio++;
1072 		fallthrough;
1073 	case APIC_DM_FIXED:
1074 		if (unlikely(trig_mode && !level))
1075 			break;
1076 
1077 		/* FIXME add logic for vcpu on reset */
1078 		if (unlikely(!apic_enabled(apic)))
1079 			break;
1080 
1081 		result = 1;
1082 
1083 		if (dest_map) {
1084 			__set_bit(vcpu->vcpu_id, dest_map->map);
1085 			dest_map->vectors[vcpu->vcpu_id] = vector;
1086 		}
1087 
1088 		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1089 			if (trig_mode)
1090 				kvm_lapic_set_vector(vector,
1091 						     apic->regs + APIC_TMR);
1092 			else
1093 				kvm_lapic_clear_vector(vector,
1094 						       apic->regs + APIC_TMR);
1095 		}
1096 
1097 		if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
1098 			kvm_lapic_set_irr(vector, apic);
1099 			kvm_make_request(KVM_REQ_EVENT, vcpu);
1100 			kvm_vcpu_kick(vcpu);
1101 		}
1102 		break;
1103 
1104 	case APIC_DM_REMRD:
1105 		result = 1;
1106 		vcpu->arch.pv.pv_unhalted = 1;
1107 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1108 		kvm_vcpu_kick(vcpu);
1109 		break;
1110 
1111 	case APIC_DM_SMI:
1112 		result = 1;
1113 		kvm_make_request(KVM_REQ_SMI, vcpu);
1114 		kvm_vcpu_kick(vcpu);
1115 		break;
1116 
1117 	case APIC_DM_NMI:
1118 		result = 1;
1119 		kvm_inject_nmi(vcpu);
1120 		kvm_vcpu_kick(vcpu);
1121 		break;
1122 
1123 	case APIC_DM_INIT:
1124 		if (!trig_mode || level) {
1125 			result = 1;
1126 			/* assumes that there are only KVM_APIC_INIT/SIPI */
1127 			apic->pending_events = (1UL << KVM_APIC_INIT);
1128 			kvm_make_request(KVM_REQ_EVENT, vcpu);
1129 			kvm_vcpu_kick(vcpu);
1130 		}
1131 		break;
1132 
1133 	case APIC_DM_STARTUP:
1134 		result = 1;
1135 		apic->sipi_vector = vector;
1136 		/* make sure sipi_vector is visible for the receiver */
1137 		smp_wmb();
1138 		set_bit(KVM_APIC_SIPI, &apic->pending_events);
1139 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1140 		kvm_vcpu_kick(vcpu);
1141 		break;
1142 
1143 	case APIC_DM_EXTINT:
1144 		/*
1145 		 * Should only be called by kvm_apic_local_deliver() with LVT0,
1146 		 * before NMI watchdog was enabled. Already handled by
1147 		 * kvm_apic_accept_pic_intr().
1148 		 */
1149 		break;
1150 
1151 	default:
1152 		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1153 		       delivery_mode);
1154 		break;
1155 	}
1156 	return result;
1157 }
1158 
1159 /*
1160  * This routine identifies the destination vcpus mask meant to receive the
1161  * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
1162  * out the destination vcpus array and set the bitmap or it traverses to
1163  * each available vcpu to identify the same.
1164  */
1165 void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1166 			      unsigned long *vcpu_bitmap)
1167 {
1168 	struct kvm_lapic **dest_vcpu = NULL;
1169 	struct kvm_lapic *src = NULL;
1170 	struct kvm_apic_map *map;
1171 	struct kvm_vcpu *vcpu;
1172 	unsigned long bitmap;
1173 	int i, vcpu_idx;
1174 	bool ret;
1175 
1176 	rcu_read_lock();
1177 	map = rcu_dereference(kvm->arch.apic_map);
1178 
1179 	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1180 					  &bitmap);
1181 	if (ret) {
1182 		for_each_set_bit(i, &bitmap, 16) {
1183 			if (!dest_vcpu[i])
1184 				continue;
1185 			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1186 			__set_bit(vcpu_idx, vcpu_bitmap);
1187 		}
1188 	} else {
1189 		kvm_for_each_vcpu(i, vcpu, kvm) {
1190 			if (!kvm_apic_present(vcpu))
1191 				continue;
1192 			if (!kvm_apic_match_dest(vcpu, NULL,
1193 						 irq->shorthand,
1194 						 irq->dest_id,
1195 						 irq->dest_mode))
1196 				continue;
1197 			__set_bit(i, vcpu_bitmap);
1198 		}
1199 	}
1200 	rcu_read_unlock();
1201 }
1202 
1203 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1204 {
1205 	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1206 }
1207 
1208 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1209 {
1210 	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1211 }
1212 
1213 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1214 {
1215 	int trigger_mode;
1216 
1217 	/* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
1218 	if (!kvm_ioapic_handles_vector(apic, vector))
1219 		return;
1220 
1221 	/* Request a KVM exit to inform the userspace IOAPIC. */
1222 	if (irqchip_split(apic->vcpu->kvm)) {
1223 		apic->vcpu->arch.pending_ioapic_eoi = vector;
1224 		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1225 		return;
1226 	}
1227 
1228 	if (apic_test_vector(vector, apic->regs + APIC_TMR))
1229 		trigger_mode = IOAPIC_LEVEL_TRIG;
1230 	else
1231 		trigger_mode = IOAPIC_EDGE_TRIG;
1232 
1233 	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1234 }
1235 
1236 static int apic_set_eoi(struct kvm_lapic *apic)
1237 {
1238 	int vector = apic_find_highest_isr(apic);
1239 
1240 	trace_kvm_eoi(apic, vector);
1241 
1242 	/*
1243 	 * Not every EOI write has a corresponding bit set in the ISR;
1244 	 * one example is when the kernel checks the timer in setup_IO_APIC().
1245 	 */
1246 	if (vector == -1)
1247 		return vector;
1248 
1249 	apic_clear_isr(vector, apic);
1250 	apic_update_ppr(apic);
1251 
1252 	if (to_hv_vcpu(apic->vcpu) &&
1253 	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
1254 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
1255 
1256 	kvm_ioapic_send_eoi(apic, vector);
1257 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1258 	return vector;
1259 }
1260 
1261 /*
1262  * this interface assumes a trap-like exit, which has already finished
1263  * desired side effect including vISR and vPPR update.
1264  */
1265 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1266 {
1267 	struct kvm_lapic *apic = vcpu->arch.apic;
1268 
1269 	trace_kvm_eoi(apic, vector);
1270 
1271 	kvm_ioapic_send_eoi(apic, vector);
1272 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1273 }
1274 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1275 
1276 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1277 {
1278 	struct kvm_lapic_irq irq;
1279 
1280 	irq.vector = icr_low & APIC_VECTOR_MASK;
1281 	irq.delivery_mode = icr_low & APIC_MODE_MASK;
1282 	irq.dest_mode = icr_low & APIC_DEST_MASK;
1283 	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1284 	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1285 	irq.shorthand = icr_low & APIC_SHORT_MASK;
1286 	irq.msi_redir_hint = false;
1287 	if (apic_x2apic_mode(apic))
1288 		irq.dest_id = icr_high;
1289 	else
1290 		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1291 
1292 	trace_kvm_apic_ipi(icr_low, irq.dest_id);
1293 
1294 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1295 }
1296 
1297 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1298 {
1299 	ktime_t remaining, now;
1300 	s64 ns;
1301 	u32 tmcct;
1302 
1303 	ASSERT(apic != NULL);
1304 
1305 	/* if initial count is 0, current count should also be 0 */
1306 	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1307 		apic->lapic_timer.period == 0)
1308 		return 0;
1309 
1310 	now = ktime_get();
1311 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1312 	if (ktime_to_ns(remaining) < 0)
1313 		remaining = 0;
1314 
1315 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1316 	tmcct = div64_u64(ns,
1317 			 (APIC_BUS_CYCLE_NS * apic->divide_count));
1318 
1319 	return tmcct;
1320 }
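
/*
 * For a periodic timer, the time left until the next expiry is the
 * remaining wall-clock time modulo the period; dividing by the bus cycle
 * length scaled by divide_count converts it back into the tick count the
 * guest reads from TMCCT.
 */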
1321 
1322 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1323 {
1324 	struct kvm_vcpu *vcpu = apic->vcpu;
1325 	struct kvm_run *run = vcpu->run;
1326 
1327 	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1328 	run->tpr_access.rip = kvm_rip_read(vcpu);
1329 	run->tpr_access.is_write = write;
1330 }
1331 
1332 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1333 {
1334 	if (apic->vcpu->arch.tpr_access_reporting)
1335 		__report_tpr_access(apic, write);
1336 }
1337 
1338 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1339 {
1340 	u32 val = 0;
1341 
1342 	if (offset >= LAPIC_MMIO_LENGTH)
1343 		return 0;
1344 
1345 	switch (offset) {
1346 	case APIC_ARBPRI:
1347 		break;
1348 
1349 	case APIC_TMCCT:	/* Timer CCR */
1350 		if (apic_lvtt_tscdeadline(apic))
1351 			return 0;
1352 
1353 		val = apic_get_tmcct(apic);
1354 		break;
1355 	case APIC_PROCPRI:
1356 		apic_update_ppr(apic);
1357 		val = kvm_lapic_get_reg(apic, offset);
1358 		break;
1359 	case APIC_TASKPRI:
1360 		report_tpr_access(apic, false);
1361 		fallthrough;
1362 	default:
1363 		val = kvm_lapic_get_reg(apic, offset);
1364 		break;
1365 	}
1366 
1367 	return val;
1368 }
1369 
1370 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1371 {
1372 	return container_of(dev, struct kvm_lapic, dev);
1373 }
1374 
1375 #define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
1376 #define APIC_REGS_MASK(first, count) \
1377 	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
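
/*
 * Each 16-byte APIC register maps to one bit of the mask (reg >> 4);
 * APIC_REGS_MASK() multiplies the first register's bit by 2^count - 1 to
 * mark count consecutive registers, e.g. APIC_REGS_MASK(APIC_ISR,
 * APIC_ISR_NR) covers offsets 0x100-0x170.
 */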
1378 
1379 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1380 		void *data)
1381 {
1382 	unsigned char alignment = offset & 0xf;
1383 	u32 result;
1384 	/* this bitmask has a bit cleared for each reserved register */
1385 	u64 valid_reg_mask =
1386 		APIC_REG_MASK(APIC_ID) |
1387 		APIC_REG_MASK(APIC_LVR) |
1388 		APIC_REG_MASK(APIC_TASKPRI) |
1389 		APIC_REG_MASK(APIC_PROCPRI) |
1390 		APIC_REG_MASK(APIC_LDR) |
1391 		APIC_REG_MASK(APIC_DFR) |
1392 		APIC_REG_MASK(APIC_SPIV) |
1393 		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1394 		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1395 		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1396 		APIC_REG_MASK(APIC_ESR) |
1397 		APIC_REG_MASK(APIC_ICR) |
1398 		APIC_REG_MASK(APIC_ICR2) |
1399 		APIC_REG_MASK(APIC_LVTT) |
1400 		APIC_REG_MASK(APIC_LVTTHMR) |
1401 		APIC_REG_MASK(APIC_LVTPC) |
1402 		APIC_REG_MASK(APIC_LVT0) |
1403 		APIC_REG_MASK(APIC_LVT1) |
1404 		APIC_REG_MASK(APIC_LVTERR) |
1405 		APIC_REG_MASK(APIC_TMICT) |
1406 		APIC_REG_MASK(APIC_TMCCT) |
1407 		APIC_REG_MASK(APIC_TDCR);
1408 
1409 	/* ARBPRI is not valid on x2APIC */
1410 	if (!apic_x2apic_mode(apic))
1411 		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
1412 
1413 	if (alignment + len > 4)
1414 		return 1;
1415 
1416 	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
1417 		return 1;
1418 
1419 	result = __apic_read(apic, offset & ~0xf);
1420 
1421 	trace_kvm_apic_read(offset, result);
1422 
1423 	switch (len) {
1424 	case 1:
1425 	case 2:
1426 	case 4:
1427 		memcpy(data, (char *)&result + alignment, len);
1428 		break;
1429 	default:
1430 		printk(KERN_ERR "Local APIC read with len = %x, "
1431 		       "should be 1,2, or 4 instead\n", len);
1432 		break;
1433 	}
1434 	return 0;
1435 }
1436 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1437 
1438 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1439 {
1440 	return addr >= apic->base_address &&
1441 		addr < apic->base_address + LAPIC_MMIO_LENGTH;
1442 }
1443 
1444 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1445 			   gpa_t address, int len, void *data)
1446 {
1447 	struct kvm_lapic *apic = to_lapic(this);
1448 	u32 offset = address - apic->base_address;
1449 
1450 	if (!apic_mmio_in_range(apic, address))
1451 		return -EOPNOTSUPP;
1452 
1453 	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1454 		if (!kvm_check_has_quirk(vcpu->kvm,
1455 					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1456 			return -EOPNOTSUPP;
1457 
1458 		memset(data, 0xff, len);
1459 		return 0;
1460 	}
1461 
1462 	kvm_lapic_reg_read(apic, offset, len, data);
1463 
1464 	return 0;
1465 }
1466 
1467 static void update_divide_count(struct kvm_lapic *apic)
1468 {
1469 	u32 tmp1, tmp2, tdcr;
1470 
1471 	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1472 	tmp1 = tdcr & 0xf;
1473 	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1474 	apic->divide_count = 0x1 << (tmp2 & 0x7);
1475 }
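
/*
 * The divide value is encoded in TDCR bits 0, 1 and 3; folding bit 3 down
 * yields a 3-bit value v where the divisor is 2^(v + 1), with the all-ones
 * encoding wrapping around to divide-by-1: TDCR 0xb gives tmp2 = 8 and
 * divide_count = 1 << (8 & 7) = 1, while TDCR 0x0 gives divide_count = 2.
 */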
1476 
1477 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1478 {
1479 	/*
1480 	 * Do not allow the guest to program periodic timers with small
1481 	 * interval, since the hrtimers are not throttled by the host
1482 	 * scheduler.
1483 	 */
1484 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1485 		s64 min_period = min_timer_period_us * 1000LL;
1486 
1487 		if (apic->lapic_timer.period < min_period) {
1488 			pr_info_ratelimited(
1489 			    "kvm: vcpu %i: requested %lld ns "
1490 			    "lapic timer period limited to %lld ns\n",
1491 			    apic->vcpu->vcpu_id,
1492 			    apic->lapic_timer.period, min_period);
1493 			apic->lapic_timer.period = min_period;
1494 		}
1495 	}
1496 }
1497 
1498 static void cancel_hv_timer(struct kvm_lapic *apic);
1499 
1500 static void cancel_apic_timer(struct kvm_lapic *apic)
1501 {
1502 	hrtimer_cancel(&apic->lapic_timer.timer);
1503 	preempt_disable();
1504 	if (apic->lapic_timer.hv_timer_in_use)
1505 		cancel_hv_timer(apic);
1506 	preempt_enable();
1507 }
1508 
1509 static void apic_update_lvtt(struct kvm_lapic *apic)
1510 {
1511 	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1512 			apic->lapic_timer.timer_mode_mask;
1513 
1514 	if (apic->lapic_timer.timer_mode != timer_mode) {
1515 		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1516 				APIC_LVT_TIMER_TSCDEADLINE)) {
1517 			cancel_apic_timer(apic);
1518 			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1519 			apic->lapic_timer.period = 0;
1520 			apic->lapic_timer.tscdeadline = 0;
1521 		}
1522 		apic->lapic_timer.timer_mode = timer_mode;
1523 		limit_periodic_timer_frequency(apic);
1524 	}
1525 }
1526 
1527 /*
1528  * On APICv, this test will cause a busy wait
1529  * during a higher-priority task.
1530  */
1531 
1532 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1533 {
1534 	struct kvm_lapic *apic = vcpu->arch.apic;
1535 	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1536 
1537 	if (kvm_apic_hw_enabled(apic)) {
1538 		int vec = reg & APIC_VECTOR_MASK;
1539 		void *bitmap = apic->regs + APIC_ISR;
1540 
1541 		if (vcpu->arch.apicv_active)
1542 			bitmap = apic->regs + APIC_IRR;
1543 
1544 		if (apic_test_vector(vec, bitmap))
1545 			return true;
1546 	}
1547 	return false;
1548 }
1549 
1550 static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1551 {
1552 	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1553 
1554 	/*
1555 	 * If the guest TSC is running at a different ratio than the host, then
1556 	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
1557 	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1558 	 * always for VMX enabled hardware.
1559 	 */
1560 	if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
1561 		__delay(min(guest_cycles,
1562 			nsec_to_cycles(vcpu, timer_advance_ns)));
1563 	} else {
1564 		u64 delay_ns = guest_cycles * 1000000ULL;
1565 		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1566 		ndelay(min_t(u32, delay_ns, timer_advance_ns));
1567 	}
1568 }
1569 
1570 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1571 					      s64 advance_expire_delta)
1572 {
1573 	struct kvm_lapic *apic = vcpu->arch.apic;
1574 	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1575 	u64 ns;
1576 
1577 	/* Do not adjust for tiny fluctuations or large random spikes. */
1578 	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1579 	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1580 		return;
1581 
1582 	/* too early */
1583 	if (advance_expire_delta < 0) {
1584 		ns = -advance_expire_delta * 1000000ULL;
1585 		do_div(ns, vcpu->arch.virtual_tsc_khz);
1586 		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1587 	} else {
1588 		/* too late */
1589 		ns = advance_expire_delta * 1000000ULL;
1590 		do_div(ns, vcpu->arch.virtual_tsc_khz);
1591 		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1592 	}
1593 
1594 	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1595 		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1596 	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1597 }
1598 
1599 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1600 {
1601 	struct kvm_lapic *apic = vcpu->arch.apic;
1602 	u64 guest_tsc, tsc_deadline;
1603 
1604 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1605 	apic->lapic_timer.expired_tscdeadline = 0;
1606 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1607 	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1608 
1609 	if (lapic_timer_advance_dynamic) {
1610 		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1611 		/*
1612 		 * If the timer fired early, reread the TSC to account for the
1613 		 * overhead of the above adjustment to avoid waiting longer
1614 		 * than is necessary.
1615 		 */
1616 		if (guest_tsc < tsc_deadline)
1617 			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1618 	}
1619 
1620 	if (guest_tsc < tsc_deadline)
1621 		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1622 }
1623 
1624 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1625 {
1626 	if (lapic_in_kernel(vcpu) &&
1627 	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1628 	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1629 	    lapic_timer_int_injected(vcpu))
1630 		__kvm_wait_lapic_expire(vcpu);
1631 }
1632 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1633 
1634 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1635 {
1636 	struct kvm_timer *ktimer = &apic->lapic_timer;
1637 
1638 	kvm_apic_local_deliver(apic, APIC_LVTT);
1639 	if (apic_lvtt_tscdeadline(apic)) {
1640 		ktimer->tscdeadline = 0;
1641 	} else if (apic_lvtt_oneshot(apic)) {
1642 		ktimer->tscdeadline = 0;
1643 		ktimer->target_expiration = 0;
1644 	}
1645 }
1646 
1647 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1648 {
1649 	struct kvm_vcpu *vcpu = apic->vcpu;
1650 	struct kvm_timer *ktimer = &apic->lapic_timer;
1651 
1652 	if (atomic_read(&apic->lapic_timer.pending))
1653 		return;
1654 
1655 	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1656 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
1657 
1658 	if (!from_timer_fn && vcpu->arch.apicv_active) {
1659 		WARN_ON(kvm_get_running_vcpu() != vcpu);
1660 		kvm_apic_inject_pending_timer_irqs(apic);
1661 		return;
1662 	}
1663 
1664 	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1665 		/*
1666 		 * Ensure the guest's timer has truly expired before posting an
1667 		 * interrupt.  Open code the relevant checks to avoid querying
1668 		 * lapic_timer_int_injected(), which will be false since the
1669 		 * interrupt isn't yet injected.  Waiting until after injecting
1670 		 * is not an option since that won't help a posted interrupt.
1671 		 */
1672 		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1673 		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
1674 			__kvm_wait_lapic_expire(vcpu);
1675 		kvm_apic_inject_pending_timer_irqs(apic);
1676 		return;
1677 	}
1678 
1679 	atomic_inc(&apic->lapic_timer.pending);
1680 	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1681 	if (from_timer_fn)
1682 		kvm_vcpu_kick(vcpu);
1683 }
1684 
1685 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1686 {
1687 	struct kvm_timer *ktimer = &apic->lapic_timer;
1688 	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1689 	u64 ns = 0;
1690 	ktime_t expire;
1691 	struct kvm_vcpu *vcpu = apic->vcpu;
1692 	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1693 	unsigned long flags;
1694 	ktime_t now;
1695 
1696 	if (unlikely(!tscdeadline || !this_tsc_khz))
1697 		return;
1698 
1699 	local_irq_save(flags);
1700 
1701 	now = ktime_get();
1702 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1703 
1704 	ns = (tscdeadline - guest_tsc) * 1000000ULL;
1705 	do_div(ns, this_tsc_khz);
1706 
1707 	if (likely(tscdeadline > guest_tsc) &&
1708 	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
1709 		expire = ktime_add_ns(now, ns);
1710 		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1711 		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1712 	} else
1713 		apic_timer_expired(apic, false);
1714 
1715 	local_irq_restore(flags);
1716 }
1717 
1718 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1719 {
1720 	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1721 }
1722 
1723 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1724 {
1725 	ktime_t now, remaining;
1726 	u64 ns_remaining_old, ns_remaining_new;
1727 
1728 	apic->lapic_timer.period =
1729 			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1730 	limit_periodic_timer_frequency(apic);
1731 
1732 	now = ktime_get();
1733 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1734 	if (ktime_to_ns(remaining) < 0)
1735 		remaining = 0;
1736 
1737 	ns_remaining_old = ktime_to_ns(remaining);
1738 	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1739 	                                   apic->divide_count, old_divisor);
1740 
1741 	apic->lapic_timer.tscdeadline +=
1742 		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1743 		nsec_to_cycles(apic->vcpu, ns_remaining_old);
1744 	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1745 }
1746 
1747 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1748 {
1749 	ktime_t now;
1750 	u64 tscl = rdtsc();
1751 	s64 deadline;
1752 
1753 	now = ktime_get();
1754 	apic->lapic_timer.period =
1755 			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1756 
1757 	if (!apic->lapic_timer.period) {
1758 		apic->lapic_timer.tscdeadline = 0;
1759 		return false;
1760 	}
1761 
1762 	limit_periodic_timer_frequency(apic);
1763 	deadline = apic->lapic_timer.period;
1764 
1765 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1766 		if (unlikely(count_reg != APIC_TMICT)) {
1767 			deadline = tmict_to_ns(apic,
1768 				     kvm_lapic_get_reg(apic, count_reg));
1769 			if (unlikely(deadline <= 0))
1770 				deadline = apic->lapic_timer.period;
1771 			else if (unlikely(deadline > apic->lapic_timer.period)) {
1772 				pr_info_ratelimited(
1773 				    "kvm: vcpu %i: requested lapic timer restore with "
1774 				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1775 				    "Using initial count to start timer.\n",
1776 				    apic->vcpu->vcpu_id,
1777 				    count_reg,
1778 				    kvm_lapic_get_reg(apic, count_reg),
1779 				    deadline, apic->lapic_timer.period);
1780 				kvm_lapic_set_reg(apic, count_reg, 0);
1781 				deadline = apic->lapic_timer.period;
1782 			}
1783 		}
1784 	}
1785 
1786 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1787 		nsec_to_cycles(apic->vcpu, deadline);
1788 	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1789 
1790 	return true;
1791 }
1792 
1793 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1794 {
1795 	ktime_t now = ktime_get();
1796 	u64 tscl = rdtsc();
1797 	ktime_t delta;
1798 
1799 	/*
1800 	 * Synchronize both deadlines to the same time source or
1801 	 * differences in the periods (caused by differences in the
1802 	 * underlying clocks or numerical approximation errors) will
1803 	 * cause the two to drift apart over time as the errors
1804 	 * accumulate.
1805 	 */
1806 	apic->lapic_timer.target_expiration =
1807 		ktime_add_ns(apic->lapic_timer.target_expiration,
1808 				apic->lapic_timer.period);
1809 	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1810 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1811 		nsec_to_cycles(apic->vcpu, delta);
1812 }
1813 
1814 static void start_sw_period(struct kvm_lapic *apic)
1815 {
1816 	if (!apic->lapic_timer.period)
1817 		return;
1818 
1819 	if (ktime_after(ktime_get(),
1820 			apic->lapic_timer.target_expiration)) {
1821 		apic_timer_expired(apic, false);
1822 
1823 		if (apic_lvtt_oneshot(apic))
1824 			return;
1825 
1826 		advance_periodic_target_expiration(apic);
1827 	}
1828 
1829 	hrtimer_start(&apic->lapic_timer.timer,
1830 		apic->lapic_timer.target_expiration,
1831 		HRTIMER_MODE_ABS_HARD);
1832 }
1833 
1834 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1835 {
1836 	if (!lapic_in_kernel(vcpu))
1837 		return false;
1838 
1839 	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1840 }
1841 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1842 
1843 static void cancel_hv_timer(struct kvm_lapic *apic)
1844 {
1845 	WARN_ON(preemptible());
1846 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1847 	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1848 	apic->lapic_timer.hv_timer_in_use = false;
1849 }
1850 
1851 static bool start_hv_timer(struct kvm_lapic *apic)
1852 {
1853 	struct kvm_timer *ktimer = &apic->lapic_timer;
1854 	struct kvm_vcpu *vcpu = apic->vcpu;
1855 	bool expired;
1856 
1857 	WARN_ON(preemptible());
1858 	if (!kvm_can_use_hv_timer(vcpu))
1859 		return false;
1860 
1861 	if (!ktimer->tscdeadline)
1862 		return false;
1863 
1864 	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1865 		return false;
1866 
1867 	ktimer->hv_timer_in_use = true;
1868 	hrtimer_cancel(&ktimer->timer);
1869 
1870 	/*
1871 	 * To simplify handling the periodic timer, leave the hv timer running
1872 	 * even if the deadline timer has expired, i.e. rely on the resulting
1873 	 * VM-Exit to recompute the periodic timer's target expiration.
1874 	 */
1875 	if (!apic_lvtt_period(apic)) {
1876 		/*
1877 		 * Cancel the hv timer if the sw timer fired while the hv timer
1878 		 * was being programmed, or if the hv timer itself expired.
1879 		 */
1880 		if (atomic_read(&ktimer->pending)) {
1881 			cancel_hv_timer(apic);
1882 		} else if (expired) {
1883 			apic_timer_expired(apic, false);
1884 			cancel_hv_timer(apic);
1885 		}
1886 	}
1887 
1888 	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1889 
1890 	return true;
1891 }
1892 
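/*
 * Emulate the APIC timer with an hrtimer-based software timer,
 * cancelling the hv timer first if it is currently in use.
 */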
1893 static void start_sw_timer(struct kvm_lapic *apic)
1894 {
1895 	struct kvm_timer *ktimer = &apic->lapic_timer;
1896 
1897 	WARN_ON(preemptible());
1898 	if (apic->lapic_timer.hv_timer_in_use)
1899 		cancel_hv_timer(apic);
1900 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1901 		return;
1902 
1903 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1904 		start_sw_period(apic);
1905 	else if (apic_lvtt_tscdeadline(apic))
1906 		start_sw_tscdeadline(apic);
1907 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1908 }
1909 
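/*
 * (Re)arm the APIC timer, preferring the hv timer and falling back to
 * the software timer.  A non-periodic timer with a tick still pending
 * has already expired and must not be re-armed.
 */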
1910 static void restart_apic_timer(struct kvm_lapic *apic)
1911 {
1912 	preempt_disable();
1913 
1914 	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1915 		goto out;
1916 
1917 	if (!start_hv_timer(apic))
1918 		start_sw_timer(apic);
1919 out:
1920 	preempt_enable();
1921 }
1922 
1923 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1924 {
1925 	struct kvm_lapic *apic = vcpu->arch.apic;
1926 
1927 	preempt_disable();
1928 	/* If the preempt notifier has already run, it also called apic_timer_expired */
1929 	if (!apic->lapic_timer.hv_timer_in_use)
1930 		goto out;
1931 	WARN_ON(rcuwait_active(&vcpu->wait));
1932 	apic_timer_expired(apic, false);
1933 	cancel_hv_timer(apic);
1934 
1935 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1936 		advance_periodic_target_expiration(apic);
1937 		restart_apic_timer(apic);
1938 	}
1939 out:
1940 	preempt_enable();
1941 }
1942 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1943 
1944 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1945 {
1946 	restart_apic_timer(vcpu->arch.apic);
1947 }
1948 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1949 
1950 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1951 {
1952 	struct kvm_lapic *apic = vcpu->arch.apic;
1953 
1954 	preempt_disable();
1955 	/* The hv timer may not be in use yet, e.g. the TSC deadline timer is not enabled */
1956 	if (apic->lapic_timer.hv_timer_in_use)
1957 		start_sw_timer(apic);
1958 	preempt_enable();
1959 }
1960 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1961 
1962 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1963 {
1964 	struct kvm_lapic *apic = vcpu->arch.apic;
1965 
1966 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1967 	restart_apic_timer(apic);
1968 }
1969 
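/*
 * Program the timer from @count_reg: APIC_TMICT on a normal start,
 * APIC_TMCCT when restoring saved state so that only the remaining
 * part of the period is counted down.
 */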
1970 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
1971 {
1972 	atomic_set(&apic->lapic_timer.pending, 0);
1973 
1974 	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1975 	    && !set_target_expiration(apic, count_reg))
1976 		return;
1977 
1978 	restart_apic_timer(apic);
1979 }
1980 
1981 static void start_apic_timer(struct kvm_lapic *apic)
1982 {
1983 	__start_apic_timer(apic, APIC_TMICT);
1984 }
1985 
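/*
 * Track how many vAPICs have LVT0 programmed for NMI delivery, i.e. are
 * acting as an NMI watchdog; the count is consumed elsewhere (e.g. by
 * the PIT emulation) to decide whether timer ticks must raise NMIs.
 */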
1986 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1987 {
1988 	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1989 
1990 	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1991 		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1992 		if (lvt0_in_nmi_mode)
1993 			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1994 		else
1995 			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1996 	}
1997 }
1998 
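/*
 * Emulate a 32-bit write to an APIC register.  Returns 0 on success and
 * 1 if the register is reserved, read-only, or invalid in the current
 * (xAPIC vs. x2APIC) mode.
 */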
1999 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2000 {
2001 	int ret = 0;
2002 
2003 	trace_kvm_apic_write(reg, val);
2004 
2005 	switch (reg) {
2006 	case APIC_ID:		/* Local APIC ID */
2007 		if (!apic_x2apic_mode(apic))
2008 			kvm_apic_set_xapic_id(apic, val >> 24);
2009 		else
2010 			ret = 1;
2011 		break;
2012 
2013 	case APIC_TASKPRI:
2014 		report_tpr_access(apic, true);
2015 		apic_set_tpr(apic, val & 0xff);
2016 		break;
2017 
2018 	case APIC_EOI:
2019 		apic_set_eoi(apic);
2020 		break;
2021 
2022 	case APIC_LDR:
2023 		if (!apic_x2apic_mode(apic))
2024 			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2025 		else
2026 			ret = 1;
2027 		break;
2028 
2029 	case APIC_DFR:
2030 		if (!apic_x2apic_mode(apic))
2031 			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2032 		else
2033 			ret = 1;
2034 		break;
2035 
2036 	case APIC_SPIV: {
2037 		u32 mask = 0x3ff;
2038 		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2039 			mask |= APIC_SPIV_DIRECTED_EOI;
2040 		apic_set_spiv(apic, val & mask);
2041 		if (!(val & APIC_SPIV_APIC_ENABLED)) {
2042 			int i;
2043 			u32 lvt_val;
2044 
2045 			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
2046 				lvt_val = kvm_lapic_get_reg(apic,
2047 						       APIC_LVTT + 0x10 * i);
2048 				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
2049 					     lvt_val | APIC_LVT_MASKED);
2050 			}
2051 			apic_update_lvtt(apic);
2052 			atomic_set(&apic->lapic_timer.pending, 0);
2053 
2054 		}
2055 		break;
2056 	}
2057 	case APIC_ICR:
2058 		/* No delay here, so we always clear the pending bit */
2059 		val &= ~(1 << 12);
2060 		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2061 		kvm_lapic_set_reg(apic, APIC_ICR, val);
2062 		break;
2063 
2064 	case APIC_ICR2:
2065 		if (!apic_x2apic_mode(apic))
2066 			val &= 0xff000000;
2067 		kvm_lapic_set_reg(apic, APIC_ICR2, val);
2068 		break;
2069 
2070 	case APIC_LVT0:
2071 		apic_manage_nmi_watchdog(apic, val);
2072 		fallthrough;
2073 	case APIC_LVTTHMR:
2074 	case APIC_LVTPC:
2075 	case APIC_LVT1:
2076 	case APIC_LVTERR: {
2077 		/* TODO: Check vector */
2078 		size_t size;
2079 		u32 index;
2080 
2081 		if (!kvm_apic_sw_enabled(apic))
2082 			val |= APIC_LVT_MASKED;
2083 		size = ARRAY_SIZE(apic_lvt_mask);
2084 		index = array_index_nospec(
2085 				(reg - APIC_LVTT) >> 4, size);
2086 		val &= apic_lvt_mask[index];
2087 		kvm_lapic_set_reg(apic, reg, val);
2088 		break;
2089 	}
2090 
2091 	case APIC_LVTT:
2092 		if (!kvm_apic_sw_enabled(apic))
2093 			val |= APIC_LVT_MASKED;
2094 		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2095 		kvm_lapic_set_reg(apic, APIC_LVTT, val);
2096 		apic_update_lvtt(apic);
2097 		break;
2098 
2099 	case APIC_TMICT:
2100 		if (apic_lvtt_tscdeadline(apic))
2101 			break;
2102 
2103 		cancel_apic_timer(apic);
2104 		kvm_lapic_set_reg(apic, APIC_TMICT, val);
2105 		start_apic_timer(apic);
2106 		break;
2107 
2108 	case APIC_TDCR: {
2109 		uint32_t old_divisor = apic->divide_count;
2110 
2111 		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2112 		update_divide_count(apic);
2113 		if (apic->divide_count != old_divisor &&
2114 				apic->lapic_timer.period) {
2115 			hrtimer_cancel(&apic->lapic_timer.timer);
2116 			update_target_expiration(apic, old_divisor);
2117 			restart_apic_timer(apic);
2118 		}
2119 		break;
2120 	}
2121 	case APIC_ESR:
2122 		if (apic_x2apic_mode(apic) && val != 0)
2123 			ret = 1;
2124 		break;
2125 
2126 	case APIC_SELF_IPI:
2127 		if (apic_x2apic_mode(apic)) {
2128 			kvm_lapic_reg_write(apic, APIC_ICR,
2129 					    APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
2130 		} else
2131 			ret = 1;
2132 		break;
2133 	default:
2134 		ret = 1;
2135 		break;
2136 	}
2137 
2138 	kvm_recalculate_apic_map(apic->vcpu->kvm);
2139 
2140 	return ret;
2141 }
2142 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
2143 
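/*
 * MMIO write handler for the xAPIC page.  Writes are ignored (or punted
 * to userspace when the MMIO-hole quirk is disabled) while the APIC is
 * hardware-disabled or in x2APIC mode; misaligned and non-32-bit writes
 * are dropped per the SDM.
 */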
2144 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2145 			    gpa_t address, int len, const void *data)
2146 {
2147 	struct kvm_lapic *apic = to_lapic(this);
2148 	unsigned int offset = address - apic->base_address;
2149 	u32 val;
2150 
2151 	if (!apic_mmio_in_range(apic, address))
2152 		return -EOPNOTSUPP;
2153 
2154 	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2155 		if (!kvm_check_has_quirk(vcpu->kvm,
2156 					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2157 			return -EOPNOTSUPP;
2158 
2159 		return 0;
2160 	}
2161 
2162 	/*
2163 	 * APIC registers must be aligned on a 128-bit boundary.
2164 	 * 32/64/128-bit registers must be accessed through 32-bit accesses.
2165 	 * See SDM 8.4.1.
2166 	 */
2167 	if (len != 4 || (offset & 0xf))
2168 		return 0;
2169 
2170 	val = *(u32*)data;
2171 
2172 	kvm_lapic_reg_write(apic, offset & 0xff0, val);
2173 
2174 	return 0;
2175 }
2176 
2177 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2178 {
2179 	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2180 }
2181 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2182 
2183 /* emulate APIC access in a trap manner */
2184 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2185 {
2186 	u32 val = 0;
2187 
2188 	/* hardware has already done the conditional check and instruction decode */
2189 	offset &= 0xff0;
2190 
2191 	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2192 
2193 	/* TODO: optimize to just emulate the side effects without one more write */
2194 	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2195 }
2196 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2197 
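/*
 * Free the in-kernel local APIC: cancel the timer, drop the deferred
 * static-key references taken while the APIC was hardware- or
 * software-disabled, and release the register page.
 */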
2198 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2199 {
2200 	struct kvm_lapic *apic = vcpu->arch.apic;
2201 
2202 	if (!vcpu->arch.apic)
2203 		return;
2204 
2205 	hrtimer_cancel(&apic->lapic_timer.timer);
2206 
2207 	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2208 		static_branch_slow_dec_deferred(&apic_hw_disabled);
2209 
2210 	if (!apic->sw_enabled)
2211 		static_branch_slow_dec_deferred(&apic_sw_disabled);
2212 
2213 	if (apic->regs)
2214 		free_page((unsigned long)apic->regs);
2215 
2216 	kfree(apic);
2217 }
2218 
2219 /*
2220  *----------------------------------------------------------------------
2221  * LAPIC interface
2222  *----------------------------------------------------------------------
2223  */
2224 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2225 {
2226 	struct kvm_lapic *apic = vcpu->arch.apic;
2227 
2228 	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2229 		return 0;
2230 
2231 	return apic->lapic_timer.tscdeadline;
2232 }
2233 
2234 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2235 {
2236 	struct kvm_lapic *apic = vcpu->arch.apic;
2237 
2238 	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2239 		return;
2240 
2241 	hrtimer_cancel(&apic->lapic_timer.timer);
2242 	apic->lapic_timer.tscdeadline = data;
2243 	start_apic_timer(apic);
2244 }
2245 
2246 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2247 {
2248 	struct kvm_lapic *apic = vcpu->arch.apic;
2249 
2250 	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2251 		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2252 }
2253 
2254 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2255 {
2256 	u64 tpr;
2257 
2258 	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2259 
2260 	return (tpr & 0xf0) >> 4;
2261 }
2262 
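/*
 * Handle a change to the APIC base MSR: keep the apic_hw_disabled static
 * key, the x2APIC ID and the MMIO base address in sync with the new
 * value.
 */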
2263 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2264 {
2265 	u64 old_value = vcpu->arch.apic_base;
2266 	struct kvm_lapic *apic = vcpu->arch.apic;
2267 
2268 	if (!apic)
2269 		value |= MSR_IA32_APICBASE_BSP;
2270 
2271 	vcpu->arch.apic_base = value;
2272 
2273 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2274 		kvm_update_cpuid_runtime(vcpu);
2275 
2276 	if (!apic)
2277 		return;
2278 
2279 	/* update jump label if enable bit changes */
2280 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2281 		if (value & MSR_IA32_APICBASE_ENABLE) {
2282 			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2283 			static_branch_slow_dec_deferred(&apic_hw_disabled);
2284 			/* Check if there are APF page ready requests pending */
2285 			kvm_make_request(KVM_REQ_APF_READY, vcpu);
2286 		} else {
2287 			static_branch_inc(&apic_hw_disabled.key);
2288 			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2289 		}
2290 	}
2291 
2292 	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2293 		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2294 
2295 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2296 		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
2297 
2298 	apic->base_address = apic->vcpu->arch.apic_base &
2299 			     MSR_IA32_APICBASE_BASE;
2300 
2301 	if ((value & MSR_IA32_APICBASE_ENABLE) &&
2302 	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
2303 		pr_warn_once("APIC base relocation is unsupported by KVM");
2304 }
2305 
2306 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2307 {
2308 	struct kvm_lapic *apic = vcpu->arch.apic;
2309 
2310 	if (vcpu->arch.apicv_active) {
2311 		/* irr_pending is always true when apicv is activated. */
2312 		apic->irr_pending = true;
2313 		apic->isr_count = 1;
2314 	} else {
2315 		apic->irr_pending = (apic_search_irr(apic) != -1);
2316 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2317 	}
2318 }
2319 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2320 
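/*
 * Reset the local APIC to its architectural power-up state.  For an INIT
 * (@init_event == true) the APIC base address and APIC ID are preserved.
 */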
2321 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2322 {
2323 	struct kvm_lapic *apic = vcpu->arch.apic;
2324 	int i;
2325 
2326 	if (!apic)
2327 		return;
2328 
2329 	/* Stop the timer in case it's a reset to an active apic */
2330 	hrtimer_cancel(&apic->lapic_timer.timer);
2331 
2332 	if (!init_event) {
2333 		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2334 		                         MSR_IA32_APICBASE_ENABLE);
2335 		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2336 	}
2337 	kvm_apic_set_version(apic->vcpu);
2338 
2339 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2340 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2341 	apic_update_lvtt(apic);
2342 	if (kvm_vcpu_is_reset_bsp(vcpu) &&
2343 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2344 		kvm_lapic_set_reg(apic, APIC_LVT0,
2345 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2346 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2347 
2348 	kvm_apic_set_dfr(apic, 0xffffffffU);
2349 	apic_set_spiv(apic, 0xff);
2350 	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2351 	if (!apic_x2apic_mode(apic))
2352 		kvm_apic_set_ldr(apic, 0);
2353 	kvm_lapic_set_reg(apic, APIC_ESR, 0);
2354 	kvm_lapic_set_reg(apic, APIC_ICR, 0);
2355 	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2356 	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2357 	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2358 	for (i = 0; i < 8; i++) {
2359 		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2360 		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2361 		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2362 	}
2363 	kvm_apic_update_apicv(vcpu);
2364 	apic->highest_isr_cache = -1;
2365 	update_divide_count(apic);
2366 	atomic_set(&apic->lapic_timer.pending, 0);
2367 	if (kvm_vcpu_is_bsp(vcpu))
2368 		kvm_lapic_set_base(vcpu,
2369 				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2370 	vcpu->arch.pv_eoi.msr_val = 0;
2371 	apic_update_ppr(apic);
2372 	if (vcpu->arch.apicv_active) {
2373 		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2374 		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
2375 		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
2376 	}
2377 
2378 	vcpu->arch.apic_arb_prio = 0;
2379 	vcpu->arch.apic_attention = 0;
2380 
2381 	kvm_recalculate_apic_map(vcpu->kvm);
2382 }
2383 
2384 /*
2385  *----------------------------------------------------------------------
2386  * timer interface
2387  *----------------------------------------------------------------------
2388  */
2389 
2390 static bool lapic_is_periodic(struct kvm_lapic *apic)
2391 {
2392 	return apic_lvtt_period(apic);
2393 }
2394 
2395 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2396 {
2397 	struct kvm_lapic *apic = vcpu->arch.apic;
2398 
2399 	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2400 		return atomic_read(&apic->lapic_timer.pending);
2401 
2402 	return 0;
2403 }
2404 
2405 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2406 {
2407 	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2408 	int vector, mode, trig_mode;
2409 
2410 	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2411 		vector = reg & APIC_VECTOR_MASK;
2412 		mode = reg & APIC_MODE_MASK;
2413 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2414 		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2415 					NULL);
2416 	}
2417 	return 0;
2418 }
2419 
2420 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2421 {
2422 	struct kvm_lapic *apic = vcpu->arch.apic;
2423 
2424 	if (apic)
2425 		kvm_apic_local_deliver(apic, APIC_LVT0);
2426 }
2427 
2428 static const struct kvm_io_device_ops apic_mmio_ops = {
2429 	.read     = apic_mmio_read,
2430 	.write    = apic_mmio_write,
2431 };
2432 
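/*
 * hrtimer callback for the software timer: inject the tick and, for a
 * periodic timer, re-arm the hrtimer one period in the future.
 */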
2433 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2434 {
2435 	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2436 	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2437 
2438 	apic_timer_expired(apic, true);
2439 
2440 	if (lapic_is_periodic(apic)) {
2441 		advance_periodic_target_expiration(apic);
2442 		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2443 		return HRTIMER_RESTART;
2444 	}
2445 	return HRTIMER_NORESTART;
2446 }
2447 
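/*
 * Allocate and initialize the in-kernel local APIC for @vcpu.  Passing
 * timer_advance_ns == -1 enables dynamic tuning of the timer
 * advancement; any other value fixes it.
 */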
2448 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2449 {
2450 	struct kvm_lapic *apic;
2451 
2452 	ASSERT(vcpu != NULL);
2453 
2454 	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2455 	if (!apic)
2456 		goto nomem;
2457 
2458 	vcpu->arch.apic = apic;
2459 
2460 	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2461 	if (!apic->regs) {
2462 		printk(KERN_ERR "failed to allocate APIC register page for vcpu %x\n",
2463 		       vcpu->vcpu_id);
2464 		goto nomem_free_apic;
2465 	}
2466 	apic->vcpu = vcpu;
2467 
2468 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2469 		     HRTIMER_MODE_ABS_HARD);
2470 	apic->lapic_timer.timer.function = apic_timer_fn;
2471 	if (timer_advance_ns == -1) {
2472 		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2473 		lapic_timer_advance_dynamic = true;
2474 	} else {
2475 		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2476 		lapic_timer_advance_dynamic = false;
2477 	}
2478 
2479 	/*
2480 	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
2481 	 * thinking that APIC state has changed.
2482 	 */
2483 	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2484 	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2485 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2486 
2487 	return 0;
2488 nomem_free_apic:
2489 	kfree(apic);
2490 	vcpu->arch.apic = NULL;
2491 nomem:
2492 	return -ENOMEM;
2493 }
2494 
2495 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2496 {
2497 	struct kvm_lapic *apic = vcpu->arch.apic;
2498 	u32 ppr;
2499 
2500 	if (!kvm_apic_present(vcpu))
2501 		return -1;
2502 
2503 	__apic_update_ppr(apic, &ppr);
2504 	return apic_has_interrupt_for_ppr(apic, ppr);
2505 }
2506 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2507 
2508 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2509 {
2510 	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2511 
2512 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2513 		return 1;
2514 	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2515 	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2516 		return 1;
2517 	return 0;
2518 }
2519 
2520 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2521 {
2522 	struct kvm_lapic *apic = vcpu->arch.apic;
2523 
2524 	if (atomic_read(&apic->lapic_timer.pending) > 0) {
2525 		kvm_apic_inject_pending_timer_irqs(apic);
2526 		atomic_set(&apic->lapic_timer.pending, 0);
2527 	}
2528 }
2529 
2530 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2531 {
2532 	int vector = kvm_apic_has_interrupt(vcpu);
2533 	struct kvm_lapic *apic = vcpu->arch.apic;
2534 	u32 ppr;
2535 
2536 	if (vector == -1)
2537 		return -1;
2538 
2539 	/*
2540 	 * We get here even with APIC virtualization enabled, if doing
2541 	 * nested virtualization and L1 runs with the "acknowledge interrupt
2542 	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
2543 	 * because the process would deliver it through the IDT.
2544 	 * because that process would deliver it through the IDT.
2545 
2546 	apic_clear_irr(vector, apic);
2547 	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
2548 		/*
2549 		 * For auto-EOI interrupts, there might be another pending
2550 		 * interrupt above PPR, so check whether to raise another
2551 		 * KVM_REQ_EVENT.
2552 		 */
2553 		apic_update_ppr(apic);
2554 	} else {
2555 		/*
2556 		 * For normal interrupts, PPR has been raised and there cannot
2557 		 * be a higher-priority pending interrupt---except if there was
2558 		 * a concurrent interrupt injection, but that would have
2559 		 * triggered KVM_REQ_EVENT already.
2560 		 */
2561 		apic_set_isr(vector, apic);
2562 		__apic_update_ppr(apic, &ppr);
2563 	}
2564 
2565 	return vector;
2566 }
2567 
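/*
 * Convert the APIC ID between the xAPIC layout used by legacy userspace
 * and the x2APIC layout, and derive the LDR from the ID when setting
 * state in x2APIC mode; with the strict x2apic_format the ID is only
 * validated, not converted.
 */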
2568 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2569 		struct kvm_lapic_state *s, bool set)
2570 {
2571 	if (apic_x2apic_mode(vcpu->arch.apic)) {
2572 		u32 *id = (u32 *)(s->regs + APIC_ID);
2573 		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2574 
2575 		if (vcpu->kvm->arch.x2apic_format) {
2576 			if (*id != vcpu->vcpu_id)
2577 				return -EINVAL;
2578 		} else {
2579 			if (set)
2580 				*id >>= 24;
2581 			else
2582 				*id <<= 24;
2583 		}
2584 
2585 		/* In x2APIC mode, the LDR is fixed and based on the id */
2586 		if (set)
2587 			*ldr = kvm_apic_calc_x2apic_ldr(*id);
2588 	}
2589 
2590 	return 0;
2591 }
2592 
2593 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2594 {
2595 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2596 
2597 	/*
2598 	 * Get calculated timer current count for remaining timer period (if
2599 	 * any) and store it in the returned register set.
2600 	 */
2601 	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2602 			    __apic_read(vcpu->arch.apic, APIC_TMCCT));
2603 
2604 	return kvm_apic_state_fixup(vcpu, s, false);
2605 }
2606 
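/*
 * Restore APIC register state from userspace and bring all derived state
 * (timer, PPR, APICv, the apic map, ...) back in sync with it.
 */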
2607 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2608 {
2609 	struct kvm_lapic *apic = vcpu->arch.apic;
2610 	int r;
2611 
2612 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2613 	/* set SPIV separately to get count of SW disabled APICs right */
2614 	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2615 
2616 	r = kvm_apic_state_fixup(vcpu, s, true);
2617 	if (r) {
2618 		kvm_recalculate_apic_map(vcpu->kvm);
2619 		return r;
2620 	}
2621 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2622 
2623 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2624 	kvm_recalculate_apic_map(vcpu->kvm);
2625 	kvm_apic_set_version(vcpu);
2626 
2627 	apic_update_ppr(apic);
2628 	hrtimer_cancel(&apic->lapic_timer.timer);
2629 	apic->lapic_timer.expired_tscdeadline = 0;
2630 	apic_update_lvtt(apic);
2631 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2632 	update_divide_count(apic);
2633 	__start_apic_timer(apic, APIC_TMCCT);
2634 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
2635 	kvm_apic_update_apicv(vcpu);
2636 	apic->highest_isr_cache = -1;
2637 	if (vcpu->arch.apicv_active) {
2638 		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2639 		static_call(kvm_x86_hwapic_irr_update)(vcpu,
2640 				apic_find_highest_irr(apic));
2641 		static_call(kvm_x86_hwapic_isr_update)(vcpu,
2642 				apic_find_highest_isr(apic));
2643 	}
2644 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2645 	if (ioapic_in_kernel(vcpu->kvm))
2646 		kvm_rtc_eoi_tracking_restore_one(vcpu);
2647 
2648 	vcpu->arch.apic_arb_prio = 0;
2649 
2650 	return 0;
2651 }
2652 
2653 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2654 {
2655 	struct hrtimer *timer;
2656 
2657 	if (!lapic_in_kernel(vcpu) ||
2658 		kvm_can_post_timer_interrupt(vcpu))
2659 		return;
2660 
2661 	timer = &vcpu->arch.apic->lapic_timer.timer;
2662 	if (hrtimer_cancel(timer))
2663 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2664 }
2665 
2666 /*
2667  * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2668  *
2669  * Detect whether guest triggered PV EOI since the
2670  * last entry. If yes, set EOI on the guest's behalf.
2671  * Clear PV EOI in guest memory in any case.
2672  */
2673 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2674 					struct kvm_lapic *apic)
2675 {
2676 	bool pending;
2677 	int vector;
2678 	/*
2679 	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2680 	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2681 	 *
2682 	 * KVM_APIC_PV_EOI_PENDING is unset:
2683 	 * 	-> host disabled PV EOI.
2684 	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2685 	 * 	-> host enabled PV EOI, guest did not execute EOI yet.
2686 	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2687 	 * 	-> host enabled PV EOI, guest executed EOI.
2688 	 */
2689 	BUG_ON(!pv_eoi_enabled(vcpu));
2690 	pending = pv_eoi_get_pending(vcpu);
2691 	/*
2692 	 * Clear pending bit in any case: it will be set again on vmentry.
2693 	 * While this might not be ideal from performance point of view,
2694 	 * this makes sure pv eoi is only enabled when we know it's safe.
2695 	 */
2696 	pv_eoi_clr_pending(vcpu);
2697 	if (pending)
2698 		return;
2699 	vector = apic_set_eoi(apic);
2700 	trace_kvm_pv_eoi(apic, vector);
2701 }
2702 
2703 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2704 {
2705 	u32 data;
2706 
2707 	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2708 		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2709 
2710 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2711 		return;
2712 
2713 	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2714 				  sizeof(u32)))
2715 		return;
2716 
2717 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
2718 }
2719 
2720 /*
2721  * apic_sync_pv_eoi_to_guest - called before vmentry
2722  *
2723  * Detect whether it's safe to enable PV EOI and
2724  * if yes do so.
2725  */
2726 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2727 					struct kvm_lapic *apic)
2728 {
2729 	if (!pv_eoi_enabled(vcpu) ||
2730 	    /* IRR set or many bits in ISR: could be nested. */
2731 	    apic->irr_pending ||
2732 	    /* Cache not set: could be safe but we don't bother. */
2733 	    apic->highest_isr_cache == -1 ||
2734 	    /* Need EOI to update ioapic. */
2735 	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2736 		/*
2737 		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2738 		 * so we need not do anything here.
2739 		 */
2740 		return;
2741 	}
2742 
2743 	pv_eoi_set_pending(apic->vcpu);
2744 }
2745 
2746 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2747 {
2748 	u32 data, tpr;
2749 	int max_irr, max_isr;
2750 	struct kvm_lapic *apic = vcpu->arch.apic;
2751 
2752 	apic_sync_pv_eoi_to_guest(vcpu, apic);
2753 
2754 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2755 		return;
2756 
2757 	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2758 	max_irr = apic_find_highest_irr(apic);
2759 	if (max_irr < 0)
2760 		max_irr = 0;
2761 	max_isr = apic_find_highest_isr(apic);
2762 	if (max_isr < 0)
2763 		max_isr = 0;
2764 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2765 
2766 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2767 				sizeof(u32));
2768 }
2769 
2770 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2771 {
2772 	if (vapic_addr) {
2773 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2774 					&vcpu->arch.apic->vapic_cache,
2775 					vapic_addr, sizeof(u32)))
2776 			return -EINVAL;
2777 		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2778 	} else {
2779 		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2780 	}
2781 
2782 	vcpu->arch.apic->vapic_addr = vapic_addr;
2783 	return 0;
2784 }
2785 
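/*
 * Emulate a WRMSR in the x2APIC MSR range.  Each MSR maps to the xAPIC
 * register at ((msr - APIC_BASE_MSR) << 4); a 64-bit ICR write is split
 * across APIC_ICR2 (high word) and APIC_ICR (low word).
 */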
2786 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2787 {
2788 	struct kvm_lapic *apic = vcpu->arch.apic;
2789 	u32 reg = (msr - APIC_BASE_MSR) << 4;
2790 
2791 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2792 		return 1;
2793 
2794 	if (reg == APIC_ICR2)
2795 		return 1;
2796 
2797 	/* for an ICR write, set ICR2 (the destination) before the command triggers the IPI */
2798 	if (reg == APIC_ICR)
2799 		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2800 	return kvm_lapic_reg_write(apic, reg, (u32)data);
2801 }
2802 
2803 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2804 {
2805 	struct kvm_lapic *apic = vcpu->arch.apic;
2806 	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2807 
2808 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2809 		return 1;
2810 
2811 	if (reg == APIC_DFR || reg == APIC_ICR2)
2812 		return 1;
2813 
2814 	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2815 		return 1;
2816 	if (reg == APIC_ICR)
2817 		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2818 
2819 	*data = (((u64)high) << 32) | low;
2820 
2821 	return 0;
2822 }
2823 
2824 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2825 {
2826 	struct kvm_lapic *apic = vcpu->arch.apic;
2827 
2828 	if (!lapic_in_kernel(vcpu))
2829 		return 1;
2830 
2831 	/* if this is ICR write vector before command */
2832 	/* for an ICR write, set ICR2 (the destination) before the command triggers the IPI */
2833 		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2834 	return kvm_lapic_reg_write(apic, reg, (u32)data);
2835 }
2836 
2837 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2838 {
2839 	struct kvm_lapic *apic = vcpu->arch.apic;
2840 	u32 low, high = 0;
2841 
2842 	if (!lapic_in_kernel(vcpu))
2843 		return 1;
2844 
2845 	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2846 		return 1;
2847 	if (reg == APIC_ICR)
2848 		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2849 
2850 	*data = (((u64)high) << 32) | low;
2851 
2852 	return 0;
2853 }
2854 
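/*
 * Handler for the PV EOI enable MSR: record the guest's PV EOI flag
 * address and map it through a gfn_to_hva cache, reusing the existing
 * cache when the address is unchanged.
 */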
2855 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2856 {
2857 	u64 addr = data & ~KVM_MSR_ENABLED;
2858 	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2859 	unsigned long new_len;
2860 
2861 	if (!IS_ALIGNED(addr, 4))
2862 		return 1;
2863 
2864 	vcpu->arch.pv_eoi.msr_val = data;
2865 	if (!pv_eoi_enabled(vcpu))
2866 		return 0;
2867 
2868 	if (addr == ghc->gpa && len <= ghc->len)
2869 		new_len = ghc->len;
2870 	else
2871 		new_len = len;
2872 
2873 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2874 }
2875 
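/*
 * Process pending INIT and SIPI events.  While INITs are latched (SMM,
 * VMX root mode, SVM with GIF=0) SIPIs are dropped and the INIT is left
 * pending; a SIPI only takes effect in the wait-for-SIPI state.
 */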
2876 int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2877 {
2878 	struct kvm_lapic *apic = vcpu->arch.apic;
2879 	u8 sipi_vector;
2880 	int r;
2881 	unsigned long pe;
2882 
2883 	if (!lapic_in_kernel(vcpu))
2884 		return 0;
2885 
2886 	/*
2887 	 * Read pending events before calling the check_events
2888 	 * callback.
2889 	 */
2890 	pe = smp_load_acquire(&apic->pending_events);
2891 	if (!pe)
2892 		return 0;
2893 
2894 	if (is_guest_mode(vcpu)) {
2895 		r = kvm_check_nested_events(vcpu);
2896 		if (r < 0)
2897 			return r == -EBUSY ? 0 : r;
2898 		/*
2899 		 * If an event has happened and caused a vmexit,
2900 		 * we know INITs are latched and therefore
2901 		 * we will not incorrectly deliver an APIC
2902 		 * event instead of a vmexit.
2903 		 */
2904 	}
2905 
2906 	/*
2907 	 * INITs are latched while CPU is in specific states
2908 	 * (SMM, VMX root mode, SVM with GIF=0).
2909 	 * Because a CPU cannot be in these states immediately
2910 	 * after it has processed an INIT signal (and thus in
2911 	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2912 	 * and leave the INIT pending.
2913 	 */
2914 	if (kvm_vcpu_latch_init(vcpu)) {
2915 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2916 		if (test_bit(KVM_APIC_SIPI, &pe))
2917 			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2918 		return 0;
2919 	}
2920 
2921 	if (test_bit(KVM_APIC_INIT, &pe)) {
2922 		clear_bit(KVM_APIC_INIT, &apic->pending_events);
2923 		kvm_vcpu_reset(vcpu, true);
2924 		if (kvm_vcpu_is_bsp(apic->vcpu))
2925 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2926 		else
2927 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2928 	}
2929 	if (test_bit(KVM_APIC_SIPI, &pe)) {
2930 		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2931 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2932 			/* evaluate pending_events before reading the vector */
2933 			smp_rmb();
2934 			sipi_vector = apic->sipi_vector;
2935 			kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2936 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2937 		}
2938 	}
2939 	return 0;
2940 }
2941 
2942 void kvm_lapic_exit(void)
2943 {
2944 	static_key_deferred_flush(&apic_hw_disabled);
2945 	static_key_deferred_flush(&apic_sw_disabled);
2946 }
2947