1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 /*
4  * Local APIC virtualization
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2007 Novell
8  * Copyright (C) 2007 Intel
9  * Copyright 2009 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Dor Laor <dor.laor@qumranet.com>
13  *   Gregory Haskins <ghaskins@novell.com>
14  *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
15  *
16  * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17  */
18 
19 #include <linux/kvm_host.h>
20 #include <linux/kvm.h>
21 #include <linux/mm.h>
22 #include <linux/highmem.h>
23 #include <linux/smp.h>
24 #include <linux/hrtimer.h>
25 #include <linux/io.h>
26 #include <linux/export.h>
27 #include <linux/math64.h>
28 #include <linux/slab.h>
29 #include <asm/processor.h>
30 #include <asm/msr.h>
31 #include <asm/page.h>
32 #include <asm/current.h>
33 #include <asm/apicdef.h>
34 #include <asm/delay.h>
35 #include <linux/atomic.h>
36 #include <linux/jump_label.h>
37 #include "kvm_cache_regs.h"
38 #include "irq.h"
39 #include "ioapic.h"
40 #include "trace.h"
41 #include "x86.h"
42 #include "cpuid.h"
43 #include "hyperv.h"
44 
45 #ifndef CONFIG_X86_64
46 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
47 #else
48 #define mod_64(x, y) ((x) % (y))
49 #endif
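
/*
 * For illustration: on 32-bit builds the C '%' operator on u64 operands
 * would pull in a libgcc division helper, so the remainder is rebuilt from
 * div64_u64().  E.g. mod_64(10, 3) expands to 10 - 3 * div64_u64(10, 3)
 * = 10 - 9 = 1, matching 10 % 3 on 64-bit builds.
 */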
50 
51 #define PRId64 "d"
52 #define PRIx64 "llx"
53 #define PRIu64 "u"
54 #define PRIo64 "o"
55 
56 /* 14 is the version for Xeon and Pentium 8.4.8 */
57 #define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
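
/*
 * For illustration: in APIC_VERSION, bits 7:0 hold the version (0x14) and
 * bits 23:16 hold the index of the highest LVT entry.  With the six-entry
 * apic_lvt_mask[] table below (KVM_APIC_LVT_NUM == 6), APIC_VERSION
 * evaluates to 0x14 | (5 << 16) = 0x00050014.
 */
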
58 #define LAPIC_MMIO_LENGTH		(1 << 12)
59 /* The following define is not in apicdef.h */
60 #define MAX_APIC_VECTOR			256
61 #define APIC_VECTORS_PER_REG		32
62 
63 static bool lapic_timer_advance_dynamic __read_mostly;
64 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
65 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
66 #define LAPIC_TIMER_ADVANCE_NS_INIT	1000
67 #define LAPIC_TIMER_ADVANCE_NS_MAX     5000
68 /* step-by-step approximation to mitigate fluctuation */
69 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
70 
71 static inline int apic_test_vector(int vec, void *bitmap)
72 {
73 	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
74 }
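
/*
 * For illustration: the 256 vectors live in eight 32-bit registers spaced
 * 0x10 bytes apart, and the lapic.h helpers split a vector accordingly:
 * REG_POS(vec) is ((vec) >> 5) << 4 and VEC_POS(vec) is (vec) & 31, so
 * e.g. vector 49 maps to bit 17 of the register at offset 0x10.
 */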
75 
76 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
77 {
78 	struct kvm_lapic *apic = vcpu->arch.apic;
79 
80 	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
81 		apic_test_vector(vector, apic->regs + APIC_IRR);
82 }
83 
84 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
85 {
86 	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
87 }
88 
89 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
90 {
91 	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
92 }
93 
94 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
95 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
96 
97 static inline int apic_enabled(struct kvm_lapic *apic)
98 {
99 	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
100 }
101 
102 #define LVT_MASK	\
103 	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
104 
105 #define LINT_MASK	\
106 	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
107 	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
108 
109 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
110 {
111 	return apic->vcpu->vcpu_id;
112 }
113 
114 static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
115 {
116 	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
117 }
118 
119 bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
120 {
121 	return kvm_x86_ops.set_hv_timer
122 	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
123 		    kvm_can_post_timer_interrupt(vcpu));
124 }
125 EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);
126 
127 static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
128 {
129 	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
130 }
131 
132 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
133 		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
134 	switch (map->mode) {
135 	case KVM_APIC_MODE_X2APIC: {
136 		u32 offset = (dest_id >> 16) * 16;
137 		u32 max_apic_id = map->max_apic_id;
138 
139 		if (offset <= max_apic_id) {
140 			u8 cluster_size = min(max_apic_id - offset + 1, 16U);
141 
142 			offset = array_index_nospec(offset, map->max_apic_id + 1);
143 			*cluster = &map->phys_map[offset];
144 			*mask = dest_id & (0xffff >> (16 - cluster_size));
145 		} else {
146 			*mask = 0;
147 		}
148 
149 		return true;
150 		}
151 	case KVM_APIC_MODE_XAPIC_FLAT:
152 		*cluster = map->xapic_flat_map;
153 		*mask = dest_id & 0xff;
154 		return true;
155 	case KVM_APIC_MODE_XAPIC_CLUSTER:
156 		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
157 		*mask = dest_id & 0xf;
158 		return true;
159 	default:
160 		/* Not optimized. */
161 		return false;
162 	}
163 }
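
/*
 * Worked x2APIC example (illustrative): dest_id 0x00020008 addresses
 * cluster 2, so offset = (0x00020008 >> 16) * 16 = 32 and *cluster points
 * at phys_map[32]; *mask = 0x0008 then selects bit 3 of the cluster, i.e.
 * the vCPU with x2APIC ID 32 + 3 = 35.
 */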
164 
165 static void kvm_apic_map_free(struct rcu_head *rcu)
166 {
167 	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
168 
169 	kvfree(map);
170 }
171 
172 /*
173  * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
174  *
175  * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
176  * apic_map_lock held.
177  */
178 enum {
179 	CLEAN,
180 	UPDATE_IN_PROGRESS,
181 	DIRTY
182 };
183 
184 void kvm_recalculate_apic_map(struct kvm *kvm)
185 {
186 	struct kvm_apic_map *new, *old = NULL;
187 	struct kvm_vcpu *vcpu;
188 	unsigned long i;
189 	u32 max_id = 255; /* enough space for any xAPIC ID */
190 
191 	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
192 	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
193 		return;
194 
195 	WARN_ONCE(!irqchip_in_kernel(kvm),
196 		  "Dirty APIC map without an in-kernel local APIC");
197 
198 	mutex_lock(&kvm->arch.apic_map_lock);
199 	/*
200 	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
201 	 * (if clean) or the APIC registers (if dirty).
202 	 */
203 	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
204 				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
205 		/* Someone else has updated the map. */
206 		mutex_unlock(&kvm->arch.apic_map_lock);
207 		return;
208 	}
209 
210 	kvm_for_each_vcpu(i, vcpu, kvm)
211 		if (kvm_apic_present(vcpu))
212 			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
213 
214 	new = kvzalloc(sizeof(struct kvm_apic_map) +
215 	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
216 			   GFP_KERNEL_ACCOUNT);
217 
218 	if (!new)
219 		goto out;
220 
221 	new->max_apic_id = max_id;
222 
223 	kvm_for_each_vcpu(i, vcpu, kvm) {
224 		struct kvm_lapic *apic = vcpu->arch.apic;
225 		struct kvm_lapic **cluster;
226 		u16 mask;
227 		u32 ldr;
228 		u8 xapic_id;
229 		u32 x2apic_id;
230 
231 		if (!kvm_apic_present(vcpu))
232 			continue;
233 
234 		xapic_id = kvm_xapic_id(apic);
235 		x2apic_id = kvm_x2apic_id(apic);
236 
237 		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
238 		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
239 				x2apic_id <= new->max_apic_id)
240 			new->phys_map[x2apic_id] = apic;
241 		/*
242 		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
243 		 * prevent them from masking VCPUs with APIC ID <= 0xff.
244 		 */
245 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
246 			new->phys_map[xapic_id] = apic;
247 
248 		if (!kvm_apic_sw_enabled(apic))
249 			continue;
250 
251 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
252 
253 		if (apic_x2apic_mode(apic)) {
254 			new->mode |= KVM_APIC_MODE_X2APIC;
255 		} else if (ldr) {
256 			ldr = GET_APIC_LOGICAL_ID(ldr);
257 			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
258 				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
259 			else
260 				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
261 		}
262 
263 		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
264 			continue;
265 
266 		if (mask)
267 			cluster[ffs(mask) - 1] = apic;
268 	}
269 out:
270 	old = rcu_dereference_protected(kvm->arch.apic_map,
271 			lockdep_is_held(&kvm->arch.apic_map_lock));
272 	rcu_assign_pointer(kvm->arch.apic_map, new);
273 	/*
274 	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
275 	 * If another update has come in, leave it DIRTY.
276 	 */
277 	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
278 			       UPDATE_IN_PROGRESS, CLEAN);
279 	mutex_unlock(&kvm->arch.apic_map_lock);
280 
281 	if (old)
282 		call_rcu(&old->rcu, kvm_apic_map_free);
283 
284 	kvm_make_scan_ioapic_request(kvm);
285 }
286 
287 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
288 {
289 	bool enabled = val & APIC_SPIV_APIC_ENABLED;
290 
291 	kvm_lapic_set_reg(apic, APIC_SPIV, val);
292 
293 	if (enabled != apic->sw_enabled) {
294 		apic->sw_enabled = enabled;
295 		if (enabled)
296 			static_branch_slow_dec_deferred(&apic_sw_disabled);
297 		else
298 			static_branch_inc(&apic_sw_disabled.key);
299 
300 		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
301 	}
302 
303 	/* Check if there are APF page ready requests pending */
304 	if (enabled)
305 		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
306 }
307 
308 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
309 {
310 	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
311 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
312 }
313 
314 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
315 {
316 	kvm_lapic_set_reg(apic, APIC_LDR, id);
317 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
318 }
319 
320 static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
321 {
322 	kvm_lapic_set_reg(apic, APIC_DFR, val);
323 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
324 }
325 
326 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
327 {
328 	return ((id >> 4) << 16) | (1 << (id & 0xf));
329 }
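
/*
 * For illustration: the x2APIC LDR packs the cluster ID into bits 31:16
 * and a one-hot logical ID into bits 15:0, so APIC ID 0x23 yields
 * (0x2 << 16) | (1 << 3) = 0x00020008.
 */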
330 
331 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
332 {
333 	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
334 
335 	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
336 
337 	kvm_lapic_set_reg(apic, APIC_ID, id);
338 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
339 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
340 }
341 
342 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
343 {
344 	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
345 }
346 
347 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
348 {
349 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
350 }
351 
352 static inline int apic_lvtt_period(struct kvm_lapic *apic)
353 {
354 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
355 }
356 
357 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
358 {
359 	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
360 }
361 
362 static inline int apic_lvt_nmi_mode(u32 lvt_val)
363 {
364 	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
365 }
366 
367 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
368 {
369 	struct kvm_lapic *apic = vcpu->arch.apic;
370 	u32 v = APIC_VERSION;
371 
372 	if (!lapic_in_kernel(vcpu))
373 		return;
374 
375 	/*
376 	 * KVM's in-kernel IOAPIC emulates the 82093AA datasheet, which doesn't
377 	 * have an EOI register.  Some buggy OSes (e.g. Windows with the Hyper-V
378 	 * role) disable EOI broadcast in the LAPIC without checking the IOAPIC
379 	 * version first, so level-triggered interrupts would never get EOIed in
380 	 * the IOAPIC.
381 	 */
382 	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
383 	    !ioapic_in_kernel(vcpu->kvm))
384 		v |= APIC_LVR_DIRECTED_EOI;
385 	kvm_lapic_set_reg(apic, APIC_LVR, v);
386 }
387 
388 static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
389 	LVT_MASK,       /* partial LVTT mask; timer mode mask added at runtime */
390 	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
391 	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
392 	LINT_MASK, LINT_MASK,	/* LVT0-1 */
393 	LVT_MASK		/* LVTERR */
394 };
395 
396 static int find_highest_vector(void *bitmap)
397 {
398 	int vec;
399 	u32 *reg;
400 
401 	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
402 	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
403 		reg = bitmap + REG_POS(vec);
404 		if (*reg)
405 			return __fls(*reg) + vec;
406 	}
407 
408 	return -1;
409 }
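
/*
 * For illustration: with only vectors 48 and 100 pending, the scan starts
 * at the top register and walks down; the first non-zero register is the
 * one covering vectors 96-127, and the result is __fls(1 << 4) + 96 = 100.
 */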
410 
411 static u8 count_vectors(void *bitmap)
412 {
413 	int vec;
414 	u32 *reg;
415 	u8 count = 0;
416 
417 	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
418 		reg = bitmap + REG_POS(vec);
419 		count += hweight32(*reg);
420 	}
421 
422 	return count;
423 }
424 
425 bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
426 {
427 	u32 i, vec;
428 	u32 pir_val, irr_val, prev_irr_val;
429 	int max_updated_irr;
430 
431 	max_updated_irr = -1;
432 	*max_irr = -1;
433 
434 	for (i = vec = 0; i <= 7; i++, vec += 32) {
435 		pir_val = READ_ONCE(pir[i]);
436 		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
437 		if (pir_val) {
438 			prev_irr_val = irr_val;
439 			irr_val |= xchg(&pir[i], 0);
440 			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
441 			if (prev_irr_val != irr_val) {
442 				max_updated_irr =
443 					__fls(irr_val ^ prev_irr_val) + vec;
444 			}
445 		}
446 		if (irr_val)
447 			*max_irr = __fls(irr_val) + vec;
448 	}
449 
450 	return ((max_updated_irr != -1) &&
451 		(max_updated_irr == *max_irr));
452 }
453 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
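
/*
 * Worked example (illustrative): if PIR[1] holds 0x4 (vector 34) and the
 * matching IRR word already holds 0x2 (vector 33), the merge yields 0x6,
 * so max_updated_irr = 34 and *max_irr = 34; the function returns true
 * because the freshly posted vector is also the highest pending one.
 */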
454 
455 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
456 {
457 	struct kvm_lapic *apic = vcpu->arch.apic;
458 
459 	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
460 }
461 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
462 
463 static inline int apic_search_irr(struct kvm_lapic *apic)
464 {
465 	return find_highest_vector(apic->regs + APIC_IRR);
466 }
467 
468 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
469 {
470 	int result;
471 
472 	/*
473 	 * Note that irr_pending is just a hint.  It will always be
474 	 * true with virtual interrupt delivery enabled.
475 	 */
476 	if (!apic->irr_pending)
477 		return -1;
478 
479 	result = apic_search_irr(apic);
480 	ASSERT(result == -1 || result >= 16);
481 
482 	return result;
483 }
484 
485 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
486 {
487 	struct kvm_vcpu *vcpu;
488 
489 	vcpu = apic->vcpu;
490 
491 	if (unlikely(vcpu->arch.apicv_active)) {
492 		/* need to update RVI */
493 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
494 		static_call(kvm_x86_hwapic_irr_update)(vcpu,
495 				apic_find_highest_irr(apic));
496 	} else {
497 		apic->irr_pending = false;
498 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
499 		if (apic_search_irr(apic) != -1)
500 			apic->irr_pending = true;
501 	}
502 }
503 
504 void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
505 {
506 	apic_clear_irr(vec, vcpu->arch.apic);
507 }
508 EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
509 
510 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
511 {
512 	struct kvm_vcpu *vcpu;
513 
514 	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
515 		return;
516 
517 	vcpu = apic->vcpu;
518 
519 	/*
520 	 * With APIC virtualization enabled, all caching is disabled
521 	 * because the processor can modify ISR under the hood.  Instead
522 	 * just set SVI.
523 	 */
524 	if (unlikely(vcpu->arch.apicv_active))
525 		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
526 	else {
527 		++apic->isr_count;
528 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
529 		/*
530 		 * ISR (in service register) bit is set when injecting an interrupt.
531 		 * The highest vector is injected. Thus the latest bit set matches
532 		 * the highest bit in ISR.
533 		 */
534 		apic->highest_isr_cache = vec;
535 	}
536 }
537 
538 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
539 {
540 	int result;
541 
542 	/*
543 	 * Note that isr_count is always 1, and highest_isr_cache
544 	 * is always -1, with APIC virtualization enabled.
545 	 */
546 	if (!apic->isr_count)
547 		return -1;
548 	if (likely(apic->highest_isr_cache != -1))
549 		return apic->highest_isr_cache;
550 
551 	result = find_highest_vector(apic->regs + APIC_ISR);
552 	ASSERT(result == -1 || result >= 16);
553 
554 	return result;
555 }
556 
557 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
558 {
559 	struct kvm_vcpu *vcpu;
560 	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
561 		return;
562 
563 	vcpu = apic->vcpu;
564 
565 	/*
566 	 * We do get here with APIC virtualization enabled if the guest
567 	 * uses the Hyper-V APIC enlightenment.  In this case we may need
568 	 * to trigger a new interrupt delivery by writing the SVI field;
569 	 * on the other hand isr_count and highest_isr_cache are unused
570 	 * and must be left alone.
571 	 */
572 	if (unlikely(vcpu->arch.apicv_active))
573 		static_call(kvm_x86_hwapic_isr_update)(vcpu,
574 						apic_find_highest_isr(apic));
575 	else {
576 		--apic->isr_count;
577 		BUG_ON(apic->isr_count < 0);
578 		apic->highest_isr_cache = -1;
579 	}
580 }
581 
582 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
583 {
584 	/* This may race with the setting of irr in __apic_accept_irq() and the
585 	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq()
586 	 * will cause an immediate vmexit and the value will be recalculated on
587 	 * the next vmentry.
588 	 */
589 	return apic_find_highest_irr(vcpu->arch.apic);
590 }
591 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
592 
593 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
594 			     int vector, int level, int trig_mode,
595 			     struct dest_map *dest_map);
596 
597 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
598 		     struct dest_map *dest_map)
599 {
600 	struct kvm_lapic *apic = vcpu->arch.apic;
601 
602 	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
603 			irq->level, irq->trig_mode, dest_map);
604 }
605 
606 static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
607 			 struct kvm_lapic_irq *irq, u32 min)
608 {
609 	int i, count = 0;
610 	struct kvm_vcpu *vcpu;
611 
612 	if (min > map->max_apic_id)
613 		return 0;
614 
615 	for_each_set_bit(i, ipi_bitmap,
616 		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
617 		if (map->phys_map[min + i]) {
618 			vcpu = map->phys_map[min + i]->vcpu;
619 			count += kvm_apic_set_irq(vcpu, irq, NULL);
620 		}
621 	}
622 
623 	return count;
624 }
625 
626 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
627 		    unsigned long ipi_bitmap_high, u32 min,
628 		    unsigned long icr, int op_64_bit)
629 {
630 	struct kvm_apic_map *map;
631 	struct kvm_lapic_irq irq = {0};
632 	int cluster_size = op_64_bit ? 64 : 32;
633 	int count;
634 
635 	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
636 		return -KVM_EINVAL;
637 
638 	irq.vector = icr & APIC_VECTOR_MASK;
639 	irq.delivery_mode = icr & APIC_MODE_MASK;
640 	irq.level = (icr & APIC_INT_ASSERT) != 0;
641 	irq.trig_mode = icr & APIC_INT_LEVELTRIG;
642 
643 	rcu_read_lock();
644 	map = rcu_dereference(kvm->arch.apic_map);
645 
646 	count = -EOPNOTSUPP;
647 	if (likely(map)) {
648 		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
649 		min += cluster_size;
650 		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
651 	}
652 
653 	rcu_read_unlock();
654 	return count;
655 }
656 
657 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
658 {
659 
660 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
661 				      sizeof(val));
662 }
663 
664 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
665 {
666 
667 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
668 				      sizeof(*val));
669 }
670 
671 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
672 {
673 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
674 }
675 
676 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
677 {
678 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
679 		return;
680 
681 	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
682 }
683 
684 static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
685 {
686 	u8 val;
687 
688 	if (pv_eoi_get_user(vcpu, &val) < 0)
689 		return false;
690 
691 	val &= KVM_PV_EOI_ENABLED;
692 
693 	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
694 		return false;
695 
696 	/*
697 	 * Clear pending bit in any case: it will be set again on vmentry.
698 	 * While this might not be ideal from a performance point of view,
699 	 * this makes sure PV EOI is only enabled when we know it's safe.
700 	 */
701 	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
702 
703 	return val;
704 }
705 
706 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
707 {
708 	int highest_irr;
709 	if (kvm_x86_ops.sync_pir_to_irr)
710 		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
711 	else
712 		highest_irr = apic_find_highest_irr(apic);
713 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
714 		return -1;
715 	return highest_irr;
716 }
717 
718 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
719 {
720 	u32 tpr, isrv, ppr, old_ppr;
721 	int isr;
722 
723 	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
724 	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
725 	isr = apic_find_highest_isr(apic);
726 	isrv = (isr != -1) ? isr : 0;
727 
728 	if ((tpr & 0xf0) >= (isrv & 0xf0))
729 		ppr = tpr & 0xff;
730 	else
731 		ppr = isrv & 0xf0;
732 
733 	*new_ppr = ppr;
734 	if (old_ppr != ppr)
735 		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
736 
737 	return ppr < old_ppr;
738 }
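
/*
 * Worked example (illustrative): with TPR = 0x30 and highest in-service
 * vector 0x51, isrv & 0xf0 = 0x50 outranks tpr & 0xf0 = 0x30, so PPR
 * becomes 0x50 and only vectors in a priority class above 5 can interrupt.
 */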
739 
740 static void apic_update_ppr(struct kvm_lapic *apic)
741 {
742 	u32 ppr;
743 
744 	if (__apic_update_ppr(apic, &ppr) &&
745 	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
746 		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
747 }
748 
749 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
750 {
751 	apic_update_ppr(vcpu->arch.apic);
752 }
753 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
754 
755 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
756 {
757 	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
758 	apic_update_ppr(apic);
759 }
760 
761 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
762 {
763 	return mda == (apic_x2apic_mode(apic) ?
764 			X2APIC_BROADCAST : APIC_BROADCAST);
765 }
766 
767 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
768 {
769 	if (kvm_apic_broadcast(apic, mda))
770 		return true;
771 
772 	if (apic_x2apic_mode(apic))
773 		return mda == kvm_x2apic_id(apic);
774 
775 	/*
776 	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
777 	 * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
778 	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
779 	 * The 0xff check is needed because the xAPIC ID is writeable.
780 	 */
781 	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
782 		return true;
783 
784 	return mda == kvm_xapic_id(apic);
785 }
786 
787 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
788 {
789 	u32 logical_id;
790 
791 	if (kvm_apic_broadcast(apic, mda))
792 		return true;
793 
794 	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
795 
796 	if (apic_x2apic_mode(apic))
797 		return ((logical_id >> 16) == (mda >> 16))
798 		       && (logical_id & mda & 0xffff) != 0;
799 
800 	logical_id = GET_APIC_LOGICAL_ID(logical_id);
801 
802 	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
803 	case APIC_DFR_FLAT:
804 		return (logical_id & mda) != 0;
805 	case APIC_DFR_CLUSTER:
806 		return ((logical_id >> 4) == (mda >> 4))
807 		       && (logical_id & mda & 0xf) != 0;
808 	default:
809 		return false;
810 	}
811 }
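
/*
 * For illustration, in xAPIC flat mode: an APIC whose LDR logical ID is
 * 0x04 (bit 2) matches MDA 0x06 because 0x04 & 0x06 != 0.  In cluster
 * mode the cluster nibbles (bits 7:4) must be equal and at least one of
 * the four per-CPU bits must overlap.
 */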
812 
813 /* The KVM local APIC implementation has two quirks:
814  *
815  *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
816  *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
817  *    KVM doesn't do that aliasing.
818  *
819  *  - in-kernel IOAPIC messages have to be delivered directly to
820  *    x2APIC, because the kernel does not support interrupt remapping.
821  *    In order to support broadcast without interrupt remapping, x2APIC
822  *    rewrites the destination of non-IPI messages from APIC_BROADCAST
823  *    to X2APIC_BROADCAST.
824  *
825  * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
826  * important when userspace wants to use x2APIC-format MSIs, because
827  * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
828  */
829 static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
830 		struct kvm_lapic *source, struct kvm_lapic *target)
831 {
832 	bool ipi = source != NULL;
833 
834 	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
835 	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
836 		return X2APIC_BROADCAST;
837 
838 	return dest_id;
839 }
840 
841 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
842 			   int shorthand, unsigned int dest, int dest_mode)
843 {
844 	struct kvm_lapic *target = vcpu->arch.apic;
845 	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
846 
847 	ASSERT(target);
848 	switch (shorthand) {
849 	case APIC_DEST_NOSHORT:
850 		if (dest_mode == APIC_DEST_PHYSICAL)
851 			return kvm_apic_match_physical_addr(target, mda);
852 		else
853 			return kvm_apic_match_logical_addr(target, mda);
854 	case APIC_DEST_SELF:
855 		return target == source;
856 	case APIC_DEST_ALLINC:
857 		return true;
858 	case APIC_DEST_ALLBUT:
859 		return target != source;
860 	default:
861 		return false;
862 	}
863 }
864 EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
865 
866 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
867 		       const unsigned long *bitmap, u32 bitmap_size)
868 {
869 	u32 mod;
870 	int i, idx = -1;
871 
872 	mod = vector % dest_vcpus;
873 
874 	for (i = 0; i <= mod; i++) {
875 		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
876 		BUG_ON(idx == bitmap_size);
877 	}
878 
879 	return idx;
880 }
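
/*
 * Worked example (illustrative): hashing vector 97 over 4 destination
 * vCPUs gives mod = 97 % 4 = 1, so the loop returns the index of the
 * second set bit in *bitmap, spreading vectors round-robin across the
 * destinations.
 */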
881 
882 static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
883 {
884 	if (!kvm->arch.disabled_lapic_found) {
885 		kvm->arch.disabled_lapic_found = true;
886 		printk(KERN_INFO
887 		       "Disabled LAPIC found during irq injection\n");
888 	}
889 }
890 
891 static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
892 		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
893 {
894 	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
895 		if ((irq->dest_id == APIC_BROADCAST &&
896 				map->mode != KVM_APIC_MODE_X2APIC))
897 			return true;
898 		if (irq->dest_id == X2APIC_BROADCAST)
899 			return true;
900 	} else {
901 		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
902 		if (irq->dest_id == (x2apic_ipi ?
903 		                     X2APIC_BROADCAST : APIC_BROADCAST))
904 			return true;
905 	}
906 
907 	return false;
908 }
909 
910 /* Return true if the interrupt can be handled by using *bitmap as an index
911  * mask for valid destinations in the *dst array.
912  * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
913  * Note: we may have zero kvm_lapic destinations when we return true, which
914  * means that the interrupt should be dropped.  In this case, *bitmap would be
915  * zero and *dst undefined.
916  */
917 static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
918 		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
919 		struct kvm_apic_map *map, struct kvm_lapic ***dst,
920 		unsigned long *bitmap)
921 {
922 	int i, lowest;
923 
924 	if (irq->shorthand == APIC_DEST_SELF && src) {
925 		*dst = src;
926 		*bitmap = 1;
927 		return true;
928 	} else if (irq->shorthand)
929 		return false;
930 
931 	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
932 		return false;
933 
934 	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
935 		if (irq->dest_id > map->max_apic_id) {
936 			*bitmap = 0;
937 		} else {
938 			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
939 			*dst = &map->phys_map[dest_id];
940 			*bitmap = 1;
941 		}
942 		return true;
943 	}
944 
945 	*bitmap = 0;
946 	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
947 				(u16 *)bitmap))
948 		return false;
949 
950 	if (!kvm_lowest_prio_delivery(irq))
951 		return true;
952 
953 	if (!kvm_vector_hashing_enabled()) {
954 		lowest = -1;
955 		for_each_set_bit(i, bitmap, 16) {
956 			if (!(*dst)[i])
957 				continue;
958 			if (lowest < 0)
959 				lowest = i;
960 			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
961 						(*dst)[lowest]->vcpu) < 0)
962 				lowest = i;
963 		}
964 	} else {
965 		if (!*bitmap)
966 			return true;
967 
968 		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
969 				bitmap, 16);
970 
971 		if (!(*dst)[lowest]) {
972 			kvm_apic_disabled_lapic_found(kvm);
973 			*bitmap = 0;
974 			return true;
975 		}
976 	}
977 
978 	*bitmap = (lowest >= 0) ? 1 << lowest : 0;
979 
980 	return true;
981 }
982 
983 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
984 		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
985 {
986 	struct kvm_apic_map *map;
987 	unsigned long bitmap;
988 	struct kvm_lapic **dst = NULL;
989 	int i;
990 	bool ret;
991 
992 	*r = -1;
993 
994 	if (irq->shorthand == APIC_DEST_SELF) {
995 		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
996 		return true;
997 	}
998 
999 	rcu_read_lock();
1000 	map = rcu_dereference(kvm->arch.apic_map);
1001 
1002 	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1003 	if (ret) {
1004 		*r = 0;
1005 		for_each_set_bit(i, &bitmap, 16) {
1006 			if (!dst[i])
1007 				continue;
1008 			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1009 		}
1010 	}
1011 
1012 	rcu_read_unlock();
1013 	return ret;
1014 }
1015 
1016 /*
1017  * This routine tries to handle interrupts in posted mode, here is how
1018  * it deals with different cases:
1019  * - For single-destination interrupts, handle it in posted mode
1020  * - Else if vector hashing is enabled and it is a lowest-priority
1021  *   interrupt, handle it in posted mode and use the following mechanism
1022  *   to find the destination vCPU.
1023  *	1. For lowest-priority interrupts, store all the possible
1024  *	   destination vCPUs in an array.
1025  *	2. Use "guest vector % max number of destination vCPUs" to find
1026  *	   the right destination vCPU in the array for the lowest-priority
1027  *	   interrupt.
1028  * - Otherwise, use remapped mode to inject the interrupt.
1029  */
1030 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
1031 			struct kvm_vcpu **dest_vcpu)
1032 {
1033 	struct kvm_apic_map *map;
1034 	unsigned long bitmap;
1035 	struct kvm_lapic **dst = NULL;
1036 	bool ret = false;
1037 
1038 	if (irq->shorthand)
1039 		return false;
1040 
1041 	rcu_read_lock();
1042 	map = rcu_dereference(kvm->arch.apic_map);
1043 
1044 	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1045 			hweight16(bitmap) == 1) {
1046 		unsigned long i = find_first_bit(&bitmap, 16);
1047 
1048 		if (dst[i]) {
1049 			*dest_vcpu = dst[i]->vcpu;
1050 			ret = true;
1051 		}
1052 	}
1053 
1054 	rcu_read_unlock();
1055 	return ret;
1056 }
1057 
1058 /*
1059  * Add a pending IRQ into lapic.
1060  * Return 1 if successfully added and 0 if discarded.
1061  */
1062 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1063 			     int vector, int level, int trig_mode,
1064 			     struct dest_map *dest_map)
1065 {
1066 	int result = 0;
1067 	struct kvm_vcpu *vcpu = apic->vcpu;
1068 
1069 	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1070 				  trig_mode, vector);
1071 	switch (delivery_mode) {
1072 	case APIC_DM_LOWEST:
1073 		vcpu->arch.apic_arb_prio++;
1074 		fallthrough;
1075 	case APIC_DM_FIXED:
1076 		if (unlikely(trig_mode && !level))
1077 			break;
1078 
1079 		/* FIXME add logic for vcpu on reset */
1080 		if (unlikely(!apic_enabled(apic)))
1081 			break;
1082 
1083 		result = 1;
1084 
1085 		if (dest_map) {
1086 			__set_bit(vcpu->vcpu_id, dest_map->map);
1087 			dest_map->vectors[vcpu->vcpu_id] = vector;
1088 		}
1089 
1090 		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1091 			if (trig_mode)
1092 				kvm_lapic_set_vector(vector,
1093 						     apic->regs + APIC_TMR);
1094 			else
1095 				kvm_lapic_clear_vector(vector,
1096 						       apic->regs + APIC_TMR);
1097 		}
1098 
1099 		if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
1100 			kvm_lapic_set_irr(vector, apic);
1101 			kvm_make_request(KVM_REQ_EVENT, vcpu);
1102 			kvm_vcpu_kick(vcpu);
1103 		} else {
1104 			trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
1105 						   trig_mode, vector);
1106 		}
1107 		break;
1108 
1109 	case APIC_DM_REMRD:
1110 		result = 1;
1111 		vcpu->arch.pv.pv_unhalted = 1;
1112 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1113 		kvm_vcpu_kick(vcpu);
1114 		break;
1115 
1116 	case APIC_DM_SMI:
1117 		result = 1;
1118 		kvm_make_request(KVM_REQ_SMI, vcpu);
1119 		kvm_vcpu_kick(vcpu);
1120 		break;
1121 
1122 	case APIC_DM_NMI:
1123 		result = 1;
1124 		kvm_inject_nmi(vcpu);
1125 		kvm_vcpu_kick(vcpu);
1126 		break;
1127 
1128 	case APIC_DM_INIT:
1129 		if (!trig_mode || level) {
1130 			result = 1;
1131 			/* assumes that there are only KVM_APIC_INIT/SIPI */
1132 			apic->pending_events = (1UL << KVM_APIC_INIT);
1133 			kvm_make_request(KVM_REQ_EVENT, vcpu);
1134 			kvm_vcpu_kick(vcpu);
1135 		}
1136 		break;
1137 
1138 	case APIC_DM_STARTUP:
1139 		result = 1;
1140 		apic->sipi_vector = vector;
1141 		/* make sure sipi_vector is visible to the receiver */
1142 		smp_wmb();
1143 		set_bit(KVM_APIC_SIPI, &apic->pending_events);
1144 		kvm_make_request(KVM_REQ_EVENT, vcpu);
1145 		kvm_vcpu_kick(vcpu);
1146 		break;
1147 
1148 	case APIC_DM_EXTINT:
1149 		/*
1150 		 * Should only be called by kvm_apic_local_deliver() with LVT0,
1151 		 * before NMI watchdog was enabled. Already handled by
1152 		 * kvm_apic_accept_pic_intr().
1153 		 */
1154 		break;
1155 
1156 	default:
1157 		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1158 		       delivery_mode);
1159 		break;
1160 	}
1161 	return result;
1162 }
1163 
1164 /*
1165  * This routine identifies the mask of destination vcpus meant to receive
1166  * the IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to
1167  * find the destination vcpu array and set the bitmap, or it walks each
1168  * available vcpu and matches the destination against it.
1169  */
1170 void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
1171 			      unsigned long *vcpu_bitmap)
1172 {
1173 	struct kvm_lapic **dest_vcpu = NULL;
1174 	struct kvm_lapic *src = NULL;
1175 	struct kvm_apic_map *map;
1176 	struct kvm_vcpu *vcpu;
1177 	unsigned long bitmap, i;
1178 	int vcpu_idx;
1179 	bool ret;
1180 
1181 	rcu_read_lock();
1182 	map = rcu_dereference(kvm->arch.apic_map);
1183 
1184 	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1185 					  &bitmap);
1186 	if (ret) {
1187 		for_each_set_bit(i, &bitmap, 16) {
1188 			if (!dest_vcpu[i])
1189 				continue;
1190 			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1191 			__set_bit(vcpu_idx, vcpu_bitmap);
1192 		}
1193 	} else {
1194 		kvm_for_each_vcpu(i, vcpu, kvm) {
1195 			if (!kvm_apic_present(vcpu))
1196 				continue;
1197 			if (!kvm_apic_match_dest(vcpu, NULL,
1198 						 irq->shorthand,
1199 						 irq->dest_id,
1200 						 irq->dest_mode))
1201 				continue;
1202 			__set_bit(i, vcpu_bitmap);
1203 		}
1204 	}
1205 	rcu_read_unlock();
1206 }
1207 
1208 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1209 {
1210 	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1211 }
1212 
1213 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1214 {
1215 	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1216 }
1217 
1218 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1219 {
1220 	int trigger_mode;
1221 
1222 	/* EOI the IOAPIC only if the IOAPIC handles the vector. */
1223 	if (!kvm_ioapic_handles_vector(apic, vector))
1224 		return;
1225 
1226 	/* Request a KVM exit to inform the userspace IOAPIC. */
1227 	if (irqchip_split(apic->vcpu->kvm)) {
1228 		apic->vcpu->arch.pending_ioapic_eoi = vector;
1229 		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1230 		return;
1231 	}
1232 
1233 	if (apic_test_vector(vector, apic->regs + APIC_TMR))
1234 		trigger_mode = IOAPIC_LEVEL_TRIG;
1235 	else
1236 		trigger_mode = IOAPIC_EDGE_TRIG;
1237 
1238 	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1239 }
1240 
1241 static int apic_set_eoi(struct kvm_lapic *apic)
1242 {
1243 	int vector = apic_find_highest_isr(apic);
1244 
1245 	trace_kvm_eoi(apic, vector);
1246 
1247 	/*
1248 	 * Not every EOI write has a corresponding bit set in the ISR; one
1249 	 * example is when the kernel checks the timer in setup_IO_APIC().
1250 	 */
1251 	if (vector == -1)
1252 		return vector;
1253 
1254 	apic_clear_isr(vector, apic);
1255 	apic_update_ppr(apic);
1256 
1257 	if (to_hv_vcpu(apic->vcpu) &&
1258 	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
1259 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
1260 
1261 	kvm_ioapic_send_eoi(apic, vector);
1262 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1263 	return vector;
1264 }
1265 
1266 /*
1267  * This interface assumes a trap-like exit, which has already finished
1268  * the desired side effects, including the vISR and vPPR updates.
1269  */
1270 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1271 {
1272 	struct kvm_lapic *apic = vcpu->arch.apic;
1273 
1274 	trace_kvm_eoi(apic, vector);
1275 
1276 	kvm_ioapic_send_eoi(apic, vector);
1277 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1278 }
1279 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1280 
1281 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1282 {
1283 	struct kvm_lapic_irq irq;
1284 
1285 	irq.vector = icr_low & APIC_VECTOR_MASK;
1286 	irq.delivery_mode = icr_low & APIC_MODE_MASK;
1287 	irq.dest_mode = icr_low & APIC_DEST_MASK;
1288 	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1289 	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1290 	irq.shorthand = icr_low & APIC_SHORT_MASK;
1291 	irq.msi_redir_hint = false;
1292 	if (apic_x2apic_mode(apic))
1293 		irq.dest_id = icr_high;
1294 	else
1295 		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1296 
1297 	trace_kvm_apic_ipi(icr_low, irq.dest_id);
1298 
1299 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1300 }
1301 
1302 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1303 {
1304 	ktime_t remaining, now;
1305 	s64 ns;
1306 	u32 tmcct;
1307 
1308 	ASSERT(apic != NULL);
1309 
1310 	/* if initial count is 0, current count should also be 0 */
1311 	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1312 		apic->lapic_timer.period == 0)
1313 		return 0;
1314 
1315 	now = ktime_get();
1316 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1317 	if (ktime_to_ns(remaining) < 0)
1318 		remaining = 0;
1319 
1320 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1321 	tmcct = div64_u64(ns,
1322 			 (APIC_BUS_CYCLE_NS * apic->divide_count));
1323 
1324 	return tmcct;
1325 }
1326 
1327 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1328 {
1329 	struct kvm_vcpu *vcpu = apic->vcpu;
1330 	struct kvm_run *run = vcpu->run;
1331 
1332 	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1333 	run->tpr_access.rip = kvm_rip_read(vcpu);
1334 	run->tpr_access.is_write = write;
1335 }
1336 
1337 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1338 {
1339 	if (apic->vcpu->arch.tpr_access_reporting)
1340 		__report_tpr_access(apic, write);
1341 }
1342 
1343 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1344 {
1345 	u32 val = 0;
1346 
1347 	if (offset >= LAPIC_MMIO_LENGTH)
1348 		return 0;
1349 
1350 	switch (offset) {
1351 	case APIC_ARBPRI:
1352 		break;
1353 
1354 	case APIC_TMCCT:	/* Timer CCR */
1355 		if (apic_lvtt_tscdeadline(apic))
1356 			return 0;
1357 
1358 		val = apic_get_tmcct(apic);
1359 		break;
1360 	case APIC_PROCPRI:
1361 		apic_update_ppr(apic);
1362 		val = kvm_lapic_get_reg(apic, offset);
1363 		break;
1364 	case APIC_TASKPRI:
1365 		report_tpr_access(apic, false);
1366 		fallthrough;
1367 	default:
1368 		val = kvm_lapic_get_reg(apic, offset);
1369 		break;
1370 	}
1371 
1372 	return val;
1373 }
1374 
1375 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1376 {
1377 	return container_of(dev, struct kvm_lapic, dev);
1378 }
1379 
1380 #define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
1381 #define APIC_REGS_MASK(first, count) \
1382 	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
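
/*
 * For illustration: APIC registers sit on 16-byte boundaries, so
 * (reg) >> 4 is a unique bit index; APIC_REG_MASK(APIC_SPIV) is bit 15
 * (0xf0 >> 4), and APIC_REGS_MASK(APIC_ISR, 8) sets eight consecutive
 * bits, one per 32-bit ISR word.
 */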
1383 
1384 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1385 		void *data)
1386 {
1387 	unsigned char alignment = offset & 0xf;
1388 	u32 result;
1389 	/* this bitmask has a bit cleared for each reserved register */
1390 	u64 valid_reg_mask =
1391 		APIC_REG_MASK(APIC_ID) |
1392 		APIC_REG_MASK(APIC_LVR) |
1393 		APIC_REG_MASK(APIC_TASKPRI) |
1394 		APIC_REG_MASK(APIC_PROCPRI) |
1395 		APIC_REG_MASK(APIC_LDR) |
1396 		APIC_REG_MASK(APIC_DFR) |
1397 		APIC_REG_MASK(APIC_SPIV) |
1398 		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1399 		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1400 		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1401 		APIC_REG_MASK(APIC_ESR) |
1402 		APIC_REG_MASK(APIC_ICR) |
1403 		APIC_REG_MASK(APIC_ICR2) |
1404 		APIC_REG_MASK(APIC_LVTT) |
1405 		APIC_REG_MASK(APIC_LVTTHMR) |
1406 		APIC_REG_MASK(APIC_LVTPC) |
1407 		APIC_REG_MASK(APIC_LVT0) |
1408 		APIC_REG_MASK(APIC_LVT1) |
1409 		APIC_REG_MASK(APIC_LVTERR) |
1410 		APIC_REG_MASK(APIC_TMICT) |
1411 		APIC_REG_MASK(APIC_TMCCT) |
1412 		APIC_REG_MASK(APIC_TDCR);
1413 
1414 	/* ARBPRI is not valid on x2APIC */
1415 	if (!apic_x2apic_mode(apic))
1416 		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
1417 
1418 	if (alignment + len > 4)
1419 		return 1;
1420 
1421 	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
1422 		return 1;
1423 
1424 	result = __apic_read(apic, offset & ~0xf);
1425 
1426 	trace_kvm_apic_read(offset, result);
1427 
1428 	switch (len) {
1429 	case 1:
1430 	case 2:
1431 	case 4:
1432 		memcpy(data, (char *)&result + alignment, len);
1433 		break;
1434 	default:
1435 		printk(KERN_ERR "Local APIC read with len = %x, "
1436 		       "should be 1, 2, or 4 instead\n", len);
1437 		break;
1438 	}
1439 	return 0;
1440 }
1441 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1442 
1443 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1444 {
1445 	return addr >= apic->base_address &&
1446 		addr < apic->base_address + LAPIC_MMIO_LENGTH;
1447 }
1448 
1449 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1450 			   gpa_t address, int len, void *data)
1451 {
1452 	struct kvm_lapic *apic = to_lapic(this);
1453 	u32 offset = address - apic->base_address;
1454 
1455 	if (!apic_mmio_in_range(apic, address))
1456 		return -EOPNOTSUPP;
1457 
1458 	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1459 		if (!kvm_check_has_quirk(vcpu->kvm,
1460 					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1461 			return -EOPNOTSUPP;
1462 
1463 		memset(data, 0xff, len);
1464 		return 0;
1465 	}
1466 
1467 	kvm_lapic_reg_read(apic, offset, len, data);
1468 
1469 	return 0;
1470 }
1471 
1472 static void update_divide_count(struct kvm_lapic *apic)
1473 {
1474 	u32 tmp1, tmp2, tdcr;
1475 
1476 	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1477 	tmp1 = tdcr & 0xf;
1478 	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1479 	apic->divide_count = 0x1 << (tmp2 & 0x7);
1480 }
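
/*
 * Worked example (illustrative): TDCR = 0xb gives tmp1 = 0xb,
 * tmp2 = (0x3 | (0x8 >> 1)) + 1 = 8, and 1 << (8 & 7) = 1, i.e.
 * divide-by-1, while TDCR = 0x0 gives tmp2 = 1 and divide-by-2, matching
 * the SDM's divide-configuration encoding.
 */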
1481 
1482 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1483 {
1484 	/*
1485 	 * Do not allow the guest to program periodic timers with a small
1486 	 * interval, since the hrtimers are not throttled by the host
1487 	 * scheduler.
1488 	 */
1489 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1490 		s64 min_period = min_timer_period_us * 1000LL;
1491 
1492 		if (apic->lapic_timer.period < min_period) {
1493 			pr_info_ratelimited(
1494 			    "kvm: vcpu %i: requested %lld ns "
1495 			    "lapic timer period limited to %lld ns\n",
1496 			    apic->vcpu->vcpu_id,
1497 			    apic->lapic_timer.period, min_period);
1498 			apic->lapic_timer.period = min_period;
1499 		}
1500 	}
1501 }
1502 
1503 static void cancel_hv_timer(struct kvm_lapic *apic);
1504 
1505 static void cancel_apic_timer(struct kvm_lapic *apic)
1506 {
1507 	hrtimer_cancel(&apic->lapic_timer.timer);
1508 	preempt_disable();
1509 	if (apic->lapic_timer.hv_timer_in_use)
1510 		cancel_hv_timer(apic);
1511 	preempt_enable();
1512 }
1513 
1514 static void apic_update_lvtt(struct kvm_lapic *apic)
1515 {
1516 	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1517 			apic->lapic_timer.timer_mode_mask;
1518 
1519 	if (apic->lapic_timer.timer_mode != timer_mode) {
1520 		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1521 				APIC_LVT_TIMER_TSCDEADLINE)) {
1522 			cancel_apic_timer(apic);
1523 			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1524 			apic->lapic_timer.period = 0;
1525 			apic->lapic_timer.tscdeadline = 0;
1526 		}
1527 		apic->lapic_timer.timer_mode = timer_mode;
1528 		limit_periodic_timer_frequency(apic);
1529 	}
1530 }
1531 
1532 /*
1533  * On APICv, this test will cause a busy wait even
1534  * while a higher-priority task is running.
1535  */
1536 
1537 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1538 {
1539 	struct kvm_lapic *apic = vcpu->arch.apic;
1540 	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1541 
1542 	if (kvm_apic_hw_enabled(apic)) {
1543 		int vec = reg & APIC_VECTOR_MASK;
1544 		void *bitmap = apic->regs + APIC_ISR;
1545 
1546 		if (vcpu->arch.apicv_active)
1547 			bitmap = apic->regs + APIC_IRR;
1548 
1549 		if (apic_test_vector(vec, bitmap))
1550 			return true;
1551 	}
1552 	return false;
1553 }
1554 
1555 static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1556 {
1557 	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1558 
1559 	/*
1560 	 * If the guest TSC is running at a different ratio than the host, then
1561 	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
1562 	 * that __delay() uses delay_tsc whenever the hardware has a TSC, and
1563 	 * thus always on VMX-enabled hardware.
1564 	 */
1565 	if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
1566 		__delay(min(guest_cycles,
1567 			nsec_to_cycles(vcpu, timer_advance_ns)));
1568 	} else {
1569 		u64 delay_ns = guest_cycles * 1000000ULL;
1570 		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1571 		ndelay(min_t(u32, delay_ns, timer_advance_ns));
1572 	}
1573 }
1574 
1575 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1576 					      s64 advance_expire_delta)
1577 {
1578 	struct kvm_lapic *apic = vcpu->arch.apic;
1579 	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1580 	u64 ns;
1581 
1582 	/* Do not adjust for tiny fluctuations or large random spikes. */
1583 	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1584 	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1585 		return;
1586 
1587 	/* too early */
1588 	if (advance_expire_delta < 0) {
1589 		ns = -advance_expire_delta * 1000000ULL;
1590 		do_div(ns, vcpu->arch.virtual_tsc_khz);
1591 		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1592 	} else {
1593 	/* too late */
1594 		ns = advance_expire_delta * 1000000ULL;
1595 		do_div(ns, vcpu->arch.virtual_tsc_khz);
1596 		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1597 	}
1598 
1599 	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1600 		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1601 	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1602 }
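
/*
 * Worked example (illustrative): if the interrupt arrived 800 TSC cycles
 * too early on a 2 GHz guest TSC (virtual_tsc_khz = 2000000), then
 * ns = 800 * 1000000 / 2000000 = 400 and the advance shrinks by
 * 400 / 8 = 50 ns, converging on the true overhead over many expirations.
 */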
1603 
1604 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1605 {
1606 	struct kvm_lapic *apic = vcpu->arch.apic;
1607 	u64 guest_tsc, tsc_deadline;
1608 
1609 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1610 	apic->lapic_timer.expired_tscdeadline = 0;
1611 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1612 	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1613 
1614 	if (lapic_timer_advance_dynamic) {
1615 		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1616 		/*
1617 		 * If the timer fired early, reread the TSC to account for the
1618 		 * overhead of the above adjustment to avoid waiting longer
1619 		 * than is necessary.
1620 		 */
1621 		if (guest_tsc < tsc_deadline)
1622 			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1623 	}
1624 
1625 	if (guest_tsc < tsc_deadline)
1626 		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1627 }
1628 
1629 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1630 {
1631 	if (lapic_in_kernel(vcpu) &&
1632 	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1633 	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1634 	    lapic_timer_int_injected(vcpu))
1635 		__kvm_wait_lapic_expire(vcpu);
1636 }
1637 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1638 
1639 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1640 {
1641 	struct kvm_timer *ktimer = &apic->lapic_timer;
1642 
1643 	kvm_apic_local_deliver(apic, APIC_LVTT);
1644 	if (apic_lvtt_tscdeadline(apic)) {
1645 		ktimer->tscdeadline = 0;
1646 	} else if (apic_lvtt_oneshot(apic)) {
1647 		ktimer->tscdeadline = 0;
1648 		ktimer->target_expiration = 0;
1649 	}
1650 }
1651 
1652 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1653 {
1654 	struct kvm_vcpu *vcpu = apic->vcpu;
1655 	struct kvm_timer *ktimer = &apic->lapic_timer;
1656 
1657 	if (atomic_read(&apic->lapic_timer.pending))
1658 		return;
1659 
1660 	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1661 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
1662 
1663 	if (!from_timer_fn && vcpu->arch.apicv_active) {
1664 		WARN_ON(kvm_get_running_vcpu() != vcpu);
1665 		kvm_apic_inject_pending_timer_irqs(apic);
1666 		return;
1667 	}
1668 
1669 	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1670 		/*
1671 		 * Ensure the guest's timer has truly expired before posting an
1672 		 * interrupt.  Open code the relevant checks to avoid querying
1673 		 * lapic_timer_int_injected(), which will be false since the
1674 		 * interrupt isn't yet injected.  Waiting until after injecting
1675 		 * is not an option since that won't help a posted interrupt.
1676 		 */
1677 		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1678 		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
1679 			__kvm_wait_lapic_expire(vcpu);
1680 		kvm_apic_inject_pending_timer_irqs(apic);
1681 		return;
1682 	}
1683 
1684 	atomic_inc(&apic->lapic_timer.pending);
1685 	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1686 	if (from_timer_fn)
1687 		kvm_vcpu_kick(vcpu);
1688 }
1689 
1690 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1691 {
1692 	struct kvm_timer *ktimer = &apic->lapic_timer;
1693 	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1694 	u64 ns = 0;
1695 	ktime_t expire;
1696 	struct kvm_vcpu *vcpu = apic->vcpu;
1697 	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1698 	unsigned long flags;
1699 	ktime_t now;
1700 
1701 	if (unlikely(!tscdeadline || !this_tsc_khz))
1702 		return;
1703 
1704 	local_irq_save(flags);
1705 
1706 	now = ktime_get();
1707 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1708 
1709 	ns = (tscdeadline - guest_tsc) * 1000000ULL;
1710 	do_div(ns, this_tsc_khz);
1711 
1712 	if (likely(tscdeadline > guest_tsc) &&
1713 	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
1714 		expire = ktime_add_ns(now, ns);
1715 		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1716 		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1717 	} else
1718 		apic_timer_expired(apic, false);
1719 
1720 	local_irq_restore(flags);
1721 }
1722 
1723 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1724 {
1725 	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1726 }
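
/*
 * For illustration, assuming APIC_BUS_CYCLE_NS == 1 as defined in lapic.h:
 * TMICT = 1000000 with a divide_count of 16 programs a period of
 * 1000000 * 1 * 16 ns = 16 ms.
 */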
1727 
1728 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1729 {
1730 	ktime_t now, remaining;
1731 	u64 ns_remaining_old, ns_remaining_new;
1732 
1733 	apic->lapic_timer.period =
1734 			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1735 	limit_periodic_timer_frequency(apic);
1736 
1737 	now = ktime_get();
1738 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1739 	if (ktime_to_ns(remaining) < 0)
1740 		remaining = 0;
1741 
1742 	ns_remaining_old = ktime_to_ns(remaining);
1743 	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1744 	                                   apic->divide_count, old_divisor);
1745 
1746 	apic->lapic_timer.tscdeadline +=
1747 		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1748 		nsec_to_cycles(apic->vcpu, ns_remaining_old);
1749 	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1750 }
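
/*
 * Worked example (illustrative): if 1 ms remains and the guest changes the
 * divider so divide_count goes from 2 to 4, the remaining time is rescaled
 * to 1 ms * 4 / 2 = 2 ms, and both tscdeadline and target_expiration are
 * pushed out accordingly.
 */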
1751 
1752 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1753 {
1754 	ktime_t now;
1755 	u64 tscl = rdtsc();
1756 	s64 deadline;
1757 
1758 	now = ktime_get();
1759 	apic->lapic_timer.period =
1760 			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1761 
1762 	if (!apic->lapic_timer.period) {
1763 		apic->lapic_timer.tscdeadline = 0;
1764 		return false;
1765 	}
1766 
1767 	limit_periodic_timer_frequency(apic);
1768 	deadline = apic->lapic_timer.period;
1769 
1770 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1771 		if (unlikely(count_reg != APIC_TMICT)) {
1772 			deadline = tmict_to_ns(apic,
1773 				     kvm_lapic_get_reg(apic, count_reg));
1774 			if (unlikely(deadline <= 0))
1775 				deadline = apic->lapic_timer.period;
1776 			else if (unlikely(deadline > apic->lapic_timer.period)) {
1777 				pr_info_ratelimited(
1778 				    "kvm: vcpu %i: requested lapic timer restore with "
1779 				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1780 				    "Using initial count to start timer.\n",
1781 				    apic->vcpu->vcpu_id,
1782 				    count_reg,
1783 				    kvm_lapic_get_reg(apic, count_reg),
1784 				    deadline, apic->lapic_timer.period);
1785 				kvm_lapic_set_reg(apic, count_reg, 0);
1786 				deadline = apic->lapic_timer.period;
1787 			}
1788 		}
1789 	}
1790 
1791 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1792 		nsec_to_cycles(apic->vcpu, deadline);
1793 	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1794 
1795 	return true;
1796 }
1797 
1798 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1799 {
1800 	ktime_t now = ktime_get();
1801 	u64 tscl = rdtsc();
1802 	ktime_t delta;
1803 
1804 	/*
1805 	 * Synchronize both deadlines to the same time source or
1806 	 * differences in the periods (caused by differences in the
1807 	 * underlying clocks or numerical approximation errors) will
1808 	 * cause the two to drift apart over time as the errors
1809 	 * accumulate.
1810 	 */
1811 	apic->lapic_timer.target_expiration =
1812 		ktime_add_ns(apic->lapic_timer.target_expiration,
1813 				apic->lapic_timer.period);
1814 	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1815 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1816 		nsec_to_cycles(apic->vcpu, delta);
1817 }
1818 
1819 static void start_sw_period(struct kvm_lapic *apic)
1820 {
1821 	if (!apic->lapic_timer.period)
1822 		return;
1823 
1824 	if (ktime_after(ktime_get(),
1825 			apic->lapic_timer.target_expiration)) {
1826 		apic_timer_expired(apic, false);
1827 
1828 		if (apic_lvtt_oneshot(apic))
1829 			return;
1830 
1831 		advance_periodic_target_expiration(apic);
1832 	}
1833 
1834 	hrtimer_start(&apic->lapic_timer.timer,
1835 		apic->lapic_timer.target_expiration,
1836 		HRTIMER_MODE_ABS_HARD);
1837 }
1838 
1839 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1840 {
1841 	if (!lapic_in_kernel(vcpu))
1842 		return false;
1843 
1844 	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1845 }
1846 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1847 
1848 static void cancel_hv_timer(struct kvm_lapic *apic)
1849 {
1850 	WARN_ON(preemptible());
1851 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1852 	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1853 	apic->lapic_timer.hv_timer_in_use = false;
1854 }
1855 
1856 static bool start_hv_timer(struct kvm_lapic *apic)
1857 {
1858 	struct kvm_timer *ktimer = &apic->lapic_timer;
1859 	struct kvm_vcpu *vcpu = apic->vcpu;
1860 	bool expired;
1861 
1862 	WARN_ON(preemptible());
1863 	if (!kvm_can_use_hv_timer(vcpu))
1864 		return false;
1865 
1866 	if (!ktimer->tscdeadline)
1867 		return false;
1868 
1869 	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1870 		return false;
1871 
1872 	ktimer->hv_timer_in_use = true;
1873 	hrtimer_cancel(&ktimer->timer);
1874 
1875 	/*
1876 	 * To simplify handling the periodic timer, leave the hv timer running
1877 	 * even if the deadline timer has expired, i.e. rely on the resulting
1878 	 * VM-Exit to recompute the periodic timer's target expiration.
1879 	 */
1880 	if (!apic_lvtt_period(apic)) {
1881 		/*
1882 		 * Cancel the hv timer if the sw timer fired while the hv timer
1883 		 * was being programmed, or if the hv timer itself expired.
1884 		 */
1885 		if (atomic_read(&ktimer->pending)) {
1886 			cancel_hv_timer(apic);
1887 		} else if (expired) {
1888 			apic_timer_expired(apic, false);
1889 			cancel_hv_timer(apic);
1890 		}
1891 	}
1892 
1893 	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1894 
1895 	return true;
1896 }
1897 
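/*
 * Fall back to the hrtimer-based software timer, cancelling the hv timer if
 * it is currently in use.  A non-periodic timer whose expiration is already
 * pending needs no re-arming.
 */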
1898 static void start_sw_timer(struct kvm_lapic *apic)
1899 {
1900 	struct kvm_timer *ktimer = &apic->lapic_timer;
1901 
1902 	WARN_ON(preemptible());
1903 	if (apic->lapic_timer.hv_timer_in_use)
1904 		cancel_hv_timer(apic);
1905 	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1906 		return;
1907 
1908 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1909 		start_sw_period(apic);
1910 	else if (apic_lvtt_tscdeadline(apic))
1911 		start_sw_tscdeadline(apic);
1912 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1913 }
1914 
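/* (Re)start the timer, preferring the hv timer and falling back to software. */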
1915 static void restart_apic_timer(struct kvm_lapic *apic)
1916 {
1917 	preempt_disable();
1918 
1919 	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1920 		goto out;
1921 
1922 	if (!start_hv_timer(apic))
1923 		start_sw_timer(apic);
1924 out:
1925 	preempt_enable();
1926 }
1927 
1928 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1929 {
1930 	struct kvm_lapic *apic = vcpu->arch.apic;
1931 
1932 	preempt_disable();
1933 	/* If the preempt notifier has already run, it also called apic_timer_expired */
1934 	if (!apic->lapic_timer.hv_timer_in_use)
1935 		goto out;
1936 	WARN_ON(kvm_vcpu_is_blocking(vcpu));
1937 	apic_timer_expired(apic, false);
1938 	cancel_hv_timer(apic);
1939 
1940 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1941 		advance_periodic_target_expiration(apic);
1942 		restart_apic_timer(apic);
1943 	}
1944 out:
1945 	preempt_enable();
1946 }
1947 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1948 
1949 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1950 {
1951 	restart_apic_timer(vcpu->arch.apic);
1952 }
1953 
1954 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1955 {
1956 	struct kvm_lapic *apic = vcpu->arch.apic;
1957 
1958 	preempt_disable();
1959 	/* Possibly the TSC deadline timer is not enabled yet */
1960 	if (apic->lapic_timer.hv_timer_in_use)
1961 		start_sw_timer(apic);
1962 	preempt_enable();
1963 }
1964 
1965 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1966 {
1967 	struct kvm_lapic *apic = vcpu->arch.apic;
1968 
1969 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1970 	restart_apic_timer(apic);
1971 }
1972 
1973 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
1974 {
1975 	atomic_set(&apic->lapic_timer.pending, 0);
1976 
1977 	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1978 	    && !set_target_expiration(apic, count_reg))
1979 		return;
1980 
1981 	restart_apic_timer(apic);
1982 }
1983 
1984 static void start_apic_timer(struct kvm_lapic *apic)
1985 {
1986 	__start_apic_timer(apic, APIC_TMICT);
1987 }
1988 
1989 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1990 {
1991 	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1992 
1993 	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1994 		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1995 		if (lvt0_in_nmi_mode)
1996 			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1997 		else
1998 			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1999 	}
2000 }
2001 
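/*
 * Emulate a 32-bit write to a local APIC register.  Returns 0 on success and
 * 1 for accesses that are reserved or illegal in the current mode, e.g.
 * writing APIC_ID or APIC_LDR in x2APIC mode.
 */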
2002 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2003 {
2004 	int ret = 0;
2005 
2006 	trace_kvm_apic_write(reg, val);
2007 
2008 	switch (reg) {
2009 	case APIC_ID:		/* Local APIC ID */
2010 		if (!apic_x2apic_mode(apic))
2011 			kvm_apic_set_xapic_id(apic, val >> 24);
2012 		else
2013 			ret = 1;
2014 		break;
2015 
2016 	case APIC_TASKPRI:
2017 		report_tpr_access(apic, true);
2018 		apic_set_tpr(apic, val & 0xff);
2019 		break;
2020 
2021 	case APIC_EOI:
2022 		apic_set_eoi(apic);
2023 		break;
2024 
2025 	case APIC_LDR:
2026 		if (!apic_x2apic_mode(apic))
2027 			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2028 		else
2029 			ret = 1;
2030 		break;
2031 
2032 	case APIC_DFR:
2033 		if (!apic_x2apic_mode(apic))
2034 			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2035 		else
2036 			ret = 1;
2037 		break;
2038 
2039 	case APIC_SPIV: {
2040 		u32 mask = 0x3ff;
2041 		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2042 			mask |= APIC_SPIV_DIRECTED_EOI;
2043 		apic_set_spiv(apic, val & mask);
2044 		if (!(val & APIC_SPIV_APIC_ENABLED)) {
2045 			int i;
2046 			u32 lvt_val;
2047 
2048 			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
2049 				lvt_val = kvm_lapic_get_reg(apic,
2050 						       APIC_LVTT + 0x10 * i);
2051 				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
2052 					     lvt_val | APIC_LVT_MASKED);
2053 			}
2054 			apic_update_lvtt(apic);
2055 			atomic_set(&apic->lapic_timer.pending, 0);
2056 
2057 		}
2058 		break;
2059 	}
2060 	case APIC_ICR:
2061 		/* No delay here, so we always clear the pending bit */
2062 		val &= ~(1 << 12);
2063 		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2064 		kvm_lapic_set_reg(apic, APIC_ICR, val);
2065 		break;
2066 
2067 	case APIC_ICR2:
2068 		if (!apic_x2apic_mode(apic))
2069 			val &= 0xff000000;
2070 		kvm_lapic_set_reg(apic, APIC_ICR2, val);
2071 		break;
2072 
2073 	case APIC_LVT0:
2074 		apic_manage_nmi_watchdog(apic, val);
2075 		fallthrough;
2076 	case APIC_LVTTHMR:
2077 	case APIC_LVTPC:
2078 	case APIC_LVT1:
2079 	case APIC_LVTERR: {
2080 		/* TODO: Check vector */
2081 		size_t size;
2082 		u32 index;
2083 
2084 		if (!kvm_apic_sw_enabled(apic))
2085 			val |= APIC_LVT_MASKED;
2086 		size = ARRAY_SIZE(apic_lvt_mask);
2087 		index = array_index_nospec(
2088 				(reg - APIC_LVTT) >> 4, size);
2089 		val &= apic_lvt_mask[index];
2090 		kvm_lapic_set_reg(apic, reg, val);
2091 		break;
2092 	}
2093 
2094 	case APIC_LVTT:
2095 		if (!kvm_apic_sw_enabled(apic))
2096 			val |= APIC_LVT_MASKED;
2097 		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2098 		kvm_lapic_set_reg(apic, APIC_LVTT, val);
2099 		apic_update_lvtt(apic);
2100 		break;
2101 
2102 	case APIC_TMICT:
2103 		if (apic_lvtt_tscdeadline(apic))
2104 			break;
2105 
2106 		cancel_apic_timer(apic);
2107 		kvm_lapic_set_reg(apic, APIC_TMICT, val);
2108 		start_apic_timer(apic);
2109 		break;
2110 
2111 	case APIC_TDCR: {
2112 		uint32_t old_divisor = apic->divide_count;
2113 
2114 		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2115 		update_divide_count(apic);
2116 		if (apic->divide_count != old_divisor &&
2117 				apic->lapic_timer.period) {
2118 			hrtimer_cancel(&apic->lapic_timer.timer);
2119 			update_target_expiration(apic, old_divisor);
2120 			restart_apic_timer(apic);
2121 		}
2122 		break;
2123 	}
2124 	case APIC_ESR:
2125 		if (apic_x2apic_mode(apic) && val != 0)
2126 			ret = 1;
2127 		break;
2128 
2129 	case APIC_SELF_IPI:
2130 		if (apic_x2apic_mode(apic)) {
2131 			kvm_lapic_reg_write(apic, APIC_ICR,
2132 					    APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
2133 		} else
2134 			ret = 1;
2135 		break;
2136 	default:
2137 		ret = 1;
2138 		break;
2139 	}
2140 
2141 	kvm_recalculate_apic_map(apic->vcpu->kvm);
2142 
2143 	return ret;
2144 }
2145 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
2146 
2147 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2148 			    gpa_t address, int len, const void *data)
2149 {
2150 	struct kvm_lapic *apic = to_lapic(this);
2151 	unsigned int offset = address - apic->base_address;
2152 	u32 val;
2153 
2154 	if (!apic_mmio_in_range(apic, address))
2155 		return -EOPNOTSUPP;
2156 
2157 	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2158 		if (!kvm_check_has_quirk(vcpu->kvm,
2159 					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2160 			return -EOPNOTSUPP;
2161 
2162 		return 0;
2163 	}
2164 
2165 	/*
2166 	 * APIC registers must be aligned on a 128-bit boundary.
2167 	 * 32/64/128-bit registers must be accessed through 32-bit
2168 	 * loads and stores.  See Intel SDM section 8.4.1.
2169 	 */
2170 	if (len != 4 || (offset & 0xf))
2171 		return 0;
2172 
2173 	val = *(u32*)data;
2174 
2175 	kvm_lapic_reg_write(apic, offset & 0xff0, val);
2176 
2177 	return 0;
2178 }
2179 
2180 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2181 {
2182 	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2183 }
2184 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2185 
2186 /* Emulate a trap-style APIC write, i.e. after hardware has completed the access. */
2187 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2188 {
2189 	u32 val = 0;
2190 
2191 	/* Hardware has already done the conditional check and instruction decode. */
2192 	offset &= 0xff0;
2193 
2194 	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2195 
2196 	/* TODO: optimize to just emulate side effect w/o one more write */
2197 	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2198 }
2199 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2200 
2201 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2202 {
2203 	struct kvm_lapic *apic = vcpu->arch.apic;
2204 
2205 	if (!vcpu->arch.apic)
2206 		return;
2207 
2208 	hrtimer_cancel(&apic->lapic_timer.timer);
2209 
2210 	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2211 		static_branch_slow_dec_deferred(&apic_hw_disabled);
2212 
2213 	if (!apic->sw_enabled)
2214 		static_branch_slow_dec_deferred(&apic_sw_disabled);
2215 
2216 	if (apic->regs)
2217 		free_page((unsigned long)apic->regs);
2218 
2219 	kfree(apic);
2220 }
2221 
2222 /*
2223  *----------------------------------------------------------------------
2224  * LAPIC interface
2225  *----------------------------------------------------------------------
2226  */
2227 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2228 {
2229 	struct kvm_lapic *apic = vcpu->arch.apic;
2230 
2231 	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2232 		return 0;
2233 
2234 	return apic->lapic_timer.tscdeadline;
2235 }
2236 
2237 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2238 {
2239 	struct kvm_lapic *apic = vcpu->arch.apic;
2240 
2241 	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2242 		return;
2243 
2244 	hrtimer_cancel(&apic->lapic_timer.timer);
2245 	apic->lapic_timer.tscdeadline = data;
2246 	start_apic_timer(apic);
2247 }
2248 
2249 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2250 {
2251 	struct kvm_lapic *apic = vcpu->arch.apic;
2252 
2253 	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2254 		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2255 }
2256 
2257 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2258 {
2259 	u64 tpr;
2260 
2261 	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2262 
2263 	return (tpr & 0xf0) >> 4;
2264 }
2265 
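/*
 * Update the APIC base MSR.  Toggling the global enable bit flips the
 * apic_hw_disabled static key, and enable/x2APIC transitions are forwarded
 * to vendor code.  Relocating the APIC base away from the default address
 * is not supported.
 */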
2266 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2267 {
2268 	u64 old_value = vcpu->arch.apic_base;
2269 	struct kvm_lapic *apic = vcpu->arch.apic;
2270 
2271 	vcpu->arch.apic_base = value;
2272 
2273 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2274 		kvm_update_cpuid_runtime(vcpu);
2275 
2276 	if (!apic)
2277 		return;
2278 
2279 	/* update jump label if enable bit changes */
2280 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2281 		if (value & MSR_IA32_APICBASE_ENABLE) {
2282 			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2283 			static_branch_slow_dec_deferred(&apic_hw_disabled);
2284 			/* Check if there are APF page ready requests pending */
2285 			kvm_make_request(KVM_REQ_APF_READY, vcpu);
2286 		} else {
2287 			static_branch_inc(&apic_hw_disabled.key);
2288 			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2289 		}
2290 	}
2291 
2292 	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2293 		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2294 
2295 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2296 		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
2297 
2298 	apic->base_address = apic->vcpu->arch.apic_base &
2299 			     MSR_IA32_APICBASE_BASE;
2300 
2301 	if ((value & MSR_IA32_APICBASE_ENABLE) &&
2302 	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
2303 		pr_warn_once("APIC base relocation is unsupported by KVM");
2304 }
2305 
2306 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2307 {
2308 	struct kvm_lapic *apic = vcpu->arch.apic;
2309 
2310 	if (vcpu->arch.apicv_active) {
2311 		/* irr_pending is always true when apicv is activated. */
2312 		apic->irr_pending = true;
2313 		apic->isr_count = 1;
2314 	} else {
2315 		apic->irr_pending = (apic_search_irr(apic) != -1);
2316 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2317 	}
2318 }
2319 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2320 
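/*
 * Emulate APIC RESET or INIT (init_event).  INIT preserves the APIC base MSR
 * and the xAPIC ID; all other registers are restored to their reset values
 * and any pending timer is cancelled.
 */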
2321 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2322 {
2323 	struct kvm_lapic *apic = vcpu->arch.apic;
2324 	u64 msr_val;
2325 	int i;
2326 
2327 	if (!init_event) {
2328 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2329 		if (kvm_vcpu_is_reset_bsp(vcpu))
2330 			msr_val |= MSR_IA32_APICBASE_BSP;
2331 		kvm_lapic_set_base(vcpu, msr_val);
2332 	}
2333 
2334 	if (!apic)
2335 		return;
2336 
2337 	/* Stop the timer in case it's a reset to an active apic */
2338 	hrtimer_cancel(&apic->lapic_timer.timer);
2339 
2340 	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2341 	if (!init_event)
2342 		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2343 	kvm_apic_set_version(apic->vcpu);
2344 
2345 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2346 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2347 	apic_update_lvtt(apic);
2348 	if (kvm_vcpu_is_reset_bsp(vcpu) &&
2349 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2350 		kvm_lapic_set_reg(apic, APIC_LVT0,
2351 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2352 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2353 
2354 	kvm_apic_set_dfr(apic, 0xffffffffU);
2355 	apic_set_spiv(apic, 0xff);
2356 	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2357 	if (!apic_x2apic_mode(apic))
2358 		kvm_apic_set_ldr(apic, 0);
2359 	kvm_lapic_set_reg(apic, APIC_ESR, 0);
2360 	kvm_lapic_set_reg(apic, APIC_ICR, 0);
2361 	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2362 	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2363 	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2364 	for (i = 0; i < 8; i++) {
2365 		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2366 		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2367 		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2368 	}
2369 	kvm_apic_update_apicv(vcpu);
2370 	apic->highest_isr_cache = -1;
2371 	update_divide_count(apic);
2372 	atomic_set(&apic->lapic_timer.pending, 0);
2373 
2374 	vcpu->arch.pv_eoi.msr_val = 0;
2375 	apic_update_ppr(apic);
2376 	if (vcpu->arch.apicv_active) {
2377 		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2378 		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
2379 		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
2380 	}
2381 
2382 	vcpu->arch.apic_arb_prio = 0;
2383 	vcpu->arch.apic_attention = 0;
2384 
2385 	kvm_recalculate_apic_map(vcpu->kvm);
2386 }
2387 
2388 /*
2389  *----------------------------------------------------------------------
2390  * timer interface
2391  *----------------------------------------------------------------------
2392  */
2393 
2394 static bool lapic_is_periodic(struct kvm_lapic *apic)
2395 {
2396 	return apic_lvtt_period(apic);
2397 }
2398 
2399 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2400 {
2401 	struct kvm_lapic *apic = vcpu->arch.apic;
2402 
2403 	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2404 		return atomic_read(&apic->lapic_timer.pending);
2405 
2406 	return 0;
2407 }
2408 
2409 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2410 {
2411 	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2412 	int vector, mode, trig_mode;
2413 
2414 	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2415 		vector = reg & APIC_VECTOR_MASK;
2416 		mode = reg & APIC_MODE_MASK;
2417 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2418 		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2419 					NULL);
2420 	}
2421 	return 0;
2422 }
2423 
2424 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2425 {
2426 	struct kvm_lapic *apic = vcpu->arch.apic;
2427 
2428 	if (apic)
2429 		kvm_apic_local_deliver(apic, APIC_LVT0);
2430 }
2431 
2432 static const struct kvm_io_device_ops apic_mmio_ops = {
2433 	.read     = apic_mmio_read,
2434 	.write    = apic_mmio_write,
2435 };
2436 
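/*
 * hrtimer callback for the software timer: inject the expiration and, for a
 * periodic timer, push the expiry forward one period and restart the hrtimer.
 */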
2437 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2438 {
2439 	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2440 	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2441 
2442 	apic_timer_expired(apic, true);
2443 
2444 	if (lapic_is_periodic(apic)) {
2445 		advance_periodic_target_expiration(apic);
2446 		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2447 		return HRTIMER_RESTART;
2448 	}
2449 	return HRTIMER_NORESTART;
2450 }
2451 
2452 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2453 {
2454 	struct kvm_lapic *apic;
2455 
2456 	ASSERT(vcpu != NULL);
2457 
2458 	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2459 	if (!apic)
2460 		goto nomem;
2461 
2462 	vcpu->arch.apic = apic;
2463 
2464 	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2465 	if (!apic->regs) {
2466 		printk(KERN_ERR "Failed to allocate APIC register page for vcpu %x\n",
2467 		       vcpu->vcpu_id);
2468 		goto nomem_free_apic;
2469 	}
2470 	apic->vcpu = vcpu;
2471 
2472 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2473 		     HRTIMER_MODE_ABS_HARD);
2474 	apic->lapic_timer.timer.function = apic_timer_fn;
2475 	if (timer_advance_ns == -1) {
2476 		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2477 		lapic_timer_advance_dynamic = true;
2478 	} else {
2479 		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2480 		lapic_timer_advance_dynamic = false;
2481 	}
2482 
2483 	/*
2484 	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2485 	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2486 	 */
2487 	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2488 	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2489 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2490 
2491 	return 0;
2492 nomem_free_apic:
2493 	kfree(apic);
2494 	vcpu->arch.apic = NULL;
2495 nomem:
2496 	return -ENOMEM;
2497 }
2498 
2499 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2500 {
2501 	struct kvm_lapic *apic = vcpu->arch.apic;
2502 	u32 ppr;
2503 
2504 	if (!kvm_apic_present(vcpu))
2505 		return -1;
2506 
2507 	__apic_update_ppr(apic, &ppr);
2508 	return apic_has_interrupt_for_ppr(apic, ppr);
2509 }
2510 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2511 
2512 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2513 {
2514 	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2515 
2516 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2517 		return 1;
2518 	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2519 	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2520 		return 1;
2521 	return 0;
2522 }
2523 
2524 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2525 {
2526 	struct kvm_lapic *apic = vcpu->arch.apic;
2527 
2528 	if (atomic_read(&apic->lapic_timer.pending) > 0) {
2529 		kvm_apic_inject_pending_timer_irqs(apic);
2530 		atomic_set(&apic->lapic_timer.pending, 0);
2531 	}
2532 }
2533 
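/*
 * Acknowledge the highest-priority pending interrupt: clear it in the IRR
 * and, except for Hyper-V SynIC auto-EOI vectors, set it in the ISR and
 * recompute the PPR.  Returns the vector, or -1 if nothing is deliverable.
 */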
2534 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2535 {
2536 	int vector = kvm_apic_has_interrupt(vcpu);
2537 	struct kvm_lapic *apic = vcpu->arch.apic;
2538 	u32 ppr;
2539 
2540 	if (vector == -1)
2541 		return -1;
2542 
2543 	/*
2544 	 * We get here even with APIC virtualization enabled, if doing
2545 	 * nested virtualization and L1 runs with the "acknowledge interrupt
2546 	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
2547 	 * because the processor would deliver it through the IDT.
2548 	 */
2549 
2550 	apic_clear_irr(vector, apic);
2551 	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
2552 		/*
2553 		 * For auto-EOI interrupts, there might be another pending
2554 		 * interrupt above PPR, so check whether to raise another
2555 		 * KVM_REQ_EVENT.
2556 		 */
2557 		apic_update_ppr(apic);
2558 	} else {
2559 		/*
2560 		 * For normal interrupts, PPR has been raised and there cannot
2561 		 * be a higher-priority pending interrupt---except if there was
2562 		 * a concurrent interrupt injection, but that would have
2563 		 * triggered KVM_REQ_EVENT already.
2564 		 */
2565 		apic_set_isr(vector, apic);
2566 		__apic_update_ppr(apic, &ppr);
2567 	}
2568 
2569 	return vector;
2570 }
2571 
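/*
 * Fix up the APIC ID and LDR when transferring state in x2APIC mode.  With
 * the x2APIC API format the full 32-bit ID must match vcpu_id; otherwise the
 * ID is converted between the xAPIC layout (bits 31:24) and the x2APIC
 * layout (all 32 bits).  On set, the LDR is recomputed from the ID.
 */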
2572 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2573 		struct kvm_lapic_state *s, bool set)
2574 {
2575 	if (apic_x2apic_mode(vcpu->arch.apic)) {
2576 		u32 *id = (u32 *)(s->regs + APIC_ID);
2577 		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2578 
2579 		if (vcpu->kvm->arch.x2apic_format) {
2580 			if (*id != vcpu->vcpu_id)
2581 				return -EINVAL;
2582 		} else {
2583 			if (set)
2584 				*id >>= 24;
2585 			else
2586 				*id <<= 24;
2587 		}
2588 
2589 		/* In x2APIC mode, the LDR is fixed and derived from the APIC ID. */
2590 		if (set)
2591 			*ldr = kvm_apic_calc_x2apic_ldr(*id);
2592 	}
2593 
2594 	return 0;
2595 }
2596 
2597 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2598 {
2599 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2600 
2601 	/*
2602 	 * Calculate the timer's current count from the remaining timer
2603 	 * period (if any) and store it in the returned register set.
2604 	 */
2605 	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2606 			    __apic_read(vcpu->arch.apic, APIC_TMCCT));
2607 
2608 	return kvm_apic_state_fixup(vcpu, s, false);
2609 }
2610 
2611 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2612 {
2613 	struct kvm_lapic *apic = vcpu->arch.apic;
2614 	int r;
2615 
2616 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2617 	/* set SPIV separately to get count of SW disabled APICs right */
2618 	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2619 
2620 	r = kvm_apic_state_fixup(vcpu, s, true);
2621 	if (r) {
2622 		kvm_recalculate_apic_map(vcpu->kvm);
2623 		return r;
2624 	}
2625 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2626 
2627 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2628 	kvm_recalculate_apic_map(vcpu->kvm);
2629 	kvm_apic_set_version(vcpu);
2630 
2631 	apic_update_ppr(apic);
2632 	cancel_apic_timer(apic);
2633 	apic->lapic_timer.expired_tscdeadline = 0;
2634 	apic_update_lvtt(apic);
2635 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2636 	update_divide_count(apic);
2637 	__start_apic_timer(apic, APIC_TMCCT);
2638 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
2639 	kvm_apic_update_apicv(vcpu);
2640 	apic->highest_isr_cache = -1;
2641 	if (vcpu->arch.apicv_active) {
2642 		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2643 		static_call(kvm_x86_hwapic_irr_update)(vcpu,
2644 				apic_find_highest_irr(apic));
2645 		static_call(kvm_x86_hwapic_isr_update)(vcpu,
2646 				apic_find_highest_isr(apic));
2647 	}
2648 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2649 	if (ioapic_in_kernel(vcpu->kvm))
2650 		kvm_rtc_eoi_tracking_restore_one(vcpu);
2651 
2652 	vcpu->arch.apic_arb_prio = 0;
2653 
2654 	return 0;
2655 }
2656 
2657 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2658 {
2659 	struct hrtimer *timer;
2660 
2661 	if (!lapic_in_kernel(vcpu) ||
2662 		kvm_can_post_timer_interrupt(vcpu))
2663 		return;
2664 
2665 	timer = &vcpu->arch.apic->lapic_timer.timer;
2666 	if (hrtimer_cancel(timer))
2667 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2668 }
2669 
2670 /*
2671  * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2672  *
2673  * Detect whether the guest triggered PV EOI since the
2674  * last entry. If yes, set EOI on the guest's behalf.
2675  * Clear PV EOI in guest memory in any case.
2676  */
2677 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2678 					struct kvm_lapic *apic)
2679 {
2680 	int vector;
2681 	/*
2682 	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2683 	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2684 	 *
2685 	 * KVM_APIC_PV_EOI_PENDING is unset:
2686 	 * 	-> host disabled PV EOI.
2687 	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2688 	 * 	-> host enabled PV EOI, guest did not execute EOI yet.
2689 	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2690 	 * 	-> host enabled PV EOI, guest executed EOI.
2691 	 */
2692 	BUG_ON(!pv_eoi_enabled(vcpu));
2693 
2694 	if (pv_eoi_test_and_clr_pending(vcpu))
2695 		return;
2696 	vector = apic_set_eoi(apic);
2697 	trace_kvm_pv_eoi(apic, vector);
2698 }
2699 
2700 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2701 {
2702 	u32 data;
2703 
2704 	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2705 		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2706 
2707 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2708 		return;
2709 
2710 	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2711 				  sizeof(u32)))
2712 		return;
2713 
2714 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
2715 }
2716 
2717 /*
2718  * apic_sync_pv_eoi_to_guest - called before vmentry
2719  *
2720  * Detect whether it's safe to enable PV EOI and
2721  * if yes do so.
2722  */
2723 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2724 					struct kvm_lapic *apic)
2725 {
2726 	if (!pv_eoi_enabled(vcpu) ||
2727 	    /* IRR set or many bits in ISR: could be nested. */
2728 	    apic->irr_pending ||
2729 	    /* Cache not set: could be safe but we don't bother. */
2730 	    apic->highest_isr_cache == -1 ||
2731 	    /* Need EOI to update ioapic. */
2732 	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2733 		/*
2734 		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2735 		 * so we need not do anything here.
2736 		 */
2737 		return;
2738 	}
2739 
2740 	pv_eoi_set_pending(apic->vcpu);
2741 }
2742 
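/*
 * Publish a TPR/ISR/IRR summary to the guest's vAPIC page before entry,
 * after first deciding whether PV EOI can be enabled for this entry.
 */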
2743 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2744 {
2745 	u32 data, tpr;
2746 	int max_irr, max_isr;
2747 	struct kvm_lapic *apic = vcpu->arch.apic;
2748 
2749 	apic_sync_pv_eoi_to_guest(vcpu, apic);
2750 
2751 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2752 		return;
2753 
2754 	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2755 	max_irr = apic_find_highest_irr(apic);
2756 	if (max_irr < 0)
2757 		max_irr = 0;
2758 	max_isr = apic_find_highest_isr(apic);
2759 	if (max_isr < 0)
2760 		max_isr = 0;
2761 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2762 
2763 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2764 				sizeof(u32));
2765 }
2766 
2767 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2768 {
2769 	if (vapic_addr) {
2770 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2771 					&vcpu->arch.apic->vapic_cache,
2772 					vapic_addr, sizeof(u32)))
2773 			return -EINVAL;
2774 		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2775 	} else {
2776 		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2777 	}
2778 
2779 	vcpu->arch.apic->vapic_addr = vapic_addr;
2780 	return 0;
2781 }
2782 
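/*
 * Handle a write to an x2APIC MSR.  The MSR index maps to the MMIO register
 * offset as (msr - APIC_BASE_MSR) << 4, e.g. MSR 0x830 yields
 * (0x830 - 0x800) << 4 = 0x300 = APIC_ICR.
 */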
2783 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2784 {
2785 	struct kvm_lapic *apic = vcpu->arch.apic;
2786 	u32 reg = (msr - APIC_BASE_MSR) << 4;
2787 
2788 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2789 		return 1;
2790 
2791 	if (reg == APIC_ICR2)
2792 		return 1;
2793 
2794 	/* For an ICR write, set the destination (ICR2) before the command triggers the IPI. */
2795 	if (reg == APIC_ICR)
2796 		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2797 	return kvm_lapic_reg_write(apic, reg, (u32)data);
2798 }
2799 
2800 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2801 {
2802 	struct kvm_lapic *apic = vcpu->arch.apic;
2803 	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2804 
2805 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2806 		return 1;
2807 
2808 	if (reg == APIC_DFR || reg == APIC_ICR2)
2809 		return 1;
2810 
2811 	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2812 		return 1;
2813 	if (reg == APIC_ICR)
2814 		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2815 
2816 	*data = (((u64)high) << 32) | low;
2817 
2818 	return 0;
2819 }
2820 
2821 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2822 {
2823 	struct kvm_lapic *apic = vcpu->arch.apic;
2824 
2825 	if (!lapic_in_kernel(vcpu))
2826 		return 1;
2827 
2828 	/* if this is ICR write vector before command */
2829 	/* For an ICR write, set the destination (ICR2) before the command triggers the IPI. */
2830 		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2831 	return kvm_lapic_reg_write(apic, reg, (u32)data);
2832 }
2833 
2834 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2835 {
2836 	struct kvm_lapic *apic = vcpu->arch.apic;
2837 	u32 low, high = 0;
2838 
2839 	if (!lapic_in_kernel(vcpu))
2840 		return 1;
2841 
2842 	if (kvm_lapic_reg_read(apic, reg, 4, &low))
2843 		return 1;
2844 	if (reg == APIC_ICR)
2845 		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2846 
2847 	*data = (((u64)high) << 32) | low;
2848 
2849 	return 0;
2850 }
2851 
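/*
 * Handle a write to the PV EOI MSR: validate that the data address is
 * 4-byte aligned, (re)initialize the gfn->hva cache when enabling, and
 * record the new MSR value.
 */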
2852 int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2853 {
2854 	u64 addr = data & ~KVM_MSR_ENABLED;
2855 	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2856 	unsigned long new_len;
2857 	int ret;
2858 
2859 	if (!IS_ALIGNED(addr, 4))
2860 		return 1;
2861 
2862 	if (data & KVM_MSR_ENABLED) {
2863 		if (addr == ghc->gpa && len <= ghc->len)
2864 			new_len = ghc->len;
2865 		else
2866 			new_len = len;
2867 
2868 		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2869 		if (ret)
2870 			return ret;
2871 	}
2872 
2873 	vcpu->arch.pv_eoi.msr_val = data;
2874 
2875 	return 0;
2876 }
2877 
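/*
 * Process pending INIT and SIPI events.  INIT resets the vCPU and leaves a
 * non-BSP waiting for SIPI; a SIPI then delivers the startup vector and
 * makes the vCPU runnable.
 */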
2878 int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2879 {
2880 	struct kvm_lapic *apic = vcpu->arch.apic;
2881 	u8 sipi_vector;
2882 	int r;
2883 	unsigned long pe;
2884 
2885 	if (!lapic_in_kernel(vcpu))
2886 		return 0;
2887 
2888 	/*
2889 	 * Read pending events before calling the check_events
2890 	 * callback.
2891 	 */
2892 	pe = smp_load_acquire(&apic->pending_events);
2893 	if (!pe)
2894 		return 0;
2895 
2896 	if (is_guest_mode(vcpu)) {
2897 		r = kvm_check_nested_events(vcpu);
2898 		if (r < 0)
2899 			return r == -EBUSY ? 0 : r;
2900 		/*
2901 		 * If an event has happened and caused a vmexit,
2902 		 * we know INITs are latched and therefore
2903 		 * we will not incorrectly deliver an APIC
2904 		 * event instead of a vmexit.
2905 		 */
2906 	}
2907 
2908 	/*
2909 	 * INITs are latched while CPU is in specific states
2910 	 * (SMM, VMX root mode, SVM with GIF=0).
2911 	 * Because a CPU cannot be in these states immediately
2912 	 * after it has processed an INIT signal (and thus in
2913 	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2914 	 * and leave the INIT pending.
2915 	 */
2916 	if (kvm_vcpu_latch_init(vcpu)) {
2917 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2918 		if (test_bit(KVM_APIC_SIPI, &pe))
2919 			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2920 		return 0;
2921 	}
2922 
2923 	if (test_bit(KVM_APIC_INIT, &pe)) {
2924 		clear_bit(KVM_APIC_INIT, &apic->pending_events);
2925 		kvm_vcpu_reset(vcpu, true);
2926 		if (kvm_vcpu_is_bsp(apic->vcpu))
2927 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2928 		else
2929 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2930 	}
2931 	if (test_bit(KVM_APIC_SIPI, &pe)) {
2932 		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2933 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2934 			/* evaluate pending_events before reading the vector */
2935 			smp_rmb();
2936 			sipi_vector = apic->sipi_vector;
2937 			kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2938 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2939 		}
2940 	}
2941 	return 0;
2942 }
2943 
2944 void kvm_lapic_exit(void)
2945 {
2946 	static_key_deferred_flush(&apic_hw_disabled);
2947 	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
2948 	static_key_deferred_flush(&apic_sw_disabled);
2949 	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
2950 }
2951