xref: /openbmc/linux/arch/x86/kvm/svm/avic.c (revision dd21bfa4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14 
15 #define pr_fmt(fmt) "SVM: " fmt
16 
17 #include <linux/kvm_types.h>
18 #include <linux/hashtable.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/kvm_host.h>
21 
22 #include <asm/irq_remapping.h>
23 
24 #include "trace.h"
25 #include "lapic.h"
26 #include "x86.h"
27 #include "irq.h"
28 #include "svm.h"
29 
30 /* AVIC GATAG is encoded using VM and VCPU IDs */
31 #define AVIC_VCPU_ID_BITS		8
32 #define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)
33 
34 #define AVIC_VM_ID_BITS			24
35 #define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
36 #define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)
37 
38 #define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
39 						(y & AVIC_VCPU_ID_MASK))
40 #define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
41 #define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
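
/*
 * Worked example (illustrative values only): a VM with avic_vm_id 0x123
 * and a vCPU with vcpu_id 0x45 gets AVIC_GATAG(0x123, 0x45) == 0x12345;
 * AVIC_GATAG_TO_VMID(0x12345) recovers 0x123 and
 * AVIC_GATAG_TO_VCPUID(0x12345) recovers 0x45.
 */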
42 
43 /* Note:
44  * This hash table is used to map a VM ID to its struct kvm_svm
45  * when handling an AMD IOMMU GA log notification, in order to
46  * schedule in a particular vCPU.
47  */
48 #define SVM_VM_DATA_HASH_BITS	8
49 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
50 static u32 next_vm_id = 0;
51 static bool next_vm_id_wrapped = 0;
52 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
53 
54 /*
55  * This is a wrapper around struct amd_ir_data from the AMD IOMMU driver.
56  */
57 struct amd_svm_iommu_ir {
58 	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
59 	void *data;		/* Storing pointer to struct amd_ir_data */
60 };
61 
62 
63 /* Note:
64  * This function is called from the IOMMU driver to notify
65  * SVM to schedule in a particular vCPU of a particular VM.
66  */
67 int avic_ga_log_notifier(u32 ga_tag)
68 {
69 	unsigned long flags;
70 	struct kvm_svm *kvm_svm;
71 	struct kvm_vcpu *vcpu = NULL;
72 	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
73 	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
74 
75 	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
76 	trace_kvm_avic_ga_log(vm_id, vcpu_id);
77 
78 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
79 	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
80 		if (kvm_svm->avic_vm_id != vm_id)
81 			continue;
82 		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
83 		break;
84 	}
85 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
86 
87 	/* Note:
88 	 * At this point, the IOMMU should have already set the pending
89 	 * bit in the vAPIC backing page. So, we just need to schedule
90 	 * in the vcpu.
91 	 */
92 	if (vcpu)
93 		kvm_vcpu_wake_up(vcpu);
94 
95 	return 0;
96 }
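
/*
 * A minimal sketch of how this notifier is expected to be registered with
 * the AMD IOMMU driver (the registration itself is expected to live in the
 * hardware setup path in svm.c, not in this file):
 *
 *	if (enable_apicv)
 *		amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
 *
 * amd_iommu_register_ga_log_notifier() is provided by the AMD IOMMU driver
 * via <linux/amd-iommu.h>, which is already included above.
 */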
97 
98 void avic_vm_destroy(struct kvm *kvm)
99 {
100 	unsigned long flags;
101 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
102 
103 	if (!enable_apicv)
104 		return;
105 
106 	if (kvm_svm->avic_logical_id_table_page)
107 		__free_page(kvm_svm->avic_logical_id_table_page);
108 	if (kvm_svm->avic_physical_id_table_page)
109 		__free_page(kvm_svm->avic_physical_id_table_page);
110 
111 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
112 	hash_del(&kvm_svm->hnode);
113 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
114 }
115 
116 int avic_vm_init(struct kvm *kvm)
117 {
118 	unsigned long flags;
119 	int err = -ENOMEM;
120 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
121 	struct kvm_svm *k2;
122 	struct page *p_page;
123 	struct page *l_page;
124 	u32 vm_id;
125 
126 	if (!enable_apicv)
127 		return 0;
128 
129 	/* Allocating physical APIC ID table (4KB) */
130 	p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
131 	if (!p_page)
132 		goto free_avic;
133 
134 	kvm_svm->avic_physical_id_table_page = p_page;
135 
136 	/* Allocating logical APIC ID table (4KB) */
137 	l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
138 	if (!l_page)
139 		goto free_avic;
140 
141 	kvm_svm->avic_logical_id_table_page = l_page;
142 
143 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
144  again:
145 	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
146 	if (vm_id == 0) { /* id is 1-based, zero is not okay */
147 		next_vm_id_wrapped = 1;
148 		goto again;
149 	}
150 	/* Is it still in use? Only possible if wrapped at least once */
151 	if (next_vm_id_wrapped) {
152 		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
153 			if (k2->avic_vm_id == vm_id)
154 				goto again;
155 		}
156 	}
157 	kvm_svm->avic_vm_id = vm_id;
158 	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
159 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
160 
161 	return 0;
162 
163 free_avic:
164 	avic_vm_destroy(kvm);
165 	return err;
166 }
167 
168 void avic_init_vmcb(struct vcpu_svm *svm)
169 {
170 	struct vmcb *vmcb = svm->vmcb;
171 	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
172 	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
173 	phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
174 	phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
175 
176 	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
177 	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
178 	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
179 	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
180 	vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
181 
182 	if (kvm_apicv_activated(svm->vcpu.kvm))
183 		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
184 	else
185 		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
186 }
187 
188 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
189 				       unsigned int index)
190 {
191 	u64 *avic_physical_id_table;
192 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
193 
194 	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
195 		return NULL;
196 
197 	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
198 
199 	return &avic_physical_id_table[index];
200 }
201 
202 /*
203  * Note:
204  * AVIC hardware walks the nested page table to check permissions,
205  * but does not use the SPA specified in the leaf page table
206  * entry, since it uses the address in the AVIC_BACKING_PAGE
207  * pointer field of the VMCB. Therefore, we set up the
208  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
209  */
210 static int avic_alloc_access_page(struct kvm *kvm)
211 {
212 	void __user *ret;
213 	int r = 0;
214 
215 	mutex_lock(&kvm->slots_lock);
216 
217 	if (kvm->arch.apic_access_memslot_enabled)
218 		goto out;
219 
220 	ret = __x86_set_memory_region(kvm,
221 				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
222 				      APIC_DEFAULT_PHYS_BASE,
223 				      PAGE_SIZE);
224 	if (IS_ERR(ret)) {
225 		r = PTR_ERR(ret);
226 		goto out;
227 	}
228 
229 	kvm->arch.apic_access_memslot_enabled = true;
230 out:
231 	mutex_unlock(&kvm->slots_lock);
232 	return r;
233 }
234 
235 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
236 {
237 	u64 *entry, new_entry;
238 	int id = vcpu->vcpu_id;
239 	struct vcpu_svm *svm = to_svm(vcpu);
240 
241 	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
242 		return -EINVAL;
243 
244 	if (!vcpu->arch.apic->regs)
245 		return -EINVAL;
246 
247 	if (kvm_apicv_activated(vcpu->kvm)) {
248 		int ret;
249 
250 		ret = avic_alloc_access_page(vcpu->kvm);
251 		if (ret)
252 			return ret;
253 	}
254 
255 	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
256 
257 	/* Set the AVIC backing page address in the physical APIC ID table */
258 	entry = avic_get_physical_id_entry(vcpu, id);
259 	if (!entry)
260 		return -EINVAL;
261 
262 	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
263 			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
264 			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
265 	WRITE_ONCE(*entry, new_entry);
266 
267 	svm->avic_physical_id_cache = entry;
268 
269 	return 0;
270 }
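
/*
 * The 64-bit entry written above is the same word later updated by
 * avic_vcpu_load()/avic_vcpu_put(): it combines the backing page address
 * (AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK), the host physical APIC ID
 * and the IsRunning/Valid flags, with the exact bit positions defined by
 * the AVIC_PHYSICAL_ID_ENTRY_* masks.
 */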
271 
272 void avic_ring_doorbell(struct kvm_vcpu *vcpu)
273 {
274 	/*
275 	 * Note, the vCPU could get migrated to a different pCPU at any point,
276 	 * which could result in signalling the wrong/previous pCPU.  But if
277 	 * that happens the vCPU is guaranteed to do a VMRUN (after being
278 	 * migrated) and thus will process pending interrupts, i.e. a doorbell
279 	 * is not needed (and the spurious one is harmless).
280 	 */
281 	int cpu = READ_ONCE(vcpu->cpu);
282 
283 	if (cpu != get_cpu())
284 		wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
285 	put_cpu();
286 }
287 
288 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
289 				   u32 icrl, u32 icrh)
290 {
291 	struct kvm_vcpu *vcpu;
292 	unsigned long i;
293 
294 	/*
295 	 * Wake any target vCPUs that are blocking, i.e. waiting for a wake
296 	 * event.  There's no need to signal doorbells, as hardware has handled
297 	 * vCPUs that were in guest at the time of the IPI, and vCPUs that have
298 	 * since entered the guest will have processed pending IRQs at VMRUN.
299 	 */
300 	kvm_for_each_vcpu(i, vcpu, kvm) {
301 		if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
302 					GET_APIC_DEST_FIELD(icrh),
303 					icrl & APIC_DEST_MASK)) {
304 			vcpu->arch.apic->irr_pending = true;
305 			svm_complete_interrupt_delivery(vcpu,
306 							icrl & APIC_MODE_MASK,
307 							icrl & APIC_INT_LEVELTRIG,
308 							icrl & APIC_VECTOR_MASK);
309 		}
310 	}
311 }
312 
313 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
314 {
315 	struct vcpu_svm *svm = to_svm(vcpu);
316 	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
317 	u32 icrl = svm->vmcb->control.exit_info_1;
318 	u32 id = svm->vmcb->control.exit_info_2 >> 32;
319 	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
320 	struct kvm_lapic *apic = vcpu->arch.apic;
321 
322 	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
323 
324 	switch (id) {
325 	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
326 		/*
327 		 * AVIC hardware handles the generation of
328 		 * IPIs when the specified Message Type is Fixed
329 		 * (also known as fixed delivery mode) and
330 		 * the Trigger Mode is edge-triggered. The hardware
331 		 * also supports self and broadcast delivery modes
332 		 * specified via the Destination Shorthand (DSH)
333 		 * field of the ICRL. Logical and physical APIC ID
334 		 * formats are supported. All other IPI types cause
335 		 * a #VMEXIT, which needs to be emulated.
336 		 */
337 		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
338 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
339 		break;
340 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
341 		/*
342 		 * At this point, we expect that the AVIC HW has already
343 		 * set the appropriate IRR bits on the valid target
344 		 * vcpus. So, we just need to kick the appropriate vcpu.
345 		 */
346 		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
347 		break;
348 	case AVIC_IPI_FAILURE_INVALID_TARGET:
349 		break;
350 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
351 		WARN_ONCE(1, "Invalid backing page\n");
352 		break;
353 	default:
354 		pr_err("Unknown IPI interception\n");
355 	}
356 
357 	return 1;
358 }
359 
360 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
361 {
362 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
363 	int index;
364 	u32 *logical_apic_id_table;
365 	int dlid = GET_APIC_LOGICAL_ID(ldr);
366 
367 	if (!dlid)
368 		return NULL;
369 
370 	if (flat) { /* flat */
371 		index = ffs(dlid) - 1;
372 		if (index > 7)
373 			return NULL;
374 	} else { /* cluster */
375 		int cluster = (dlid & 0xf0) >> 4;
376 		int apic = ffs(dlid & 0x0f) - 1;
377 
378 		if ((apic < 0) || (apic > 7) ||
379 		    (cluster >= 0xf))
380 			return NULL;
381 		index = (cluster << 2) + apic;
382 	}
383 
384 	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
385 
386 	return &logical_apic_id_table[index];
387 }
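
/*
 * Worked example (illustrative values only): an LDR of 0x04000000 yields a
 * logical ID of 0x04, so the flat-mode index is ffs(0x04) - 1 = 2.  In
 * cluster mode, a logical ID of 0x23 selects cluster 2 and local bit 0x03
 * (ffs(0x03) - 1 = 0), giving index (2 << 2) + 0 = 8.
 */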
388 
389 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
390 {
391 	bool flat;
392 	u32 *entry, new_entry;
393 
394 	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
395 	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
396 	if (!entry)
397 		return -EINVAL;
398 
399 	new_entry = READ_ONCE(*entry);
400 	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
401 	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
402 	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
403 	WRITE_ONCE(*entry, new_entry);
404 
405 	return 0;
406 }
407 
408 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
409 {
410 	struct vcpu_svm *svm = to_svm(vcpu);
411 	bool flat = svm->dfr_reg == APIC_DFR_FLAT;
412 	u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
413 
414 	if (entry)
415 		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
416 }
417 
418 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
419 {
420 	int ret = 0;
421 	struct vcpu_svm *svm = to_svm(vcpu);
422 	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
423 	u32 id = kvm_xapic_id(vcpu->arch.apic);
424 
425 	if (ldr == svm->ldr_reg)
426 		return 0;
427 
428 	avic_invalidate_logical_id_entry(vcpu);
429 
430 	if (ldr)
431 		ret = avic_ldr_write(vcpu, id, ldr);
432 
433 	if (!ret)
434 		svm->ldr_reg = ldr;
435 
436 	return ret;
437 }
438 
439 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
440 {
441 	u64 *old, *new;
442 	struct vcpu_svm *svm = to_svm(vcpu);
443 	u32 id = kvm_xapic_id(vcpu->arch.apic);
444 
445 	if (vcpu->vcpu_id == id)
446 		return 0;
447 
448 	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
449 	new = avic_get_physical_id_entry(vcpu, id);
450 	if (!new || !old)
451 		return 1;
452 
453 	/* We need to move the physical_id_entry to its new offset */
454 	*new = *old;
455 	*old = 0ULL;
456 	to_svm(vcpu)->avic_physical_id_cache = new;
457 
458 	/*
459 	 * Also update the guest physical APIC ID in the logical
460 	 * APIC ID table entry if the LDR has already been set up.
461 	 */
462 	if (svm->ldr_reg)
463 		avic_handle_ldr_update(vcpu);
464 
465 	return 0;
466 }
467 
468 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
469 {
470 	struct vcpu_svm *svm = to_svm(vcpu);
471 	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
472 
473 	if (svm->dfr_reg == dfr)
474 		return;
475 
476 	avic_invalidate_logical_id_entry(vcpu);
477 	svm->dfr_reg = dfr;
478 }
479 
480 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
481 {
482 	struct kvm_lapic *apic = svm->vcpu.arch.apic;
483 	u32 offset = svm->vmcb->control.exit_info_1 &
484 				AVIC_UNACCEL_ACCESS_OFFSET_MASK;
485 
486 	switch (offset) {
487 	case APIC_ID:
488 		if (avic_handle_apic_id_update(&svm->vcpu))
489 			return 0;
490 		break;
491 	case APIC_LDR:
492 		if (avic_handle_ldr_update(&svm->vcpu))
493 			return 0;
494 		break;
495 	case APIC_DFR:
496 		avic_handle_dfr_update(&svm->vcpu);
497 		break;
498 	default:
499 		break;
500 	}
501 
502 	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
503 
504 	return 1;
505 }
506 
507 static bool is_avic_unaccelerated_access_trap(u32 offset)
508 {
509 	bool ret = false;
510 
511 	switch (offset) {
512 	case APIC_ID:
513 	case APIC_EOI:
514 	case APIC_RRR:
515 	case APIC_LDR:
516 	case APIC_DFR:
517 	case APIC_SPIV:
518 	case APIC_ESR:
519 	case APIC_ICR:
520 	case APIC_LVTT:
521 	case APIC_LVTTHMR:
522 	case APIC_LVTPC:
523 	case APIC_LVT0:
524 	case APIC_LVT1:
525 	case APIC_LVTERR:
526 	case APIC_TMICT:
527 	case APIC_TDCR:
528 		ret = true;
529 		break;
530 	default:
531 		break;
532 	}
533 	return ret;
534 }
535 
536 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
537 {
538 	struct vcpu_svm *svm = to_svm(vcpu);
539 	int ret = 0;
540 	u32 offset = svm->vmcb->control.exit_info_1 &
541 		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
542 	u32 vector = svm->vmcb->control.exit_info_2 &
543 		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
544 	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
545 		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
546 	bool trap = is_avic_unaccelerated_access_trap(offset);
547 
548 	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
549 					    trap, write, vector);
550 	if (trap) {
551 		/* Handling Trap */
552 		WARN_ONCE(!write, "svm: Handling trap read.\n");
553 		ret = avic_unaccel_trap_write(svm);
554 	} else {
555 		/* Handling Fault */
556 		ret = kvm_emulate_instruction(vcpu, 0);
557 	}
558 
559 	return ret;
560 }
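
/*
 * In other words: for the registers accepted by
 * is_avic_unaccelerated_access_trap(), the hardware has already committed
 * the write to the vAPIC backing page, so avic_unaccel_trap_write() only
 * needs to replay the value via kvm_lapic_reg_write() to propagate side
 * effects; any other offset is reported as a fault and the access is
 * fully emulated.
 */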
561 
562 int avic_init_vcpu(struct vcpu_svm *svm)
563 {
564 	int ret;
565 	struct kvm_vcpu *vcpu = &svm->vcpu;
566 
567 	if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
568 		return 0;
569 
570 	ret = avic_init_backing_page(vcpu);
571 	if (ret)
572 		return ret;
573 
574 	INIT_LIST_HEAD(&svm->ir_list);
575 	spin_lock_init(&svm->ir_list_lock);
576 	svm->dfr_reg = APIC_DFR_FLAT;
577 
578 	return ret;
579 }
580 
581 void avic_post_state_restore(struct kvm_vcpu *vcpu)
582 {
583 	if (avic_handle_apic_id_update(vcpu) != 0)
584 		return;
585 	avic_handle_dfr_update(vcpu);
586 	avic_handle_ldr_update(vcpu);
587 }
588 
589 void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
590 {
591 	return;
592 }
593 
594 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
595 {
596 }
597 
598 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
599 {
600 }
601 
602 static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
603 {
604 	int ret = 0;
605 	unsigned long flags;
606 	struct amd_svm_iommu_ir *ir;
607 	struct vcpu_svm *svm = to_svm(vcpu);
608 
609 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
610 		return 0;
611 
612 	/*
613 	 * Here, we go through the per-vCPU ir_list to update all existing
614 	 * interrupt remapping table entries targeting this vCPU.
615 	 */
616 	spin_lock_irqsave(&svm->ir_list_lock, flags);
617 
618 	if (list_empty(&svm->ir_list))
619 		goto out;
620 
621 	list_for_each_entry(ir, &svm->ir_list, node) {
622 		if (activate)
623 			ret = amd_iommu_activate_guest_mode(ir->data);
624 		else
625 			ret = amd_iommu_deactivate_guest_mode(ir->data);
626 		if (ret)
627 			break;
628 	}
629 out:
630 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
631 	return ret;
632 }
633 
634 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
635 {
636 	struct vcpu_svm *svm = to_svm(vcpu);
637 	struct vmcb *vmcb = svm->vmcb01.ptr;
638 	bool activated = kvm_vcpu_apicv_active(vcpu);
639 
640 	if (!enable_apicv)
641 		return;
642 
643 	if (activated) {
644 		/**
645 		 * During temporary AVIC deactivation, the guest could update
646 		 * the APIC ID, DFR and LDR registers, which would not be trapped
647 		 * by avic_unaccelerated_access_interception(). In this case,
648 		 * we need to check and update the AVIC logical APIC ID table
649 		 * accordingly before re-activating.
650 		 */
651 		avic_post_state_restore(vcpu);
652 		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
653 	} else {
654 		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
655 	}
656 	vmcb_mark_dirty(vmcb, VMCB_AVIC);
657 
658 	if (activated)
659 		avic_vcpu_load(vcpu, vcpu->cpu);
660 	else
661 		avic_vcpu_put(vcpu);
662 
663 	svm_set_pi_irte_mode(vcpu, activated);
664 }
665 
666 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
667 {
668 	return;
669 }
670 
671 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
672 {
673 	return false;
674 }
675 
676 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
677 {
678 	unsigned long flags;
679 	struct amd_svm_iommu_ir *cur;
680 
681 	spin_lock_irqsave(&svm->ir_list_lock, flags);
682 	list_for_each_entry(cur, &svm->ir_list, node) {
683 		if (cur->data != pi->ir_data)
684 			continue;
685 		list_del(&cur->node);
686 		kfree(cur);
687 		break;
688 	}
689 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
690 }
691 
692 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
693 {
694 	int ret = 0;
695 	unsigned long flags;
696 	struct amd_svm_iommu_ir *ir;
697 
698 	/**
699 	 * In some cases, the existing IRTE is updated and re-set,
700 	 * so we need to check here if it's already been added
701 	 * to the ir_list.
702 	 */
703 	if (pi->ir_data && (pi->prev_ga_tag != 0)) {
704 		struct kvm *kvm = svm->vcpu.kvm;
705 		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
706 		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
707 		struct vcpu_svm *prev_svm;
708 
709 		if (!prev_vcpu) {
710 			ret = -EINVAL;
711 			goto out;
712 		}
713 
714 		prev_svm = to_svm(prev_vcpu);
715 		svm_ir_list_del(prev_svm, pi);
716 	}
717 
718 	/**
719 	 * Allocate a new amd_svm_iommu_ir entry, which will be
720 	 * added to the per-vCPU ir_list.
721 	 */
722 	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
723 	if (!ir) {
724 		ret = -ENOMEM;
725 		goto out;
726 	}
727 	ir->data = pi->ir_data;
728 
729 	spin_lock_irqsave(&svm->ir_list_lock, flags);
730 	list_add(&ir->node, &svm->ir_list);
731 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
732 out:
733 	return ret;
734 }
735 
736 /*
737  * Note:
738  * The HW cannot support posting multicast/broadcast
739  * interrupts to a vCPU. So, we still use legacy interrupt
740  * remapping for these kinds of interrupts.
741  *
742  * For lowest-priority interrupts, we only support
743  * those with a single CPU as the destination, e.g. the user
744  * configures the interrupts via /proc/irq or uses
745  * irqbalance to make the interrupts single-CPU.
746  */
747 static int
748 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
749 		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
750 {
751 	struct kvm_lapic_irq irq;
752 	struct kvm_vcpu *vcpu = NULL;
753 
754 	kvm_set_msi_irq(kvm, e, &irq);
755 
756 	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
757 	    !kvm_irq_is_postable(&irq)) {
758 		pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
759 			 __func__, irq.vector);
760 		return -1;
761 	}
762 
763 	pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
764 		 irq.vector);
765 	*svm = to_svm(vcpu);
766 	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
767 	vcpu_info->vector = irq.vector;
768 
769 	return 0;
770 }
771 
772 /*
773  * svm_update_pi_irte - set IRTE for Posted-Interrupts
774  *
775  * @kvm: kvm
776  * @host_irq: host irq of the interrupt
777  * @guest_irq: gsi of the interrupt
778  * @set: set or unset PI
779  * returns 0 on success, < 0 on failure
780  */
781 int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
782 		       uint32_t guest_irq, bool set)
783 {
784 	struct kvm_kernel_irq_routing_entry *e;
785 	struct kvm_irq_routing_table *irq_rt;
786 	int idx, ret = -EINVAL;
787 
788 	if (!kvm_arch_has_assigned_device(kvm) ||
789 	    !irq_remapping_cap(IRQ_POSTING_CAP))
790 		return 0;
791 
792 	pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
793 		 __func__, host_irq, guest_irq, set);
794 
795 	idx = srcu_read_lock(&kvm->irq_srcu);
796 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
797 	WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
798 
799 	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
800 		struct vcpu_data vcpu_info;
801 		struct vcpu_svm *svm = NULL;
802 
803 		if (e->type != KVM_IRQ_ROUTING_MSI)
804 			continue;
805 
806 		/**
807 		 * Here, we set up legacy mode in the following cases:
808 		 * 1. The interrupt cannot be targeted at a specific vCPU.
809 		 * 2. The posted interrupt is being unset.
810 		 * 3. APIC virtualization is disabled for the vCPU.
811 		 * 4. The IRQ has an incompatible delivery mode (SMI, INIT, etc.).
812 		 */
813 		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
814 		    kvm_vcpu_apicv_active(&svm->vcpu)) {
815 			struct amd_iommu_pi_data pi;
816 
817 			/* Try to enable guest_mode in IRTE */
818 			pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
819 					    AVIC_HPA_MASK);
820 			pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
821 						     svm->vcpu.vcpu_id);
822 			pi.is_guest_mode = true;
823 			pi.vcpu_data = &vcpu_info;
824 			ret = irq_set_vcpu_affinity(host_irq, &pi);
825 
826 			/**
827 			 * Here, we have successfully set up vCPU affinity in
828 			 * IOMMU guest mode. Now, we need to store the posted
829 			 * interrupt information in the per-vCPU ir_list so that
830 			 * we can reference it directly when we update the vCPU
831 			 * scheduling information in the IOMMU IRTE.
832 			 */
833 			if (!ret && pi.is_guest_mode)
834 				svm_ir_list_add(svm, &pi);
835 		} else {
836 			/* Use legacy mode in IRTE */
837 			struct amd_iommu_pi_data pi;
838 
839 			/**
840 			 * - Tell the IOMMU to use legacy mode for this interrupt.
841 			 * - Retrieve the ga_tag of the prior interrupt remapping data.
842 			 * - Retrieve ga_tag of prior interrupt remapping data.
843 			 */
844 			pi.prev_ga_tag = 0;
845 			pi.is_guest_mode = false;
846 			ret = irq_set_vcpu_affinity(host_irq, &pi);
847 
848 			/**
849 			 * Check if the posted interrupt was previously
850 			 * set up in guest mode by checking if the ga_tag
851 			 * was cached. If so, we need to clean up the per-vCPU
852 			 * ir_list.
853 			 */
854 			if (!ret && pi.prev_ga_tag) {
855 				int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
856 				struct kvm_vcpu *vcpu;
857 
858 				vcpu = kvm_get_vcpu_by_id(kvm, id);
859 				if (vcpu)
860 					svm_ir_list_del(to_svm(vcpu), &pi);
861 			}
862 		}
863 
864 		if (!ret && svm) {
865 			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
866 						 e->gsi, vcpu_info.vector,
867 						 vcpu_info.pi_desc_addr, set);
868 		}
869 
870 		if (ret < 0) {
871 			pr_err("%s: failed to update PI IRTE\n", __func__);
872 			goto out;
873 		}
874 	}
875 
876 	ret = 0;
877 out:
878 	srcu_read_unlock(&kvm->irq_srcu, idx);
879 	return ret;
880 }
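
/*
 * Note: assuming the standard kvm_x86_ops wiring in svm.c, this function is
 * installed as KVM's pi_update_irte hook and is reached (indirectly) from
 * the IRQ bypass producer/consumer callbacks when an assigned device's
 * interrupt is mapped to or unmapped from a guest vector.
 */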
881 
882 bool svm_check_apicv_inhibit_reasons(ulong bit)
883 {
884 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
885 			  BIT(APICV_INHIBIT_REASON_ABSENT) |
886 			  BIT(APICV_INHIBIT_REASON_HYPERV) |
887 			  BIT(APICV_INHIBIT_REASON_NESTED) |
888 			  BIT(APICV_INHIBIT_REASON_IRQWIN) |
889 			  BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
890 			  BIT(APICV_INHIBIT_REASON_X2APIC) |
891 			  BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
892 
893 	return supported & BIT(bit);
894 }
895 
896 
897 static inline int
898 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
899 {
900 	int ret = 0;
901 	unsigned long flags;
902 	struct amd_svm_iommu_ir *ir;
903 	struct vcpu_svm *svm = to_svm(vcpu);
904 
905 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
906 		return 0;
907 
908 	/*
909 	 * Here, we go through the per-vCPU ir_list to update all existing
910 	 * interrupt remapping table entries targeting this vCPU.
911 	 */
912 	spin_lock_irqsave(&svm->ir_list_lock, flags);
913 
914 	if (list_empty(&svm->ir_list))
915 		goto out;
916 
917 	list_for_each_entry(ir, &svm->ir_list, node) {
918 		ret = amd_iommu_update_ga(cpu, r, ir->data);
919 		if (ret)
920 			break;
921 	}
922 out:
923 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
924 	return ret;
925 }
926 
927 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
928 {
929 	u64 entry;
930 	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
931 	int h_physical_id = kvm_cpu_get_apicid(cpu);
932 	struct vcpu_svm *svm = to_svm(vcpu);
933 
934 	lockdep_assert_preemption_disabled();
935 
936 	/*
937 	 * Since the host physical APIC ID is 8 bits,
938 	 * we can support host APIC IDs up to 255.
939 	 */
940 	if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
941 		return;
942 
943 	/*
944 	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
945 	 * is being scheduled in after being preempted.  The CPU entries in the
946 	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
947 	 * If the vCPU was migrated, its new CPU value will be stuffed when the
948 	 * vCPU unblocks.
949 	 */
950 	if (kvm_vcpu_is_blocking(vcpu))
951 		return;
952 
953 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
954 	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
955 
956 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
957 	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
958 	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
959 
960 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
961 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
962 }
963 
964 void avic_vcpu_put(struct kvm_vcpu *vcpu)
965 {
966 	u64 entry;
967 	struct vcpu_svm *svm = to_svm(vcpu);
968 
969 	lockdep_assert_preemption_disabled();
970 
971 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
972 
973 	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
974 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
975 		return;
976 
977 	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
978 
979 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
980 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
981 }
982 
983 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
984 {
985 	if (!kvm_vcpu_apicv_active(vcpu))
986 		return;
987 
988 	preempt_disable();
989 
990 	/*
991 	 * Unload the AVIC when the vCPU is about to block, _before_
992 	 * the vCPU actually blocks.
993 	 *
994 	 * Any IRQs that arrive before IsRunning=0 will not cause an
995 	 * incomplete IPI vmexit on the source, therefore vIRR will also
996 	 * be checked by kvm_vcpu_check_block() before blocking.  The
997 	 * memory barrier implicit in set_current_state orders writing
998 	 * IsRunning=0 before reading the vIRR.  The processor needs a
999 	 * matching memory barrier on interrupt delivery between writing
1000 	 * IRR and reading IsRunning; the lack of this barrier might be
1001 	 * the cause of erratum #1235.
1002 	 */
1003 	avic_vcpu_put(vcpu);
1004 
1005 	preempt_enable();
1006 }
1007 
1008 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
1009 {
1010 	int cpu;
1011 
1012 	if (!kvm_vcpu_apicv_active(vcpu))
1013 		return;
1014 
1015 	cpu = get_cpu();
1016 	WARN_ON(cpu != vcpu->cpu);
1017 
1018 	avic_vcpu_load(vcpu, cpu);
1019 
1020 	put_cpu();
1021 }
1022