// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * updating GICv4's view of the property table and generate an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated into an update of the existing mapping,
 *   changing the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated into a string of mapping updates (similar
 *   to the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu. See the illustrative sketch below.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
 */
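
/*
 * An illustrative sketch (not upstream code) of the residency dance
 * described above, as a hypothetical caller on the vcpu run path
 * might perform it. Assumes a fully initialized vcpu; error handling
 * is elided for brevity:
 *
 *	preempt_disable();
 *	WARN_ON(vgic_v4_load(vcpu));	// VMOVP + make the vPE resident
 *
 *	... enter the guest, take an exit ...
 *
 *	WARN_ON(vgic_v4_put(vcpu));	// make the vPE non-resident
 *	preempt_enable();
 */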

#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
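
/*
 * A note on the flags above (a summary of their generic-IRQ
 * semantics): IRQ_NOAUTOEN keeps the doorbell disabled at
 * request_irq() time, IRQ_DISABLE_UNLAZY makes disable_irq*() take
 * effect immediately instead of lazily, and IRQ_NO_BALANCING keeps
 * the kernel's IRQ balancing away from an interrupt whose affinity
 * KVM manages itself.
 */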

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Ensure we only update pending_last
	 * *after* the non-residency sequence has completed.
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled	= irq->enabled;
	vpe->sgi_config[irq->intid].group	= irq->group;
	vpe->sgi_config[irq->intid].priority	= irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to a host
	 * IRQ. The SGI code will do its magic.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}
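
/*
 * For context, a sketch (hedged, not a verbatim copy of the MMIO
 * handler) of how the above is expected to be driven: when the guest
 * toggles GICD_CTLR.nASSGIreq on a GICv4.1 host, the distributor
 * trap handler updates the distributor state and reconfigures every
 * vcpu's SGIs, roughly:
 *
 *	mutex_lock(&kvm->arch.config_lock);
 *	dist->nassgireq = val & GICD_CTLR_nASSGIreq;
 *	if (was_enabled != dist->nassgireq)
 *		vgic_v4_configure_vsgis(kvm);
 *	mutex_unlock(&kvm->arch.config_lock);
 */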
2069ed24f4bSMarc Zyngier 
20780317fe4SShenming Lu /*
20880317fe4SShenming Lu  * Must be called with GICv4.1 and the vPE unmapped, which
20980317fe4SShenming Lu  * indicates the invalidation of any VPT caches associated
21080317fe4SShenming Lu  * with the vPE, thus we can get the VLPI state by peeking
21180317fe4SShenming Lu  * at the VPT.
21280317fe4SShenming Lu  */
vgic_v4_get_vlpi_state(struct vgic_irq * irq,bool * val)21380317fe4SShenming Lu void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
21480317fe4SShenming Lu {
21580317fe4SShenming Lu 	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
21680317fe4SShenming Lu 	int mask = BIT(irq->intid % BITS_PER_BYTE);
21780317fe4SShenming Lu 	void *va;
21880317fe4SShenming Lu 	u8 *ptr;
21980317fe4SShenming Lu 
22080317fe4SShenming Lu 	va = page_address(vpe->vpt_page);
22180317fe4SShenming Lu 	ptr = va + irq->intid / BITS_PER_BYTE;
22280317fe4SShenming Lu 
22380317fe4SShenming Lu 	*val = !!(*ptr & mask);
22480317fe4SShenming Lu }
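
/*
 * Worked example of the byte/bit arithmetic above (illustrative
 * numbers only): for a VLPI with intid 8195, the pending byte lives
 * at offset 8195 / 8 = 1024 into the VPT, and the relevant bit is
 * BIT(8195 % 8) = BIT(3). The VPT is simply a pending table indexed
 * by INTID, one bit per interrupt.
 */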

int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
	return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. In both cases, the number of vcpus
 * should now be fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_vcpus, ret;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL_ACCOUNT);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too
		 * early...
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = vgic_v4_request_vpe_irq(vcpu, irq);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown...
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm:	Pointer to the VM being destroyed
 */
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

int vgic_v4_put(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	if (vcpu_get_flag(vcpu, IN_WFI))
		return 0;

	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the VPE is resident, let's get rid of a potential
	 * doorbell interrupt that would still be pending. This is a
	 * GICv4.0 only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}
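
/*
 * A hedged note on the call flow (an assumption about the callers,
 * not enforced here): vgic_v4_commit() is expected to run on the
 * guest-entry path after vgic_v4_load() has made the vPE resident.
 * Since vpe->ready is only invalidated by a vcpu_put, the
 * (potentially slow) its_commit_vpe() poll is paid once per
 * residency window, not on every shallow exit.
 */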

static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo	= irq_entry->msi.address_lo,
		.address_hi	= irq_entry->msi.address_hi,
		.data		= irq_entry->msi.data,
		.flags		= irq_entry->msi.flags,
		.devid		= irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= ((irq->priority & 0xfc) |
				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
				   LPI_PROP_GROUP1),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw		= true;
	irq->host_irq	= virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
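
/*
 * For reference, a hedged sketch of how the forwarding callbacks are
 * expected to be reached (the exact call site lives in the arch IRQ
 * bypass glue, not in this file): when VFIO registers an IRQ bypass
 * producer for a forwarded MSI, the irqbypass core matches it with
 * KVM's irqfd consumer, which ends up doing roughly:
 *
 *	struct kvm_kernel_irqfd *irqfd = ...;
 *
 *	kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
 *				   &irqfd->irq_entry);
 *
 * with the converse kvm_vgic_v4_unset_forwarding() call issued when
 * the producer goes away.
 */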

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}