// SPDX-License-Identifier: GPL-2.0-only
// arch/arm64/kvm/vgic/vgic-v3.c

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

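/*
 * An LR signals an EOI maintenance interrupt once the guest has EOI'd the
 * interrupt: the LR state is back to invalid, the EOI bit (only defined
 * when HW==0) requested the maintenance interrupt, and the LR does not
 * map a HW interrupt, whose deactivation goes straight to the physical
 * distributor instead.
 */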
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

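/*
 * Fold the guest-visible state of the in-use LRs back into the software
 * model after a guest exit: propagate EOIs of level-triggered SPIs to the
 * irq ack notifiers, and resample mapped level-triggered interrupts.
 */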
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

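/*
 * For reference, the ICH_LR<n>_EL2 layout this code relies on, per the
 * GICv3 architecture (see the ICH_LR_* definitions in arm-gic-v3.h):
 *
 *   [63:62] State (invalid/pending/active/pending+active)
 *   [61]    HW    (deactivation forwarded to the physical distributor)
 *   [60]    Group
 *   [56:48] Priority
 *   [44:32] pINTID when HW==1; bit 41 is EOI (maintenance IRQ) when HW==0
 *   [31:0]  vINTID
 */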
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

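		/*
		 * A GICv2 SGI can be pending from up to eight source CPUs,
		 * but an LR encodes a single source. Present one source at
		 * a time; if others remain, re-latch the pending state and
		 * request an EOI maintenance interrupt so that the next
		 * source is injected once the guest completes this one.
		 */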
		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC.  We therefore lower the line
	 * level here, so that we can take new virtual IRQs.  See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

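/*
 * vgic_v3_{set,get}_vmcr marshal the architected ICH_VMCR_EL2 layout
 * (VENG0 [0], VENG1 [1], VAckCtl [2], VFIQEn [3], VCBPR [4], VEOIM [9],
 * VBPR1 [20:18], VBPR0 [23:21], VPMR [31:24], per the ICH_VMCR_* masks)
 * to and from the unpacked struct vgic_vmcr representation.
 */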
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

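/*
 * Reset value presented to the guest in GICR_PENDBASER: an inner-cacheable
 * (RaWb), inner-shareable mapping for the LPI pending table, with the
 * outer attributes following the inner ones.
 */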
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

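	/*
	 * ICH_VTR_EL2.PRIbits encodes the number of implemented priority
	 * bits minus one, hence the +1 below; IDbits is kept in its raw
	 * encoded form, which is what gets reported back to the guest.
	 */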
	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

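/*
 * Sync the pending state of an LPI from the guest's pending table. Each
 * LPI owns one bit, indexed by INTID: for example the first LPI
 * (INTID 8192) lives at byte_offset 1024, bit_nr 0 of the table that
 * GICR_PENDBASER points at.
 */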
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

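		/*
		 * Consecutive LPIs often share a pending-table byte, so
		 * only re-read guest memory when the target byte changes.
		 */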
		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

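	/*
	 * Standard half-open interval test: [base, base + size) intersects
	 * a region iff each interval starts below the other one's end.
	 */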
	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
			(base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
			rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

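/*
 * Final wiring at first vcpu run: check that userspace has programmed the
 * distributor and redistributor bases and initialized the VGIC, then
 * register the distributor MMIO device and, on GICv4.1, configure the
 * vSGIs.
 */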
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm))
		return -EBUSY;

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

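/*
 * Early command-line knobs, parsed before the VGIC probes: for example,
 * booting with kvm-arm.vgic_v3_group0_trap=1 forces trapping of group-0
 * GIC system register accesses. strtobool() accepts the usual 1/0, y/n
 * and on/off spellings.
 */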
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Return: 0 if the VGICv3 has been probed successfully, an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

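	/*
	 * __vgic_v3_get_gic_config returns ICH_VTR_EL2 in the low 32 bits
	 * and advertises GICv2 compatibility of the CPU interface in
	 * bit 63.
	 */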
	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

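/*
 * vgic_v3_load/vgic_v3_put bracket vcpu execution on a physical CPU. The
 * ICH_* state lives in EL2 registers, hence the hyp calls; with VHE the
 * trap state is also (de)activated here, while nVHE handles that in the
 * world switch.
 */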
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}