xref: /openbmc/linux/arch/arm64/kvm/vgic/vgic-v3.c (revision b321c31c)
19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier 
39ed24f4bSMarc Zyngier #include <linux/irqchip/arm-gic-v3.h>
4f66b7b15SShenming Lu #include <linux/irq.h>
5f66b7b15SShenming Lu #include <linux/irqdomain.h>
6016cbbd2SChristophe JAILLET #include <linux/kstrtox.h>
79ed24f4bSMarc Zyngier #include <linux/kvm.h>
89ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
99ed24f4bSMarc Zyngier #include <kvm/arm_vgic.h>
109ed24f4bSMarc Zyngier #include <asm/kvm_hyp.h>
119ed24f4bSMarc Zyngier #include <asm/kvm_mmu.h>
129ed24f4bSMarc Zyngier #include <asm/kvm_asm.h>
139ed24f4bSMarc Zyngier 
149ed24f4bSMarc Zyngier #include "vgic.h"
159ed24f4bSMarc Zyngier 
/*
 * Early-param configurable behaviour (see early_param() hooks at the
 * bottom of this file):
 * - group0_trap: trap guest group-0 ICC_* accesses (kvm-arm.vgic_v3_group0_trap)
 * - group1_trap: trap guest group-1 ICC_* accesses (kvm-arm.vgic_v3_group1_trap)
 * - common_trap: trap common ICC_* accesses (kvm-arm.vgic_v3_common_trap)
 * - dir_trap:    trap ICC_DIR_EL1 writes (set via ICH_HCR_TDIR in vgic_v3_enable())
 * - gicv4_enable: opt into GICv4 direct injection (kvm-arm.vgic_v4_enable)
 */
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;
219ed24f4bSMarc Zyngier 
vgic_v3_set_underflow(struct kvm_vcpu * vcpu)229ed24f4bSMarc Zyngier void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
239ed24f4bSMarc Zyngier {
249ed24f4bSMarc Zyngier 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
259ed24f4bSMarc Zyngier 
269ed24f4bSMarc Zyngier 	cpuif->vgic_hcr |= ICH_HCR_UIE;
279ed24f4bSMarc Zyngier }
289ed24f4bSMarc Zyngier 
lr_signals_eoi_mi(u64 lr_val)299ed24f4bSMarc Zyngier static bool lr_signals_eoi_mi(u64 lr_val)
309ed24f4bSMarc Zyngier {
319ed24f4bSMarc Zyngier 	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
329ed24f4bSMarc Zyngier 	       !(lr_val & ICH_LR_HW);
339ed24f4bSMarc Zyngier }
349ed24f4bSMarc Zyngier 
/*
 * Fold the list-register state back into the software vgic_irq
 * structures after the guest has run.  Must be called with interrupts
 * disabled (asserted below); takes each irq's lock while updating it.
 */
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	/* Drop any pending underflow request now that we fold the LRs. */
	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		/* The INTID field layout depends on the emulated GIC model. */
		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* For v2 SGIs, remember which CPU sourced the active SGI. */
		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}
1059ed24f4bSMarc Zyngier 
1069ed24f4bSMarc Zyngier /* Requires the irq to be locked already */
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		/* v2 SGIs carry the originating CPU in the LR. */
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Multi-source SGIs must be delivered one source at a
		 * time: suppress pending and request an EOI MI instead.
		 */
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		/* Edge interrupts are consumed once injected. */
		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			/* Consume one source; re-latch if more remain. */
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC.  We therefore lower the line
	 * level here, so that we can take new virtual IRQs.  See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
1889ed24f4bSMarc Zyngier 
vgic_v3_clear_lr(struct kvm_vcpu * vcpu,int lr)1899ed24f4bSMarc Zyngier void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
1909ed24f4bSMarc Zyngier {
1919ed24f4bSMarc Zyngier 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
1929ed24f4bSMarc Zyngier }
1939ed24f4bSMarc Zyngier 
vgic_v3_set_vmcr(struct kvm_vcpu * vcpu,struct vgic_vmcr * vmcrp)1949ed24f4bSMarc Zyngier void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
1959ed24f4bSMarc Zyngier {
1969ed24f4bSMarc Zyngier 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
1979ed24f4bSMarc Zyngier 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
1989ed24f4bSMarc Zyngier 	u32 vmcr;
1999ed24f4bSMarc Zyngier 
2009ed24f4bSMarc Zyngier 	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2019ed24f4bSMarc Zyngier 		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
2029ed24f4bSMarc Zyngier 			ICH_VMCR_ACK_CTL_MASK;
2039ed24f4bSMarc Zyngier 		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
2049ed24f4bSMarc Zyngier 			ICH_VMCR_FIQ_EN_MASK;
2059ed24f4bSMarc Zyngier 	} else {
2069ed24f4bSMarc Zyngier 		/*
2079ed24f4bSMarc Zyngier 		 * When emulating GICv3 on GICv3 with SRE=1 on the
2089ed24f4bSMarc Zyngier 		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
2099ed24f4bSMarc Zyngier 		 */
2109ed24f4bSMarc Zyngier 		vmcr = ICH_VMCR_FIQ_EN_MASK;
2119ed24f4bSMarc Zyngier 	}
2129ed24f4bSMarc Zyngier 
2139ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
2149ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
2159ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
2169ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
2179ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
2189ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
2199ed24f4bSMarc Zyngier 	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
2209ed24f4bSMarc Zyngier 
2219ed24f4bSMarc Zyngier 	cpu_if->vgic_vmcr = vmcr;
2229ed24f4bSMarc Zyngier }
2239ed24f4bSMarc Zyngier 
vgic_v3_get_vmcr(struct kvm_vcpu * vcpu,struct vgic_vmcr * vmcrp)2249ed24f4bSMarc Zyngier void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
2259ed24f4bSMarc Zyngier {
2269ed24f4bSMarc Zyngier 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
2279ed24f4bSMarc Zyngier 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
2289ed24f4bSMarc Zyngier 	u32 vmcr;
2299ed24f4bSMarc Zyngier 
2309ed24f4bSMarc Zyngier 	vmcr = cpu_if->vgic_vmcr;
2319ed24f4bSMarc Zyngier 
2329ed24f4bSMarc Zyngier 	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2339ed24f4bSMarc Zyngier 		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
2349ed24f4bSMarc Zyngier 			ICH_VMCR_ACK_CTL_SHIFT;
2359ed24f4bSMarc Zyngier 		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
2369ed24f4bSMarc Zyngier 			ICH_VMCR_FIQ_EN_SHIFT;
2379ed24f4bSMarc Zyngier 	} else {
2389ed24f4bSMarc Zyngier 		/*
2399ed24f4bSMarc Zyngier 		 * When emulating GICv3 on GICv3 with SRE=1 on the
2409ed24f4bSMarc Zyngier 		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
2419ed24f4bSMarc Zyngier 		 */
2429ed24f4bSMarc Zyngier 		vmcrp->fiqen = 1;
2439ed24f4bSMarc Zyngier 		vmcrp->ackctl = 0;
2449ed24f4bSMarc Zyngier 	}
2459ed24f4bSMarc Zyngier 
2469ed24f4bSMarc Zyngier 	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
2479ed24f4bSMarc Zyngier 	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
2489ed24f4bSMarc Zyngier 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
2499ed24f4bSMarc Zyngier 	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
2509ed24f4bSMarc Zyngier 	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
2519ed24f4bSMarc Zyngier 	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
2529ed24f4bSMarc Zyngier 	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
2539ed24f4bSMarc Zyngier }
2549ed24f4bSMarc Zyngier 
/*
 * Reset value advertised in GICR_PENDBASER: inner read-alloc/write-back
 * cacheable, outer same-as-inner, inner-shareable.
 */
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
2599ed24f4bSMarc Zyngier 
/*
 * Reset the per-vcpu GICv3 CPU interface state and enable it,
 * applying the globally configured trap bits to ICH_HCR.
 */
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in an non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	/* Cache the ID/priority widths the HW reports in ICH_VTR_EL2. */
	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}
3049ed24f4bSMarc Zyngier 
/*
 * Read the LPI's pending bit from the guest's pending table, latch it
 * into the vgic_irq, and clear the consumed bit in guest memory.
 * Retries if the irq is retargeted to another vcpu while we read.
 * Returns 0 on success or a negative error from guest memory access.
 */
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	/* One bit per INTID in the pending table. */
	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	/* The irq may have moved to another vcpu's table; start over. */
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
3499ed24f4bSMarc Zyngier 
350f66b7b15SShenming Lu /*
351f66b7b15SShenming Lu  * The deactivation of the doorbell interrupt will trigger the
352f66b7b15SShenming Lu  * unmapping of the associated vPE.
353f66b7b15SShenming Lu  */
unmap_all_vpes(struct kvm * kvm)354ef369168SMarc Zyngier static void unmap_all_vpes(struct kvm *kvm)
355f66b7b15SShenming Lu {
356ef369168SMarc Zyngier 	struct vgic_dist *dist = &kvm->arch.vgic;
357f66b7b15SShenming Lu 	int i;
358f66b7b15SShenming Lu 
359ef369168SMarc Zyngier 	for (i = 0; i < dist->its_vm.nr_vpes; i++)
360ef369168SMarc Zyngier 		free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
361f66b7b15SShenming Lu }
362f66b7b15SShenming Lu 
map_all_vpes(struct kvm * kvm)363ef369168SMarc Zyngier static void map_all_vpes(struct kvm *kvm)
364f66b7b15SShenming Lu {
365ef369168SMarc Zyngier 	struct vgic_dist *dist = &kvm->arch.vgic;
366f66b7b15SShenming Lu 	int i;
367f66b7b15SShenming Lu 
368ef369168SMarc Zyngier 	for (i = 0; i < dist->its_vm.nr_vpes; i++)
369ef369168SMarc Zyngier 		WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
370ef369168SMarc Zyngier 						dist->its_vm.vpes[i]->irq));
371f66b7b15SShenming Lu }
372f66b7b15SShenming Lu 
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all vcpu locks must be held by the caller.
 *
 * Returns: 0 on success, -ENXIO if the vgic is not initialized, or a
 * negative error from guest memory access.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(kvm);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		/* One bit per INTID in the pending table. */
		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		/* Only re-read guest memory when we move to a new byte. */
		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		/* A hw-forwarded VLPI keeps its pending state in the ITS. */
		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		/* Skip the write when guest memory is already up to date. */
		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	/* Undo the vPE unmapping done above, even on failure. */
	if (vlpi_avail)
		map_all_vpes(kvm);

	return ret;
}
4499ed24f4bSMarc Zyngier 
4509ed24f4bSMarc Zyngier /**
4519ed24f4bSMarc Zyngier  * vgic_v3_rdist_overlap - check if a region overlaps with any
4529ed24f4bSMarc Zyngier  * existing redistributor region
4539ed24f4bSMarc Zyngier  *
4549ed24f4bSMarc Zyngier  * @kvm: kvm handle
4559ed24f4bSMarc Zyngier  * @base: base of the region
4569ed24f4bSMarc Zyngier  * @size: size of region
4579ed24f4bSMarc Zyngier  *
4589ed24f4bSMarc Zyngier  * Return: true if there is an overlap
4599ed24f4bSMarc Zyngier  */
vgic_v3_rdist_overlap(struct kvm * kvm,gpa_t base,size_t size)4609ed24f4bSMarc Zyngier bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
4619ed24f4bSMarc Zyngier {
4629ed24f4bSMarc Zyngier 	struct vgic_dist *d = &kvm->arch.vgic;
4639ed24f4bSMarc Zyngier 	struct vgic_redist_region *rdreg;
4649ed24f4bSMarc Zyngier 
4659ed24f4bSMarc Zyngier 	list_for_each_entry(rdreg, &d->rd_regions, list) {
4669ed24f4bSMarc Zyngier 		if ((base + size > rdreg->base) &&
4679ed24f4bSMarc Zyngier 			(base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
4689ed24f4bSMarc Zyngier 			return true;
4699ed24f4bSMarc Zyngier 	}
4709ed24f4bSMarc Zyngier 	return false;
4719ed24f4bSMarc Zyngier }
4729ed24f4bSMarc Zyngier 
4739ed24f4bSMarc Zyngier /*
4749ed24f4bSMarc Zyngier  * Check for overlapping regions and for regions crossing the end of memory
4759ed24f4bSMarc Zyngier  * for base addresses which have already been set.
4769ed24f4bSMarc Zyngier  */
vgic_v3_check_base(struct kvm * kvm)4779ed24f4bSMarc Zyngier bool vgic_v3_check_base(struct kvm *kvm)
4789ed24f4bSMarc Zyngier {
4799ed24f4bSMarc Zyngier 	struct vgic_dist *d = &kvm->arch.vgic;
4809ed24f4bSMarc Zyngier 	struct vgic_redist_region *rdreg;
4819ed24f4bSMarc Zyngier 
4829ed24f4bSMarc Zyngier 	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
4839ed24f4bSMarc Zyngier 	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
4849ed24f4bSMarc Zyngier 		return false;
4859ed24f4bSMarc Zyngier 
4869ed24f4bSMarc Zyngier 	list_for_each_entry(rdreg, &d->rd_regions, list) {
4874612d98fSRicardo Koller 		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);
4884612d98fSRicardo Koller 
4894612d98fSRicardo Koller 		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
4904612d98fSRicardo Koller 				       rdreg->base, SZ_64K, sz))
4919ed24f4bSMarc Zyngier 			return false;
4929ed24f4bSMarc Zyngier 	}
4939ed24f4bSMarc Zyngier 
4949ed24f4bSMarc Zyngier 	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
4959ed24f4bSMarc Zyngier 		return true;
4969ed24f4bSMarc Zyngier 
4979ed24f4bSMarc Zyngier 	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
4989ed24f4bSMarc Zyngier 				      KVM_VGIC_V3_DIST_SIZE);
4999ed24f4bSMarc Zyngier }
5009ed24f4bSMarc Zyngier 
5019ed24f4bSMarc Zyngier /**
5029ed24f4bSMarc Zyngier  * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
5039ed24f4bSMarc Zyngier  * which has free space to put a new rdist region.
5049ed24f4bSMarc Zyngier  *
5059ed24f4bSMarc Zyngier  * @rd_regions: redistributor region list head
5069ed24f4bSMarc Zyngier  *
5079ed24f4bSMarc Zyngier  * A redistributor regions maps n redistributors, n = region size / (2 x 64kB).
5089ed24f4bSMarc Zyngier  * Stride between redistributors is 0 and regions are filled in the index order.
5099ed24f4bSMarc Zyngier  *
5109ed24f4bSMarc Zyngier  * Return: the redist region handle, if any, that has space to map a new rdist
5119ed24f4bSMarc Zyngier  * region.
5129ed24f4bSMarc Zyngier  */
vgic_v3_rdist_free_slot(struct list_head * rd_regions)5139ed24f4bSMarc Zyngier struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
5149ed24f4bSMarc Zyngier {
5159ed24f4bSMarc Zyngier 	struct vgic_redist_region *rdreg;
5169ed24f4bSMarc Zyngier 
5179ed24f4bSMarc Zyngier 	list_for_each_entry(rdreg, rd_regions, list) {
5189ed24f4bSMarc Zyngier 		if (!vgic_v3_redist_region_full(rdreg))
5199ed24f4bSMarc Zyngier 			return rdreg;
5209ed24f4bSMarc Zyngier 	}
5219ed24f4bSMarc Zyngier 	return NULL;
5229ed24f4bSMarc Zyngier }
5239ed24f4bSMarc Zyngier 
/* Find the redistributor region registered under @index, or NULL. */
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct vgic_redist_region *iter;

	list_for_each_entry(iter, &kvm->arch.vgic.rd_regions, list)
		if (iter->index == index)
			return iter;

	return NULL;
}
5369ed24f4bSMarc Zyngier 
5379ed24f4bSMarc Zyngier 
vgic_v3_map_resources(struct kvm * kvm)5389ed24f4bSMarc Zyngier int vgic_v3_map_resources(struct kvm *kvm)
5399ed24f4bSMarc Zyngier {
5409ed24f4bSMarc Zyngier 	struct vgic_dist *dist = &kvm->arch.vgic;
5419ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
54246808a4cSMarc Zyngier 	unsigned long c;
5439ed24f4bSMarc Zyngier 
5449ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(c, vcpu, kvm) {
5459ed24f4bSMarc Zyngier 		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
5469ed24f4bSMarc Zyngier 
5479ed24f4bSMarc Zyngier 		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
54846808a4cSMarc Zyngier 			kvm_debug("vcpu %ld redistributor base not set\n", c);
549101068b5SMarc Zyngier 			return -ENXIO;
5509ed24f4bSMarc Zyngier 		}
5519ed24f4bSMarc Zyngier 	}
5529ed24f4bSMarc Zyngier 
5539ed24f4bSMarc Zyngier 	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
554440523b9SMarc Zyngier 		kvm_debug("Need to set vgic distributor addresses first\n");
555101068b5SMarc Zyngier 		return -ENXIO;
5569ed24f4bSMarc Zyngier 	}
5579ed24f4bSMarc Zyngier 
5589ed24f4bSMarc Zyngier 	if (!vgic_v3_check_base(kvm)) {
559440523b9SMarc Zyngier 		kvm_debug("VGIC redist and dist frames overlap\n");
560101068b5SMarc Zyngier 		return -EINVAL;
5619ed24f4bSMarc Zyngier 	}
5629ed24f4bSMarc Zyngier 
5639ed24f4bSMarc Zyngier 	/*
5649ed24f4bSMarc Zyngier 	 * For a VGICv3 we require the userland to explicitly initialize
5659ed24f4bSMarc Zyngier 	 * the VGIC before we need to use it.
5669ed24f4bSMarc Zyngier 	 */
5679ed24f4bSMarc Zyngier 	if (!vgic_initialized(kvm)) {
568101068b5SMarc Zyngier 		return -EBUSY;
5699ed24f4bSMarc Zyngier 	}
5709ed24f4bSMarc Zyngier 
5719ed24f4bSMarc Zyngier 	if (kvm_vgic_global_state.has_gicv4_1)
5729ed24f4bSMarc Zyngier 		vgic_v4_configure_vsgis(kvm);
5739ed24f4bSMarc Zyngier 
574101068b5SMarc Zyngier 	return 0;
5759ed24f4bSMarc Zyngier }
5769ed24f4bSMarc Zyngier 
/* NOTE(review): presumably enables the CPU-interface trap fast path when
 * any of the group0/group1/common/dir traps is requested — confirm users. */
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
5789ed24f4bSMarc Zyngier 
/* Parse "kvm-arm.vgic_v3_group0_trap" into group0_trap. */
static int __init early_group0_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

/* Parse "kvm-arm.vgic_v3_group1_trap" into group1_trap. */
static int __init early_group1_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

/* Parse "kvm-arm.vgic_v3_common_trap" into common_trap. */
static int __init early_common_trap_cfg(char *buf)
{
	return kstrtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

/* Parse "kvm-arm.vgic_v4_enable" into gicv4_enable. */
static int __init early_gicv4_enable(char *buf)
{
	return kstrtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
6029ed24f4bSMarc Zyngier 
/*
 * CPUs (Apple M1/M2 families) on which the SEIS feature advertised by
 * ICH_VTR_EL2 is not usable; checked by vgic_v3_broken_seis() below.
 */
static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};
618d11a327eSMarc Zyngier 
vgic_v3_broken_seis(void)619d11a327eSMarc Zyngier static bool vgic_v3_broken_seis(void)
620d11a327eSMarc Zyngier {
621d11a327eSMarc Zyngier 	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
622d11a327eSMarc Zyngier 		is_midr_in_range_list(read_cpuid_id(), broken_seis));
623d11a327eSMarc Zyngier }
624d11a327eSMarc Zyngier 
6259ed24f4bSMarc Zyngier /**
6269ed24f4bSMarc Zyngier  * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
6279ed24f4bSMarc Zyngier  * @info:	pointer to the GIC description
6289ed24f4bSMarc Zyngier  *
6299ed24f4bSMarc Zyngier  * Returns 0 if the VGICv3 has been probed successfully, returns an error code
6309ed24f4bSMarc Zyngier  * otherwise
6319ed24f4bSMarc Zyngier  */
vgic_v3_probe(const struct gic_kvm_info * info)6329ed24f4bSMarc Zyngier int vgic_v3_probe(const struct gic_kvm_info *info)
6339ed24f4bSMarc Zyngier {
634b9d699e2SMarc Zyngier 	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
6359739f6efSMarc Zyngier 	bool has_v2;
6369ed24f4bSMarc Zyngier 	int ret;
6379ed24f4bSMarc Zyngier 
6389739f6efSMarc Zyngier 	has_v2 = ich_vtr_el2 >> 63;
639b9d699e2SMarc Zyngier 	ich_vtr_el2 = (u32)ich_vtr_el2;
640b9d699e2SMarc Zyngier 
6419ed24f4bSMarc Zyngier 	/*
642656012c7SFuad Tabba 	 * The ListRegs field is 5 bits, but there is an architectural
6439ed24f4bSMarc Zyngier 	 * maximum of 16 list registers. Just ignore bit 4...
6449ed24f4bSMarc Zyngier 	 */
6459ed24f4bSMarc Zyngier 	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
6469ed24f4bSMarc Zyngier 	kvm_vgic_global_state.can_emulate_gicv2 = false;
6479ed24f4bSMarc Zyngier 	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
6489ed24f4bSMarc Zyngier 
6499ed24f4bSMarc Zyngier 	/* GICv4 support? */
6509ed24f4bSMarc Zyngier 	if (info->has_v4) {
6519ed24f4bSMarc Zyngier 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
6529ed24f4bSMarc Zyngier 		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
6539ed24f4bSMarc Zyngier 		kvm_info("GICv4%s support %sabled\n",
6549ed24f4bSMarc Zyngier 			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
6559ed24f4bSMarc Zyngier 			 gicv4_enable ? "en" : "dis");
6569ed24f4bSMarc Zyngier 	}
6579ed24f4bSMarc Zyngier 
6589739f6efSMarc Zyngier 	kvm_vgic_global_state.vcpu_base = 0;
6599739f6efSMarc Zyngier 
6609ed24f4bSMarc Zyngier 	if (!info->vcpu.start) {
6619ed24f4bSMarc Zyngier 		kvm_info("GICv3: no GICV resource entry\n");
6629739f6efSMarc Zyngier 	} else if (!has_v2) {
6639739f6efSMarc Zyngier 		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
6649ed24f4bSMarc Zyngier 	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
6659ed24f4bSMarc Zyngier 		pr_warn("GICV physical address 0x%llx not page aligned\n",
6669ed24f4bSMarc Zyngier 			(unsigned long long)info->vcpu.start);
667a770ee80SQuentin Perret 	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
6689ed24f4bSMarc Zyngier 		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
6699ed24f4bSMarc Zyngier 		kvm_vgic_global_state.can_emulate_gicv2 = true;
6709ed24f4bSMarc Zyngier 		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
6719ed24f4bSMarc Zyngier 		if (ret) {
6729ed24f4bSMarc Zyngier 			kvm_err("Cannot register GICv2 KVM device.\n");
6739ed24f4bSMarc Zyngier 			return ret;
6749ed24f4bSMarc Zyngier 		}
6759ed24f4bSMarc Zyngier 		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
6769ed24f4bSMarc Zyngier 	}
6779ed24f4bSMarc Zyngier 	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
6789ed24f4bSMarc Zyngier 	if (ret) {
6799ed24f4bSMarc Zyngier 		kvm_err("Cannot register GICv3 KVM device.\n");
6809ed24f4bSMarc Zyngier 		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
6819ed24f4bSMarc Zyngier 		return ret;
6829ed24f4bSMarc Zyngier 	}
6839ed24f4bSMarc Zyngier 
6849ed24f4bSMarc Zyngier 	if (kvm_vgic_global_state.vcpu_base == 0)
6859ed24f4bSMarc Zyngier 		kvm_info("disabling GICv2 emulation\n");
6869ed24f4bSMarc Zyngier 
6879ed24f4bSMarc Zyngier 	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
6889ed24f4bSMarc Zyngier 		group0_trap = true;
6899ed24f4bSMarc Zyngier 		group1_trap = true;
6909ed24f4bSMarc Zyngier 	}
6919ed24f4bSMarc Zyngier 
692d11a327eSMarc Zyngier 	if (vgic_v3_broken_seis()) {
693d11a327eSMarc Zyngier 		kvm_info("GICv3 with broken locally generated SEI\n");
694df652bcfSMarc Zyngier 
695d11a327eSMarc Zyngier 		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
696df652bcfSMarc Zyngier 		group0_trap = true;
697df652bcfSMarc Zyngier 		group1_trap = true;
6980924729bSMarc Zyngier 		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
6990924729bSMarc Zyngier 			dir_trap = true;
7000924729bSMarc Zyngier 		else
701df652bcfSMarc Zyngier 			common_trap = true;
702df652bcfSMarc Zyngier 	}
703df652bcfSMarc Zyngier 
7040924729bSMarc Zyngier 	if (group0_trap || group1_trap || common_trap | dir_trap) {
7050924729bSMarc Zyngier 		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
7069ed24f4bSMarc Zyngier 			 group0_trap ? "G0" : "",
7079ed24f4bSMarc Zyngier 			 group1_trap ? "G1" : "",
7080924729bSMarc Zyngier 			 common_trap ? "C"  : "",
7090924729bSMarc Zyngier 			 dir_trap    ? "D"  : "");
7109ed24f4bSMarc Zyngier 		static_branch_enable(&vgic_v3_cpuif_trap);
7119ed24f4bSMarc Zyngier 	}
7129ed24f4bSMarc Zyngier 
7139ed24f4bSMarc Zyngier 	kvm_vgic_global_state.vctrl_base = NULL;
7149ed24f4bSMarc Zyngier 	kvm_vgic_global_state.type = VGIC_V3;
7159ed24f4bSMarc Zyngier 	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
7169ed24f4bSMarc Zyngier 
7179ed24f4bSMarc Zyngier 	return 0;
7189ed24f4bSMarc Zyngier }
7199ed24f4bSMarc Zyngier 
/* Restore the per-vcpu GICv3 CPU interface state on vcpu load */
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	/* Active-priority registers are restored via a hyp call */
	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	/* With VHE, trap configuration is applied directly from EL2 here */
	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	/* GICv4 (re)residency last; failure is unexpected */
	WARN_ON(vgic_v4_load(vcpu));
}
7399ed24f4bSMarc Zyngier 
vgic_v3_vmcr_sync(struct kvm_vcpu * vcpu)7409ed24f4bSMarc Zyngier void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
7419ed24f4bSMarc Zyngier {
7429ed24f4bSMarc Zyngier 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
7439ed24f4bSMarc Zyngier 
7449ed24f4bSMarc Zyngier 	if (likely(cpu_if->vgic_sre))
7459ed24f4bSMarc Zyngier 		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
7469ed24f4bSMarc Zyngier }
7479ed24f4bSMarc Zyngier 
/* Save the per-vcpu GICv3 CPU interface state on vcpu put (reverse of load) */
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/* GICv4 state is put first; failure is unexpected */
	WARN_ON(vgic_v4_put(vcpu));

	vgic_v3_vmcr_sync(vcpu);

	/* Save active-priority registers via a hyp call */
	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	/* With VHE, undo the trap configuration applied by vgic_v3_load() */
	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}
761