xref: /openbmc/linux/arch/arm64/kvm/arm.c (revision 36fb4cd5)
19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier /*
39ed24f4bSMarc Zyngier  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
49ed24f4bSMarc Zyngier  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
59ed24f4bSMarc Zyngier  */
69ed24f4bSMarc Zyngier 
79ed24f4bSMarc Zyngier #include <linux/bug.h>
89ed24f4bSMarc Zyngier #include <linux/cpu_pm.h>
99ed24f4bSMarc Zyngier #include <linux/errno.h>
109ed24f4bSMarc Zyngier #include <linux/err.h>
119ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
129ed24f4bSMarc Zyngier #include <linux/list.h>
139ed24f4bSMarc Zyngier #include <linux/module.h>
149ed24f4bSMarc Zyngier #include <linux/vmalloc.h>
159ed24f4bSMarc Zyngier #include <linux/fs.h>
169ed24f4bSMarc Zyngier #include <linux/mman.h>
179ed24f4bSMarc Zyngier #include <linux/sched.h>
189ed24f4bSMarc Zyngier #include <linux/kvm.h>
199ed24f4bSMarc Zyngier #include <linux/kvm_irqfd.h>
209ed24f4bSMarc Zyngier #include <linux/irqbypass.h>
219ed24f4bSMarc Zyngier #include <linux/sched/stat.h>
229ed24f4bSMarc Zyngier #include <trace/events/kvm.h>
239ed24f4bSMarc Zyngier 
249ed24f4bSMarc Zyngier #define CREATE_TRACE_POINTS
259ed24f4bSMarc Zyngier #include "trace_arm.h"
269ed24f4bSMarc Zyngier 
279ed24f4bSMarc Zyngier #include <linux/uaccess.h>
289ed24f4bSMarc Zyngier #include <asm/ptrace.h>
299ed24f4bSMarc Zyngier #include <asm/mman.h>
309ed24f4bSMarc Zyngier #include <asm/tlbflush.h>
319ed24f4bSMarc Zyngier #include <asm/cacheflush.h>
329ed24f4bSMarc Zyngier #include <asm/cpufeature.h>
339ed24f4bSMarc Zyngier #include <asm/virt.h>
349ed24f4bSMarc Zyngier #include <asm/kvm_arm.h>
359ed24f4bSMarc Zyngier #include <asm/kvm_asm.h>
369ed24f4bSMarc Zyngier #include <asm/kvm_mmu.h>
379ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h>
389ed24f4bSMarc Zyngier #include <asm/kvm_coproc.h>
399ed24f4bSMarc Zyngier #include <asm/sections.h>
409ed24f4bSMarc Zyngier 
419ed24f4bSMarc Zyngier #include <kvm/arm_hypercalls.h>
429ed24f4bSMarc Zyngier #include <kvm/arm_pmu.h>
439ed24f4bSMarc Zyngier #include <kvm/arm_psci.h>
449ed24f4bSMarc Zyngier 
459ed24f4bSMarc Zyngier #ifdef REQUIRES_VIRT
469ed24f4bSMarc Zyngier __asm__(".arch_extension	virt");
479ed24f4bSMarc Zyngier #endif
489ed24f4bSMarc Zyngier 
4914ef9d04SMarc Zyngier DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
5014ef9d04SMarc Zyngier 
519ed24f4bSMarc Zyngier static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
5230c95391SDavid Brazdil unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
539ed24f4bSMarc Zyngier 
549ed24f4bSMarc Zyngier /* The VMID used in the VTTBR */
559ed24f4bSMarc Zyngier static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
569ed24f4bSMarc Zyngier static u32 kvm_next_vmid;
579ed24f4bSMarc Zyngier static DEFINE_SPINLOCK(kvm_vmid_lock);
589ed24f4bSMarc Zyngier 
599ed24f4bSMarc Zyngier static bool vgic_present;
609ed24f4bSMarc Zyngier 
619ed24f4bSMarc Zyngier static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
629ed24f4bSMarc Zyngier DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
639ed24f4bSMarc Zyngier 
649ed24f4bSMarc Zyngier int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
659ed24f4bSMarc Zyngier {
669ed24f4bSMarc Zyngier 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
679ed24f4bSMarc Zyngier }
689ed24f4bSMarc Zyngier 
699ed24f4bSMarc Zyngier int kvm_arch_hardware_setup(void *opaque)
709ed24f4bSMarc Zyngier {
719ed24f4bSMarc Zyngier 	return 0;
729ed24f4bSMarc Zyngier }
739ed24f4bSMarc Zyngier 
749ed24f4bSMarc Zyngier int kvm_arch_check_processor_compat(void *opaque)
759ed24f4bSMarc Zyngier {
769ed24f4bSMarc Zyngier 	return 0;
779ed24f4bSMarc Zyngier }
789ed24f4bSMarc Zyngier 
799ed24f4bSMarc Zyngier int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
809ed24f4bSMarc Zyngier 			    struct kvm_enable_cap *cap)
819ed24f4bSMarc Zyngier {
829ed24f4bSMarc Zyngier 	int r;
839ed24f4bSMarc Zyngier 
849ed24f4bSMarc Zyngier 	if (cap->flags)
859ed24f4bSMarc Zyngier 		return -EINVAL;
869ed24f4bSMarc Zyngier 
879ed24f4bSMarc Zyngier 	switch (cap->cap) {
889ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_NISV_TO_USER:
899ed24f4bSMarc Zyngier 		r = 0;
909ed24f4bSMarc Zyngier 		kvm->arch.return_nisv_io_abort_to_user = true;
919ed24f4bSMarc Zyngier 		break;
929ed24f4bSMarc Zyngier 	default:
939ed24f4bSMarc Zyngier 		r = -EINVAL;
949ed24f4bSMarc Zyngier 		break;
959ed24f4bSMarc Zyngier 	}
969ed24f4bSMarc Zyngier 
979ed24f4bSMarc Zyngier 	return r;
989ed24f4bSMarc Zyngier }
999ed24f4bSMarc Zyngier 
1005107000fSMarc Zyngier static int kvm_arm_default_max_vcpus(void)
1015107000fSMarc Zyngier {
1025107000fSMarc Zyngier 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
1035107000fSMarc Zyngier }
1045107000fSMarc Zyngier 
1059ed24f4bSMarc Zyngier /**
1069ed24f4bSMarc Zyngier  * kvm_arch_init_vm - initializes a VM data structure
1079ed24f4bSMarc Zyngier  * @kvm:	pointer to the KVM struct
1089ed24f4bSMarc Zyngier  */
1099ed24f4bSMarc Zyngier int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1109ed24f4bSMarc Zyngier {
111a0e50aa3SChristoffer Dall 	int ret;
1129ed24f4bSMarc Zyngier 
1139ed24f4bSMarc Zyngier 	ret = kvm_arm_setup_stage2(kvm, type);
1149ed24f4bSMarc Zyngier 	if (ret)
1159ed24f4bSMarc Zyngier 		return ret;
1169ed24f4bSMarc Zyngier 
117a0e50aa3SChristoffer Dall 	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
1189ed24f4bSMarc Zyngier 	if (ret)
119a0e50aa3SChristoffer Dall 		return ret;
1209ed24f4bSMarc Zyngier 
1219ed24f4bSMarc Zyngier 	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
1229ed24f4bSMarc Zyngier 	if (ret)
1239ed24f4bSMarc Zyngier 		goto out_free_stage2_pgd;
1249ed24f4bSMarc Zyngier 
1259ed24f4bSMarc Zyngier 	kvm_vgic_early_init(kvm);
1269ed24f4bSMarc Zyngier 
1279ed24f4bSMarc Zyngier 	/* The maximum number of VCPUs is limited by the host's GIC model */
1285107000fSMarc Zyngier 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
1299ed24f4bSMarc Zyngier 
1309ed24f4bSMarc Zyngier 	return ret;
1319ed24f4bSMarc Zyngier out_free_stage2_pgd:
132a0e50aa3SChristoffer Dall 	kvm_free_stage2_pgd(&kvm->arch.mmu);
1339ed24f4bSMarc Zyngier 	return ret;
1349ed24f4bSMarc Zyngier }
1359ed24f4bSMarc Zyngier 
1369ed24f4bSMarc Zyngier vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1379ed24f4bSMarc Zyngier {
1389ed24f4bSMarc Zyngier 	return VM_FAULT_SIGBUS;
1399ed24f4bSMarc Zyngier }
1409ed24f4bSMarc Zyngier 
1419ed24f4bSMarc Zyngier 
1429ed24f4bSMarc Zyngier /**
1439ed24f4bSMarc Zyngier  * kvm_arch_destroy_vm - destroy the VM data structure
1449ed24f4bSMarc Zyngier  * @kvm:	pointer to the KVM struct
1459ed24f4bSMarc Zyngier  */
1469ed24f4bSMarc Zyngier void kvm_arch_destroy_vm(struct kvm *kvm)
1479ed24f4bSMarc Zyngier {
1489ed24f4bSMarc Zyngier 	int i;
1499ed24f4bSMarc Zyngier 
150d7eec236SMarc Zyngier 	bitmap_free(kvm->arch.pmu_filter);
151d7eec236SMarc Zyngier 
1529ed24f4bSMarc Zyngier 	kvm_vgic_destroy(kvm);
1539ed24f4bSMarc Zyngier 
1549ed24f4bSMarc Zyngier 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
1559ed24f4bSMarc Zyngier 		if (kvm->vcpus[i]) {
1569ed24f4bSMarc Zyngier 			kvm_vcpu_destroy(kvm->vcpus[i]);
1579ed24f4bSMarc Zyngier 			kvm->vcpus[i] = NULL;
1589ed24f4bSMarc Zyngier 		}
1599ed24f4bSMarc Zyngier 	}
1609ed24f4bSMarc Zyngier 	atomic_set(&kvm->online_vcpus, 0);
1619ed24f4bSMarc Zyngier }
1629ed24f4bSMarc Zyngier 
1639ed24f4bSMarc Zyngier int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1649ed24f4bSMarc Zyngier {
1659ed24f4bSMarc Zyngier 	int r;
1669ed24f4bSMarc Zyngier 	switch (ext) {
1679ed24f4bSMarc Zyngier 	case KVM_CAP_IRQCHIP:
1689ed24f4bSMarc Zyngier 		r = vgic_present;
1699ed24f4bSMarc Zyngier 		break;
1709ed24f4bSMarc Zyngier 	case KVM_CAP_IOEVENTFD:
1719ed24f4bSMarc Zyngier 	case KVM_CAP_DEVICE_CTRL:
1729ed24f4bSMarc Zyngier 	case KVM_CAP_USER_MEMORY:
1739ed24f4bSMarc Zyngier 	case KVM_CAP_SYNC_MMU:
1749ed24f4bSMarc Zyngier 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
1759ed24f4bSMarc Zyngier 	case KVM_CAP_ONE_REG:
1769ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_PSCI:
1779ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_PSCI_0_2:
1789ed24f4bSMarc Zyngier 	case KVM_CAP_READONLY_MEM:
1799ed24f4bSMarc Zyngier 	case KVM_CAP_MP_STATE:
1809ed24f4bSMarc Zyngier 	case KVM_CAP_IMMEDIATE_EXIT:
1819ed24f4bSMarc Zyngier 	case KVM_CAP_VCPU_EVENTS:
1829ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
1839ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_NISV_TO_USER:
1849ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_INJECT_EXT_DABT:
185*36fb4cd5SWill Deacon 	case KVM_CAP_SET_GUEST_DEBUG:
186*36fb4cd5SWill Deacon 	case KVM_CAP_VCPU_ATTRIBUTES:
1879ed24f4bSMarc Zyngier 		r = 1;
1889ed24f4bSMarc Zyngier 		break;
1899ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
1909ed24f4bSMarc Zyngier 		r = 1;
1919ed24f4bSMarc Zyngier 		break;
1929ed24f4bSMarc Zyngier 	case KVM_CAP_NR_VCPUS:
1939ed24f4bSMarc Zyngier 		r = num_online_cpus();
1949ed24f4bSMarc Zyngier 		break;
1959ed24f4bSMarc Zyngier 	case KVM_CAP_MAX_VCPUS:
1969ed24f4bSMarc Zyngier 	case KVM_CAP_MAX_VCPU_ID:
1975107000fSMarc Zyngier 		if (kvm)
1985107000fSMarc Zyngier 			r = kvm->arch.max_vcpus;
1995107000fSMarc Zyngier 		else
2005107000fSMarc Zyngier 			r = kvm_arm_default_max_vcpus();
2019ed24f4bSMarc Zyngier 		break;
2029ed24f4bSMarc Zyngier 	case KVM_CAP_MSI_DEVID:
2039ed24f4bSMarc Zyngier 		if (!kvm)
2049ed24f4bSMarc Zyngier 			r = -EINVAL;
2059ed24f4bSMarc Zyngier 		else
2069ed24f4bSMarc Zyngier 			r = kvm->arch.vgic.msis_require_devid;
2079ed24f4bSMarc Zyngier 		break;
2089ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_USER_IRQ:
2099ed24f4bSMarc Zyngier 		/*
2109ed24f4bSMarc Zyngier 		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
2119ed24f4bSMarc Zyngier 		 * (bump this number if adding more devices)
2129ed24f4bSMarc Zyngier 		 */
2139ed24f4bSMarc Zyngier 		r = 1;
2149ed24f4bSMarc Zyngier 		break;
215004a0124SAndrew Jones 	case KVM_CAP_STEAL_TIME:
216004a0124SAndrew Jones 		r = kvm_arm_pvtime_supported();
217004a0124SAndrew Jones 		break;
218*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_EL1_32BIT:
219*36fb4cd5SWill Deacon 		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
2209ed24f4bSMarc Zyngier 		break;
221*36fb4cd5SWill Deacon 	case KVM_CAP_GUEST_DEBUG_HW_BPS:
222*36fb4cd5SWill Deacon 		r = get_num_brps();
223*36fb4cd5SWill Deacon 		break;
224*36fb4cd5SWill Deacon 	case KVM_CAP_GUEST_DEBUG_HW_WPS:
225*36fb4cd5SWill Deacon 		r = get_num_wrps();
226*36fb4cd5SWill Deacon 		break;
227*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_PMU_V3:
228*36fb4cd5SWill Deacon 		r = kvm_arm_support_pmu_v3();
229*36fb4cd5SWill Deacon 		break;
230*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
231*36fb4cd5SWill Deacon 		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
232*36fb4cd5SWill Deacon 		break;
233*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_VM_IPA_SIZE:
234*36fb4cd5SWill Deacon 		r = get_kvm_ipa_limit();
235*36fb4cd5SWill Deacon 		break;
236*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_SVE:
237*36fb4cd5SWill Deacon 		r = system_supports_sve();
238*36fb4cd5SWill Deacon 		break;
239*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
240*36fb4cd5SWill Deacon 	case KVM_CAP_ARM_PTRAUTH_GENERIC:
241*36fb4cd5SWill Deacon 		r = system_has_full_ptr_auth();
242*36fb4cd5SWill Deacon 		break;
243*36fb4cd5SWill Deacon 	default:
244*36fb4cd5SWill Deacon 		r = 0;
2459ed24f4bSMarc Zyngier 	}
246*36fb4cd5SWill Deacon 
2479ed24f4bSMarc Zyngier 	return r;
2489ed24f4bSMarc Zyngier }
2499ed24f4bSMarc Zyngier 
2509ed24f4bSMarc Zyngier long kvm_arch_dev_ioctl(struct file *filp,
2519ed24f4bSMarc Zyngier 			unsigned int ioctl, unsigned long arg)
2529ed24f4bSMarc Zyngier {
2539ed24f4bSMarc Zyngier 	return -EINVAL;
2549ed24f4bSMarc Zyngier }
2559ed24f4bSMarc Zyngier 
2569ed24f4bSMarc Zyngier struct kvm *kvm_arch_alloc_vm(void)
2579ed24f4bSMarc Zyngier {
2589ed24f4bSMarc Zyngier 	if (!has_vhe())
2599ed24f4bSMarc Zyngier 		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
2609ed24f4bSMarc Zyngier 
2619ed24f4bSMarc Zyngier 	return vzalloc(sizeof(struct kvm));
2629ed24f4bSMarc Zyngier }
2639ed24f4bSMarc Zyngier 
2649ed24f4bSMarc Zyngier void kvm_arch_free_vm(struct kvm *kvm)
2659ed24f4bSMarc Zyngier {
2669ed24f4bSMarc Zyngier 	if (!has_vhe())
2679ed24f4bSMarc Zyngier 		kfree(kvm);
2689ed24f4bSMarc Zyngier 	else
2699ed24f4bSMarc Zyngier 		vfree(kvm);
2709ed24f4bSMarc Zyngier }
2719ed24f4bSMarc Zyngier 
2729ed24f4bSMarc Zyngier int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
2739ed24f4bSMarc Zyngier {
2749ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
2759ed24f4bSMarc Zyngier 		return -EBUSY;
2769ed24f4bSMarc Zyngier 
2779ed24f4bSMarc Zyngier 	if (id >= kvm->arch.max_vcpus)
2789ed24f4bSMarc Zyngier 		return -EINVAL;
2799ed24f4bSMarc Zyngier 
2809ed24f4bSMarc Zyngier 	return 0;
2819ed24f4bSMarc Zyngier }
2829ed24f4bSMarc Zyngier 
2839ed24f4bSMarc Zyngier int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
2849ed24f4bSMarc Zyngier {
2859ed24f4bSMarc Zyngier 	int err;
2869ed24f4bSMarc Zyngier 
2879ed24f4bSMarc Zyngier 	/* Force users to call KVM_ARM_VCPU_INIT */
2889ed24f4bSMarc Zyngier 	vcpu->arch.target = -1;
2899ed24f4bSMarc Zyngier 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
2909ed24f4bSMarc Zyngier 
291e539451bSSean Christopherson 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
292e539451bSSean Christopherson 
2939ed24f4bSMarc Zyngier 	/* Set up the timer */
2949ed24f4bSMarc Zyngier 	kvm_timer_vcpu_init(vcpu);
2959ed24f4bSMarc Zyngier 
2969ed24f4bSMarc Zyngier 	kvm_pmu_vcpu_init(vcpu);
2979ed24f4bSMarc Zyngier 
2989ed24f4bSMarc Zyngier 	kvm_arm_reset_debug_ptr(vcpu);
2999ed24f4bSMarc Zyngier 
3009ed24f4bSMarc Zyngier 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
3019ed24f4bSMarc Zyngier 
302a0e50aa3SChristoffer Dall 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
303a0e50aa3SChristoffer Dall 
3049ed24f4bSMarc Zyngier 	err = kvm_vgic_vcpu_init(vcpu);
3059ed24f4bSMarc Zyngier 	if (err)
3069ed24f4bSMarc Zyngier 		return err;
3079ed24f4bSMarc Zyngier 
3089ed24f4bSMarc Zyngier 	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
3099ed24f4bSMarc Zyngier }
3109ed24f4bSMarc Zyngier 
3119ed24f4bSMarc Zyngier void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3129ed24f4bSMarc Zyngier {
3139ed24f4bSMarc Zyngier }
3149ed24f4bSMarc Zyngier 
3159ed24f4bSMarc Zyngier void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3169ed24f4bSMarc Zyngier {
3179ed24f4bSMarc Zyngier 	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
3189ed24f4bSMarc Zyngier 		static_branch_dec(&userspace_irqchip_in_use);
3199ed24f4bSMarc Zyngier 
3209af3e08bSWill Deacon 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
3219ed24f4bSMarc Zyngier 	kvm_timer_vcpu_terminate(vcpu);
3229ed24f4bSMarc Zyngier 	kvm_pmu_vcpu_destroy(vcpu);
3239ed24f4bSMarc Zyngier 
3249ed24f4bSMarc Zyngier 	kvm_arm_vcpu_destroy(vcpu);
3259ed24f4bSMarc Zyngier }
3269ed24f4bSMarc Zyngier 
3279ed24f4bSMarc Zyngier int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
3289ed24f4bSMarc Zyngier {
3299ed24f4bSMarc Zyngier 	return kvm_timer_is_pending(vcpu);
3309ed24f4bSMarc Zyngier }
3319ed24f4bSMarc Zyngier 
3329ed24f4bSMarc Zyngier void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
3339ed24f4bSMarc Zyngier {
3349ed24f4bSMarc Zyngier 	/*
3359ed24f4bSMarc Zyngier 	 * If we're about to block (most likely because we've just hit a
3369ed24f4bSMarc Zyngier 	 * WFI), we need to sync back the state of the GIC CPU interface
3379ed24f4bSMarc Zyngier 	 * so that we have the latest PMR and group enables. This ensures
3389ed24f4bSMarc Zyngier 	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
3399ed24f4bSMarc Zyngier 	 * whether we have pending interrupts.
3409ed24f4bSMarc Zyngier 	 *
3419ed24f4bSMarc Zyngier 	 * For the same reason, we want to tell GICv4 that we need
3429ed24f4bSMarc Zyngier 	 * doorbells to be signalled, should an interrupt become pending.
3439ed24f4bSMarc Zyngier 	 */
3449ed24f4bSMarc Zyngier 	preempt_disable();
3459ed24f4bSMarc Zyngier 	kvm_vgic_vmcr_sync(vcpu);
3469ed24f4bSMarc Zyngier 	vgic_v4_put(vcpu, true);
3479ed24f4bSMarc Zyngier 	preempt_enable();
3489ed24f4bSMarc Zyngier }
3499ed24f4bSMarc Zyngier 
3509ed24f4bSMarc Zyngier void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
3519ed24f4bSMarc Zyngier {
3529ed24f4bSMarc Zyngier 	preempt_disable();
3539ed24f4bSMarc Zyngier 	vgic_v4_load(vcpu);
3549ed24f4bSMarc Zyngier 	preempt_enable();
3559ed24f4bSMarc Zyngier }
3569ed24f4bSMarc Zyngier 
3579ed24f4bSMarc Zyngier void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3589ed24f4bSMarc Zyngier {
359a0e50aa3SChristoffer Dall 	struct kvm_s2_mmu *mmu;
3609ed24f4bSMarc Zyngier 	int *last_ran;
3619ed24f4bSMarc Zyngier 
362a0e50aa3SChristoffer Dall 	mmu = vcpu->arch.hw_mmu;
363a0e50aa3SChristoffer Dall 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
3649ed24f4bSMarc Zyngier 
3659ed24f4bSMarc Zyngier 	/*
3669ed24f4bSMarc Zyngier 	 * We might get preempted before the vCPU actually runs, but
3679ed24f4bSMarc Zyngier 	 * over-invalidation doesn't affect correctness.
3689ed24f4bSMarc Zyngier 	 */
3699ed24f4bSMarc Zyngier 	if (*last_ran != vcpu->vcpu_id) {
370a0e50aa3SChristoffer Dall 		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
3719ed24f4bSMarc Zyngier 		*last_ran = vcpu->vcpu_id;
3729ed24f4bSMarc Zyngier 	}
3739ed24f4bSMarc Zyngier 
3749ed24f4bSMarc Zyngier 	vcpu->cpu = cpu;
3759ed24f4bSMarc Zyngier 
3769ed24f4bSMarc Zyngier 	kvm_vgic_load(vcpu);
3779ed24f4bSMarc Zyngier 	kvm_timer_vcpu_load(vcpu);
37813aeb9b4SDavid Brazdil 	if (has_vhe())
37913aeb9b4SDavid Brazdil 		kvm_vcpu_load_sysregs_vhe(vcpu);
3809ed24f4bSMarc Zyngier 	kvm_arch_vcpu_load_fp(vcpu);
3819ed24f4bSMarc Zyngier 	kvm_vcpu_pmu_restore_guest(vcpu);
3829ed24f4bSMarc Zyngier 	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
3839ed24f4bSMarc Zyngier 		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
3849ed24f4bSMarc Zyngier 
3859ed24f4bSMarc Zyngier 	if (single_task_running())
3869ed24f4bSMarc Zyngier 		vcpu_clear_wfx_traps(vcpu);
3879ed24f4bSMarc Zyngier 	else
3889ed24f4bSMarc Zyngier 		vcpu_set_wfx_traps(vcpu);
3899ed24f4bSMarc Zyngier 
39029eb5a3cSMarc Zyngier 	if (vcpu_has_ptrauth(vcpu))
391ef3e40a7SMarc Zyngier 		vcpu_ptrauth_disable(vcpu);
3929ed24f4bSMarc Zyngier }
3939ed24f4bSMarc Zyngier 
3949ed24f4bSMarc Zyngier void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3959ed24f4bSMarc Zyngier {
3969ed24f4bSMarc Zyngier 	kvm_arch_vcpu_put_fp(vcpu);
39713aeb9b4SDavid Brazdil 	if (has_vhe())
39813aeb9b4SDavid Brazdil 		kvm_vcpu_put_sysregs_vhe(vcpu);
3999ed24f4bSMarc Zyngier 	kvm_timer_vcpu_put(vcpu);
4009ed24f4bSMarc Zyngier 	kvm_vgic_put(vcpu);
4019ed24f4bSMarc Zyngier 	kvm_vcpu_pmu_restore_host(vcpu);
4029ed24f4bSMarc Zyngier 
4039ed24f4bSMarc Zyngier 	vcpu->cpu = -1;
4049ed24f4bSMarc Zyngier }
4059ed24f4bSMarc Zyngier 
4069ed24f4bSMarc Zyngier static void vcpu_power_off(struct kvm_vcpu *vcpu)
4079ed24f4bSMarc Zyngier {
4089ed24f4bSMarc Zyngier 	vcpu->arch.power_off = true;
4099ed24f4bSMarc Zyngier 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
4109ed24f4bSMarc Zyngier 	kvm_vcpu_kick(vcpu);
4119ed24f4bSMarc Zyngier }
4129ed24f4bSMarc Zyngier 
4139ed24f4bSMarc Zyngier int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4149ed24f4bSMarc Zyngier 				    struct kvm_mp_state *mp_state)
4159ed24f4bSMarc Zyngier {
4169ed24f4bSMarc Zyngier 	if (vcpu->arch.power_off)
4179ed24f4bSMarc Zyngier 		mp_state->mp_state = KVM_MP_STATE_STOPPED;
4189ed24f4bSMarc Zyngier 	else
4199ed24f4bSMarc Zyngier 		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
4209ed24f4bSMarc Zyngier 
4219ed24f4bSMarc Zyngier 	return 0;
4229ed24f4bSMarc Zyngier }
4239ed24f4bSMarc Zyngier 
4249ed24f4bSMarc Zyngier int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4259ed24f4bSMarc Zyngier 				    struct kvm_mp_state *mp_state)
4269ed24f4bSMarc Zyngier {
4279ed24f4bSMarc Zyngier 	int ret = 0;
4289ed24f4bSMarc Zyngier 
4299ed24f4bSMarc Zyngier 	switch (mp_state->mp_state) {
4309ed24f4bSMarc Zyngier 	case KVM_MP_STATE_RUNNABLE:
4319ed24f4bSMarc Zyngier 		vcpu->arch.power_off = false;
4329ed24f4bSMarc Zyngier 		break;
4339ed24f4bSMarc Zyngier 	case KVM_MP_STATE_STOPPED:
4349ed24f4bSMarc Zyngier 		vcpu_power_off(vcpu);
4359ed24f4bSMarc Zyngier 		break;
4369ed24f4bSMarc Zyngier 	default:
4379ed24f4bSMarc Zyngier 		ret = -EINVAL;
4389ed24f4bSMarc Zyngier 	}
4399ed24f4bSMarc Zyngier 
4409ed24f4bSMarc Zyngier 	return ret;
4419ed24f4bSMarc Zyngier }
4429ed24f4bSMarc Zyngier 
4439ed24f4bSMarc Zyngier /**
4449ed24f4bSMarc Zyngier  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
4459ed24f4bSMarc Zyngier  * @v:		The VCPU pointer
4469ed24f4bSMarc Zyngier  *
4479ed24f4bSMarc Zyngier  * If the guest CPU is not waiting for interrupts or an interrupt line is
4489ed24f4bSMarc Zyngier  * asserted, the CPU is by definition runnable.
4499ed24f4bSMarc Zyngier  */
4509ed24f4bSMarc Zyngier int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
4519ed24f4bSMarc Zyngier {
4529ed24f4bSMarc Zyngier 	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
4539ed24f4bSMarc Zyngier 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
4549ed24f4bSMarc Zyngier 		&& !v->arch.power_off && !v->arch.pause);
4559ed24f4bSMarc Zyngier }
4569ed24f4bSMarc Zyngier 
4579ed24f4bSMarc Zyngier bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4589ed24f4bSMarc Zyngier {
4599ed24f4bSMarc Zyngier 	return vcpu_mode_priv(vcpu);
4609ed24f4bSMarc Zyngier }
4619ed24f4bSMarc Zyngier 
4629ed24f4bSMarc Zyngier /* Just ensure a guest exit from a particular CPU */
4639ed24f4bSMarc Zyngier static void exit_vm_noop(void *info)
4649ed24f4bSMarc Zyngier {
4659ed24f4bSMarc Zyngier }
4669ed24f4bSMarc Zyngier 
4679ed24f4bSMarc Zyngier void force_vm_exit(const cpumask_t *mask)
4689ed24f4bSMarc Zyngier {
4699ed24f4bSMarc Zyngier 	preempt_disable();
4709ed24f4bSMarc Zyngier 	smp_call_function_many(mask, exit_vm_noop, NULL, true);
4719ed24f4bSMarc Zyngier 	preempt_enable();
4729ed24f4bSMarc Zyngier }
4739ed24f4bSMarc Zyngier 
4749ed24f4bSMarc Zyngier /**
4759ed24f4bSMarc Zyngier  * need_new_vmid_gen - check that the VMID is still valid
4769ed24f4bSMarc Zyngier  * @vmid: The VMID to check
4779ed24f4bSMarc Zyngier  *
4789ed24f4bSMarc Zyngier  * return true if there is a new generation of VMIDs being used
4799ed24f4bSMarc Zyngier  *
4809ed24f4bSMarc Zyngier  * The hardware supports a limited set of values with the value zero reserved
4819ed24f4bSMarc Zyngier  * for the host, so we check if an assigned value belongs to a previous
482656012c7SFuad Tabba  * generation, which requires us to assign a new value. If we're the first to
483656012c7SFuad Tabba  * use a VMID for the new generation, we must flush necessary caches and TLBs
484656012c7SFuad Tabba  * on all CPUs.
4859ed24f4bSMarc Zyngier  */
4869ed24f4bSMarc Zyngier static bool need_new_vmid_gen(struct kvm_vmid *vmid)
4879ed24f4bSMarc Zyngier {
4889ed24f4bSMarc Zyngier 	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
4899ed24f4bSMarc Zyngier 	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
4909ed24f4bSMarc Zyngier 	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
4919ed24f4bSMarc Zyngier }
4929ed24f4bSMarc Zyngier 
4939ed24f4bSMarc Zyngier /**
4949ed24f4bSMarc Zyngier  * update_vmid - Update the vmid with a valid VMID for the current generation
4959ed24f4bSMarc Zyngier  * @vmid: The stage-2 VMID information struct
4969ed24f4bSMarc Zyngier  */
4979ed24f4bSMarc Zyngier static void update_vmid(struct kvm_vmid *vmid)
4989ed24f4bSMarc Zyngier {
4999ed24f4bSMarc Zyngier 	if (!need_new_vmid_gen(vmid))
5009ed24f4bSMarc Zyngier 		return;
5019ed24f4bSMarc Zyngier 
5029ed24f4bSMarc Zyngier 	spin_lock(&kvm_vmid_lock);
5039ed24f4bSMarc Zyngier 
5049ed24f4bSMarc Zyngier 	/*
5059ed24f4bSMarc Zyngier 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
5069ed24f4bSMarc Zyngier 	 * already allocated a valid vmid for this vm, then this vcpu should
5079ed24f4bSMarc Zyngier 	 * use the same vmid.
5089ed24f4bSMarc Zyngier 	 */
5099ed24f4bSMarc Zyngier 	if (!need_new_vmid_gen(vmid)) {
5109ed24f4bSMarc Zyngier 		spin_unlock(&kvm_vmid_lock);
5119ed24f4bSMarc Zyngier 		return;
5129ed24f4bSMarc Zyngier 	}
5139ed24f4bSMarc Zyngier 
5149ed24f4bSMarc Zyngier 	/* First user of a new VMID generation? */
5159ed24f4bSMarc Zyngier 	if (unlikely(kvm_next_vmid == 0)) {
5169ed24f4bSMarc Zyngier 		atomic64_inc(&kvm_vmid_gen);
5179ed24f4bSMarc Zyngier 		kvm_next_vmid = 1;
5189ed24f4bSMarc Zyngier 
5199ed24f4bSMarc Zyngier 		/*
5209ed24f4bSMarc Zyngier 		 * On SMP we know no other CPUs can use this CPU's or each
5219ed24f4bSMarc Zyngier 		 * other's VMID after force_vm_exit returns since the
5229ed24f4bSMarc Zyngier 		 * kvm_vmid_lock blocks them from reentry to the guest.
5239ed24f4bSMarc Zyngier 		 */
5249ed24f4bSMarc Zyngier 		force_vm_exit(cpu_all_mask);
5259ed24f4bSMarc Zyngier 		/*
5269ed24f4bSMarc Zyngier 		 * Now broadcast TLB + ICACHE invalidation over the inner
5279ed24f4bSMarc Zyngier 		 * shareable domain to make sure all data structures are
5289ed24f4bSMarc Zyngier 		 * clean.
5299ed24f4bSMarc Zyngier 		 */
5309ed24f4bSMarc Zyngier 		kvm_call_hyp(__kvm_flush_vm_context);
5319ed24f4bSMarc Zyngier 	}
5329ed24f4bSMarc Zyngier 
5339ed24f4bSMarc Zyngier 	vmid->vmid = kvm_next_vmid;
5349ed24f4bSMarc Zyngier 	kvm_next_vmid++;
5359ed24f4bSMarc Zyngier 	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
5369ed24f4bSMarc Zyngier 
5379ed24f4bSMarc Zyngier 	smp_wmb();
5389ed24f4bSMarc Zyngier 	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
5399ed24f4bSMarc Zyngier 
5409ed24f4bSMarc Zyngier 	spin_unlock(&kvm_vmid_lock);
5419ed24f4bSMarc Zyngier }
5429ed24f4bSMarc Zyngier 
5439ed24f4bSMarc Zyngier static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
5449ed24f4bSMarc Zyngier {
5459ed24f4bSMarc Zyngier 	struct kvm *kvm = vcpu->kvm;
5469ed24f4bSMarc Zyngier 	int ret = 0;
5479ed24f4bSMarc Zyngier 
5489ed24f4bSMarc Zyngier 	if (likely(vcpu->arch.has_run_once))
5499ed24f4bSMarc Zyngier 		return 0;
5509ed24f4bSMarc Zyngier 
5519ed24f4bSMarc Zyngier 	if (!kvm_arm_vcpu_is_finalized(vcpu))
5529ed24f4bSMarc Zyngier 		return -EPERM;
5539ed24f4bSMarc Zyngier 
5549ed24f4bSMarc Zyngier 	vcpu->arch.has_run_once = true;
5559ed24f4bSMarc Zyngier 
5569ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(kvm))) {
5579ed24f4bSMarc Zyngier 		/*
5589ed24f4bSMarc Zyngier 		 * Map the VGIC hardware resources before running a vcpu the
5599ed24f4bSMarc Zyngier 		 * first time on this VM.
5609ed24f4bSMarc Zyngier 		 */
5619ed24f4bSMarc Zyngier 		if (unlikely(!vgic_ready(kvm))) {
5629ed24f4bSMarc Zyngier 			ret = kvm_vgic_map_resources(kvm);
5639ed24f4bSMarc Zyngier 			if (ret)
5649ed24f4bSMarc Zyngier 				return ret;
5659ed24f4bSMarc Zyngier 		}
5669ed24f4bSMarc Zyngier 	} else {
5679ed24f4bSMarc Zyngier 		/*
5689ed24f4bSMarc Zyngier 		 * Tell the rest of the code that there are userspace irqchip
5699ed24f4bSMarc Zyngier 		 * VMs in the wild.
5709ed24f4bSMarc Zyngier 		 */
5719ed24f4bSMarc Zyngier 		static_branch_inc(&userspace_irqchip_in_use);
5729ed24f4bSMarc Zyngier 	}
5739ed24f4bSMarc Zyngier 
5749ed24f4bSMarc Zyngier 	ret = kvm_timer_enable(vcpu);
5759ed24f4bSMarc Zyngier 	if (ret)
5769ed24f4bSMarc Zyngier 		return ret;
5779ed24f4bSMarc Zyngier 
5789ed24f4bSMarc Zyngier 	ret = kvm_arm_pmu_v3_enable(vcpu);
5799ed24f4bSMarc Zyngier 
5809ed24f4bSMarc Zyngier 	return ret;
5819ed24f4bSMarc Zyngier }
5829ed24f4bSMarc Zyngier 
5839ed24f4bSMarc Zyngier bool kvm_arch_intc_initialized(struct kvm *kvm)
5849ed24f4bSMarc Zyngier {
5859ed24f4bSMarc Zyngier 	return vgic_initialized(kvm);
5869ed24f4bSMarc Zyngier }
5879ed24f4bSMarc Zyngier 
5889ed24f4bSMarc Zyngier void kvm_arm_halt_guest(struct kvm *kvm)
5899ed24f4bSMarc Zyngier {
5909ed24f4bSMarc Zyngier 	int i;
5919ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
5929ed24f4bSMarc Zyngier 
5939ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm)
5949ed24f4bSMarc Zyngier 		vcpu->arch.pause = true;
5959ed24f4bSMarc Zyngier 	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
5969ed24f4bSMarc Zyngier }
5979ed24f4bSMarc Zyngier 
5989ed24f4bSMarc Zyngier void kvm_arm_resume_guest(struct kvm *kvm)
5999ed24f4bSMarc Zyngier {
6009ed24f4bSMarc Zyngier 	int i;
6019ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
6029ed24f4bSMarc Zyngier 
6039ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
6049ed24f4bSMarc Zyngier 		vcpu->arch.pause = false;
60538060944SPaolo Bonzini 		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
6069ed24f4bSMarc Zyngier 	}
6079ed24f4bSMarc Zyngier }
6089ed24f4bSMarc Zyngier 
6099ed24f4bSMarc Zyngier static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
6109ed24f4bSMarc Zyngier {
61138060944SPaolo Bonzini 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
6129ed24f4bSMarc Zyngier 
61338060944SPaolo Bonzini 	rcuwait_wait_event(wait,
61438060944SPaolo Bonzini 			   (!vcpu->arch.power_off) &&(!vcpu->arch.pause),
61538060944SPaolo Bonzini 			   TASK_INTERRUPTIBLE);
6169ed24f4bSMarc Zyngier 
6179ed24f4bSMarc Zyngier 	if (vcpu->arch.power_off || vcpu->arch.pause) {
6189ed24f4bSMarc Zyngier 		/* Awaken to handle a signal, request we sleep again later. */
6199ed24f4bSMarc Zyngier 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
6209ed24f4bSMarc Zyngier 	}
6219ed24f4bSMarc Zyngier 
6229ed24f4bSMarc Zyngier 	/*
6239ed24f4bSMarc Zyngier 	 * Make sure we will observe a potential reset request if we've
6249ed24f4bSMarc Zyngier 	 * observed a change to the power state. Pairs with the smp_wmb() in
6259ed24f4bSMarc Zyngier 	 * kvm_psci_vcpu_on().
6269ed24f4bSMarc Zyngier 	 */
6279ed24f4bSMarc Zyngier 	smp_rmb();
6289ed24f4bSMarc Zyngier }
6299ed24f4bSMarc Zyngier 
6309ed24f4bSMarc Zyngier static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
6319ed24f4bSMarc Zyngier {
6329ed24f4bSMarc Zyngier 	return vcpu->arch.target >= 0;
6339ed24f4bSMarc Zyngier }
6349ed24f4bSMarc Zyngier 
6359ed24f4bSMarc Zyngier static void check_vcpu_requests(struct kvm_vcpu *vcpu)
6369ed24f4bSMarc Zyngier {
6379ed24f4bSMarc Zyngier 	if (kvm_request_pending(vcpu)) {
6389ed24f4bSMarc Zyngier 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
6399ed24f4bSMarc Zyngier 			vcpu_req_sleep(vcpu);
6409ed24f4bSMarc Zyngier 
6419ed24f4bSMarc Zyngier 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
6429ed24f4bSMarc Zyngier 			kvm_reset_vcpu(vcpu);
6439ed24f4bSMarc Zyngier 
6449ed24f4bSMarc Zyngier 		/*
6459ed24f4bSMarc Zyngier 		 * Clear IRQ_PENDING requests that were made to guarantee
6469ed24f4bSMarc Zyngier 		 * that a VCPU sees new virtual interrupts.
6479ed24f4bSMarc Zyngier 		 */
6489ed24f4bSMarc Zyngier 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
6499ed24f4bSMarc Zyngier 
6509ed24f4bSMarc Zyngier 		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
6519ed24f4bSMarc Zyngier 			kvm_update_stolen_time(vcpu);
6529ed24f4bSMarc Zyngier 
6539ed24f4bSMarc Zyngier 		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
6549ed24f4bSMarc Zyngier 			/* The distributor enable bits were changed */
6559ed24f4bSMarc Zyngier 			preempt_disable();
6569ed24f4bSMarc Zyngier 			vgic_v4_put(vcpu, false);
6579ed24f4bSMarc Zyngier 			vgic_v4_load(vcpu);
6589ed24f4bSMarc Zyngier 			preempt_enable();
6599ed24f4bSMarc Zyngier 		}
6609ed24f4bSMarc Zyngier 	}
6619ed24f4bSMarc Zyngier }
6629ed24f4bSMarc Zyngier 
6639ed24f4bSMarc Zyngier /**
6649ed24f4bSMarc Zyngier  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
6659ed24f4bSMarc Zyngier  * @vcpu:	The VCPU pointer
6669ed24f4bSMarc Zyngier  *
6679ed24f4bSMarc Zyngier  * This function is called through the VCPU_RUN ioctl called from user space. It
6689ed24f4bSMarc Zyngier  * will execute VM code in a loop until the time slice for the process is used
6699ed24f4bSMarc Zyngier  * or some emulation is needed from user space in which case the function will
6709ed24f4bSMarc Zyngier  * return with return value 0 and with the kvm_run structure filled in with the
6719ed24f4bSMarc Zyngier  * required data for the requested emulation.
6729ed24f4bSMarc Zyngier  */
67338060944SPaolo Bonzini int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
6749ed24f4bSMarc Zyngier {
67538060944SPaolo Bonzini 	struct kvm_run *run = vcpu->run;
6769ed24f4bSMarc Zyngier 	int ret;
6779ed24f4bSMarc Zyngier 
6789ed24f4bSMarc Zyngier 	if (unlikely(!kvm_vcpu_initialized(vcpu)))
6799ed24f4bSMarc Zyngier 		return -ENOEXEC;
6809ed24f4bSMarc Zyngier 
6819ed24f4bSMarc Zyngier 	ret = kvm_vcpu_first_run_init(vcpu);
6829ed24f4bSMarc Zyngier 	if (ret)
6839ed24f4bSMarc Zyngier 		return ret;
6849ed24f4bSMarc Zyngier 
6859ed24f4bSMarc Zyngier 	if (run->exit_reason == KVM_EXIT_MMIO) {
68674cc7e0cSTianjia Zhang 		ret = kvm_handle_mmio_return(vcpu);
6879ed24f4bSMarc Zyngier 		if (ret)
6889ed24f4bSMarc Zyngier 			return ret;
6899ed24f4bSMarc Zyngier 	}
6909ed24f4bSMarc Zyngier 
6919ed24f4bSMarc Zyngier 	if (run->immediate_exit)
6929ed24f4bSMarc Zyngier 		return -EINTR;
6939ed24f4bSMarc Zyngier 
6949ed24f4bSMarc Zyngier 	vcpu_load(vcpu);
6959ed24f4bSMarc Zyngier 
6969ed24f4bSMarc Zyngier 	kvm_sigset_activate(vcpu);
6979ed24f4bSMarc Zyngier 
6989ed24f4bSMarc Zyngier 	ret = 1;
6999ed24f4bSMarc Zyngier 	run->exit_reason = KVM_EXIT_UNKNOWN;
7009ed24f4bSMarc Zyngier 	while (ret > 0) {
7019ed24f4bSMarc Zyngier 		/*
7029ed24f4bSMarc Zyngier 		 * Check conditions before entering the guest
7039ed24f4bSMarc Zyngier 		 */
7049ed24f4bSMarc Zyngier 		cond_resched();
7059ed24f4bSMarc Zyngier 
706a0e50aa3SChristoffer Dall 		update_vmid(&vcpu->arch.hw_mmu->vmid);
7079ed24f4bSMarc Zyngier 
7089ed24f4bSMarc Zyngier 		check_vcpu_requests(vcpu);
7099ed24f4bSMarc Zyngier 
7109ed24f4bSMarc Zyngier 		/*
7119ed24f4bSMarc Zyngier 		 * Preparing the interrupts to be injected also
7129ed24f4bSMarc Zyngier 		 * involves poking the GIC, which must be done in a
7139ed24f4bSMarc Zyngier 		 * non-preemptible context.
7149ed24f4bSMarc Zyngier 		 */
7159ed24f4bSMarc Zyngier 		preempt_disable();
7169ed24f4bSMarc Zyngier 
7179ed24f4bSMarc Zyngier 		kvm_pmu_flush_hwstate(vcpu);
7189ed24f4bSMarc Zyngier 
7199ed24f4bSMarc Zyngier 		local_irq_disable();
7209ed24f4bSMarc Zyngier 
7219ed24f4bSMarc Zyngier 		kvm_vgic_flush_hwstate(vcpu);
7229ed24f4bSMarc Zyngier 
7239ed24f4bSMarc Zyngier 		/*
7249ed24f4bSMarc Zyngier 		 * Exit if we have a signal pending so that we can deliver the
7259ed24f4bSMarc Zyngier 		 * signal to user space.
7269ed24f4bSMarc Zyngier 		 */
7279ed24f4bSMarc Zyngier 		if (signal_pending(current)) {
7289ed24f4bSMarc Zyngier 			ret = -EINTR;
7299ed24f4bSMarc Zyngier 			run->exit_reason = KVM_EXIT_INTR;
7309ed24f4bSMarc Zyngier 		}
7319ed24f4bSMarc Zyngier 
7329ed24f4bSMarc Zyngier 		/*
7339ed24f4bSMarc Zyngier 		 * If we're using a userspace irqchip, then check if we need
7349ed24f4bSMarc Zyngier 		 * to tell a userspace irqchip about timer or PMU level
7359ed24f4bSMarc Zyngier 		 * changes and if so, exit to userspace (the actual level
7369ed24f4bSMarc Zyngier 		 * state gets updated in kvm_timer_update_run and
7379ed24f4bSMarc Zyngier 		 * kvm_pmu_update_run below).
7389ed24f4bSMarc Zyngier 		 */
7399ed24f4bSMarc Zyngier 		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
7409ed24f4bSMarc Zyngier 			if (kvm_timer_should_notify_user(vcpu) ||
7419ed24f4bSMarc Zyngier 			    kvm_pmu_should_notify_user(vcpu)) {
7429ed24f4bSMarc Zyngier 				ret = -EINTR;
7439ed24f4bSMarc Zyngier 				run->exit_reason = KVM_EXIT_INTR;
7449ed24f4bSMarc Zyngier 			}
7459ed24f4bSMarc Zyngier 		}
7469ed24f4bSMarc Zyngier 
7479ed24f4bSMarc Zyngier 		/*
7489ed24f4bSMarc Zyngier 		 * Ensure we set mode to IN_GUEST_MODE after we disable
7499ed24f4bSMarc Zyngier 		 * interrupts and before the final VCPU requests check.
7509ed24f4bSMarc Zyngier 		 * See the comment in kvm_vcpu_exiting_guest_mode() and
7519ed24f4bSMarc Zyngier 		 * Documentation/virt/kvm/vcpu-requests.rst
7529ed24f4bSMarc Zyngier 		 */
7539ed24f4bSMarc Zyngier 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
7549ed24f4bSMarc Zyngier 
755a0e50aa3SChristoffer Dall 		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
7569ed24f4bSMarc Zyngier 		    kvm_request_pending(vcpu)) {
7579ed24f4bSMarc Zyngier 			vcpu->mode = OUTSIDE_GUEST_MODE;
7589ed24f4bSMarc Zyngier 			isb(); /* Ensure work in x_flush_hwstate is committed */
7599ed24f4bSMarc Zyngier 			kvm_pmu_sync_hwstate(vcpu);
7609ed24f4bSMarc Zyngier 			if (static_branch_unlikely(&userspace_irqchip_in_use))
7613c5ff0c6SMarc Zyngier 				kvm_timer_sync_user(vcpu);
7629ed24f4bSMarc Zyngier 			kvm_vgic_sync_hwstate(vcpu);
7639ed24f4bSMarc Zyngier 			local_irq_enable();
7649ed24f4bSMarc Zyngier 			preempt_enable();
7659ed24f4bSMarc Zyngier 			continue;
7669ed24f4bSMarc Zyngier 		}
7679ed24f4bSMarc Zyngier 
7689ed24f4bSMarc Zyngier 		kvm_arm_setup_debug(vcpu);
7699ed24f4bSMarc Zyngier 
7709ed24f4bSMarc Zyngier 		/**************************************************************
7719ed24f4bSMarc Zyngier 		 * Enter the guest
7729ed24f4bSMarc Zyngier 		 */
7739ed24f4bSMarc Zyngier 		trace_kvm_entry(*vcpu_pc(vcpu));
7749ed24f4bSMarc Zyngier 		guest_enter_irqoff();
7759ed24f4bSMarc Zyngier 
77609cf57ebSDavid Brazdil 		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
7779ed24f4bSMarc Zyngier 
7789ed24f4bSMarc Zyngier 		vcpu->mode = OUTSIDE_GUEST_MODE;
7799ed24f4bSMarc Zyngier 		vcpu->stat.exits++;
7809ed24f4bSMarc Zyngier 		/*
7819ed24f4bSMarc Zyngier 		 * Back from guest
7829ed24f4bSMarc Zyngier 		 *************************************************************/
7839ed24f4bSMarc Zyngier 
7849ed24f4bSMarc Zyngier 		kvm_arm_clear_debug(vcpu);
7859ed24f4bSMarc Zyngier 
7869ed24f4bSMarc Zyngier 		/*
7879ed24f4bSMarc Zyngier 		 * We must sync the PMU state before the vgic state so
7889ed24f4bSMarc Zyngier 		 * that the vgic can properly sample the updated state of the
7899ed24f4bSMarc Zyngier 		 * interrupt line.
7909ed24f4bSMarc Zyngier 		 */
7919ed24f4bSMarc Zyngier 		kvm_pmu_sync_hwstate(vcpu);
7929ed24f4bSMarc Zyngier 
7939ed24f4bSMarc Zyngier 		/*
7949ed24f4bSMarc Zyngier 		 * Sync the vgic state before syncing the timer state because
7959ed24f4bSMarc Zyngier 		 * the timer code needs to know if the virtual timer
7969ed24f4bSMarc Zyngier 		 * interrupts are active.
7979ed24f4bSMarc Zyngier 		 */
7989ed24f4bSMarc Zyngier 		kvm_vgic_sync_hwstate(vcpu);
7999ed24f4bSMarc Zyngier 
8009ed24f4bSMarc Zyngier 		/*
8019ed24f4bSMarc Zyngier 		 * Sync the timer hardware state before enabling interrupts as
8029ed24f4bSMarc Zyngier 		 * we don't want vtimer interrupts to race with syncing the
8039ed24f4bSMarc Zyngier 		 * timer virtual interrupt state.
8049ed24f4bSMarc Zyngier 		 */
8059ed24f4bSMarc Zyngier 		if (static_branch_unlikely(&userspace_irqchip_in_use))
8063c5ff0c6SMarc Zyngier 			kvm_timer_sync_user(vcpu);
8079ed24f4bSMarc Zyngier 
8089ed24f4bSMarc Zyngier 		kvm_arch_vcpu_ctxsync_fp(vcpu);
8099ed24f4bSMarc Zyngier 
8109ed24f4bSMarc Zyngier 		/*
8119ed24f4bSMarc Zyngier 		 * We may have taken a host interrupt in HYP mode (ie
8129ed24f4bSMarc Zyngier 		 * while executing the guest). This interrupt is still
8139ed24f4bSMarc Zyngier 		 * pending, as we haven't serviced it yet!
8149ed24f4bSMarc Zyngier 		 *
8159ed24f4bSMarc Zyngier 		 * We're now back in SVC mode, with interrupts
8169ed24f4bSMarc Zyngier 		 * disabled.  Enabling the interrupts now will have
8179ed24f4bSMarc Zyngier 		 * the effect of taking the interrupt again, in SVC
8189ed24f4bSMarc Zyngier 		 * mode this time.
8199ed24f4bSMarc Zyngier 		 */
8209ed24f4bSMarc Zyngier 		local_irq_enable();
8219ed24f4bSMarc Zyngier 
8229ed24f4bSMarc Zyngier 		/*
8239ed24f4bSMarc Zyngier 		 * We do local_irq_enable() before calling guest_exit() so
8249ed24f4bSMarc Zyngier 		 * that if a timer interrupt hits while running the guest we
8259ed24f4bSMarc Zyngier 		 * account that tick as being spent in the guest.  We enable
8269ed24f4bSMarc Zyngier 		 * preemption after calling guest_exit() so that if we get
8279ed24f4bSMarc Zyngier 		 * preempted we make sure ticks after that is not counted as
8289ed24f4bSMarc Zyngier 		 * guest time.
8299ed24f4bSMarc Zyngier 		 */
8309ed24f4bSMarc Zyngier 		guest_exit();
8319ed24f4bSMarc Zyngier 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
8329ed24f4bSMarc Zyngier 
8339ed24f4bSMarc Zyngier 		/* Exit types that need handling before we can be preempted */
83474cc7e0cSTianjia Zhang 		handle_exit_early(vcpu, ret);
8359ed24f4bSMarc Zyngier 
8369ed24f4bSMarc Zyngier 		preempt_enable();
8379ed24f4bSMarc Zyngier 
83822f55384SQais Yousef 		/*
83922f55384SQais Yousef 		 * The ARMv8 architecture doesn't give the hypervisor
84022f55384SQais Yousef 		 * a mechanism to prevent a guest from dropping to AArch32 EL0
84122f55384SQais Yousef 		 * if implemented by the CPU. If we spot the guest in such
84222f55384SQais Yousef 		 * state and that we decided it wasn't supposed to do so (like
84322f55384SQais Yousef 		 * with the asymmetric AArch32 case), return to userspace with
84422f55384SQais Yousef 		 * a fatal error.
84522f55384SQais Yousef 		 */
84622f55384SQais Yousef 		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
84722f55384SQais Yousef 			/*
84822f55384SQais Yousef 			 * As we have caught the guest red-handed, decide that
84922f55384SQais Yousef 			 * it isn't fit for purpose anymore by making the vcpu
85022f55384SQais Yousef 			 * invalid. The VMM can try and fix it by issuing  a
85122f55384SQais Yousef 			 * KVM_ARM_VCPU_INIT if it really wants to.
85222f55384SQais Yousef 			 */
85322f55384SQais Yousef 			vcpu->arch.target = -1;
85422f55384SQais Yousef 			ret = ARM_EXCEPTION_IL;
85522f55384SQais Yousef 		}
85622f55384SQais Yousef 
85774cc7e0cSTianjia Zhang 		ret = handle_exit(vcpu, ret);
8589ed24f4bSMarc Zyngier 	}
8599ed24f4bSMarc Zyngier 
8609ed24f4bSMarc Zyngier 	/* Tell userspace about in-kernel device output levels */
8619ed24f4bSMarc Zyngier 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
8629ed24f4bSMarc Zyngier 		kvm_timer_update_run(vcpu);
8639ed24f4bSMarc Zyngier 		kvm_pmu_update_run(vcpu);
8649ed24f4bSMarc Zyngier 	}
8659ed24f4bSMarc Zyngier 
8669ed24f4bSMarc Zyngier 	kvm_sigset_deactivate(vcpu);
8679ed24f4bSMarc Zyngier 
8689ed24f4bSMarc Zyngier 	vcpu_put(vcpu);
8699ed24f4bSMarc Zyngier 	return ret;
8709ed24f4bSMarc Zyngier }
8719ed24f4bSMarc Zyngier 
8729ed24f4bSMarc Zyngier static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
8739ed24f4bSMarc Zyngier {
8749ed24f4bSMarc Zyngier 	int bit_index;
8759ed24f4bSMarc Zyngier 	bool set;
8769ed24f4bSMarc Zyngier 	unsigned long *hcr;
8779ed24f4bSMarc Zyngier 
8789ed24f4bSMarc Zyngier 	if (number == KVM_ARM_IRQ_CPU_IRQ)
8799ed24f4bSMarc Zyngier 		bit_index = __ffs(HCR_VI);
8809ed24f4bSMarc Zyngier 	else /* KVM_ARM_IRQ_CPU_FIQ */
8819ed24f4bSMarc Zyngier 		bit_index = __ffs(HCR_VF);
8829ed24f4bSMarc Zyngier 
8839ed24f4bSMarc Zyngier 	hcr = vcpu_hcr(vcpu);
8849ed24f4bSMarc Zyngier 	if (level)
8859ed24f4bSMarc Zyngier 		set = test_and_set_bit(bit_index, hcr);
8869ed24f4bSMarc Zyngier 	else
8879ed24f4bSMarc Zyngier 		set = test_and_clear_bit(bit_index, hcr);
8889ed24f4bSMarc Zyngier 
8899ed24f4bSMarc Zyngier 	/*
8909ed24f4bSMarc Zyngier 	 * If we didn't change anything, no need to wake up or kick other CPUs
8919ed24f4bSMarc Zyngier 	 */
8929ed24f4bSMarc Zyngier 	if (set == level)
8939ed24f4bSMarc Zyngier 		return 0;
8949ed24f4bSMarc Zyngier 
8959ed24f4bSMarc Zyngier 	/*
8969ed24f4bSMarc Zyngier 	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
8979ed24f4bSMarc Zyngier 	 * trigger a world-switch round on the running physical CPU to set the
8989ed24f4bSMarc Zyngier 	 * virtual IRQ/FIQ fields in the HCR appropriately.
8999ed24f4bSMarc Zyngier 	 */
9009ed24f4bSMarc Zyngier 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
9019ed24f4bSMarc Zyngier 	kvm_vcpu_kick(vcpu);
9029ed24f4bSMarc Zyngier 
9039ed24f4bSMarc Zyngier 	return 0;
9049ed24f4bSMarc Zyngier }
9059ed24f4bSMarc Zyngier 
9069ed24f4bSMarc Zyngier int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
9079ed24f4bSMarc Zyngier 			  bool line_status)
9089ed24f4bSMarc Zyngier {
9099ed24f4bSMarc Zyngier 	u32 irq = irq_level->irq;
9109ed24f4bSMarc Zyngier 	unsigned int irq_type, vcpu_idx, irq_num;
9119ed24f4bSMarc Zyngier 	int nrcpus = atomic_read(&kvm->online_vcpus);
9129ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = NULL;
9139ed24f4bSMarc Zyngier 	bool level = irq_level->level;
9149ed24f4bSMarc Zyngier 
9159ed24f4bSMarc Zyngier 	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
9169ed24f4bSMarc Zyngier 	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
9179ed24f4bSMarc Zyngier 	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
9189ed24f4bSMarc Zyngier 	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
9199ed24f4bSMarc Zyngier 
9209ed24f4bSMarc Zyngier 	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
9219ed24f4bSMarc Zyngier 
9229ed24f4bSMarc Zyngier 	switch (irq_type) {
9239ed24f4bSMarc Zyngier 	case KVM_ARM_IRQ_TYPE_CPU:
9249ed24f4bSMarc Zyngier 		if (irqchip_in_kernel(kvm))
9259ed24f4bSMarc Zyngier 			return -ENXIO;
9269ed24f4bSMarc Zyngier 
9279ed24f4bSMarc Zyngier 		if (vcpu_idx >= nrcpus)
9289ed24f4bSMarc Zyngier 			return -EINVAL;
9299ed24f4bSMarc Zyngier 
9309ed24f4bSMarc Zyngier 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
9319ed24f4bSMarc Zyngier 		if (!vcpu)
9329ed24f4bSMarc Zyngier 			return -EINVAL;
9339ed24f4bSMarc Zyngier 
9349ed24f4bSMarc Zyngier 		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
9359ed24f4bSMarc Zyngier 			return -EINVAL;
9369ed24f4bSMarc Zyngier 
9379ed24f4bSMarc Zyngier 		return vcpu_interrupt_line(vcpu, irq_num, level);
9389ed24f4bSMarc Zyngier 	case KVM_ARM_IRQ_TYPE_PPI:
9399ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(kvm))
9409ed24f4bSMarc Zyngier 			return -ENXIO;
9419ed24f4bSMarc Zyngier 
9429ed24f4bSMarc Zyngier 		if (vcpu_idx >= nrcpus)
9439ed24f4bSMarc Zyngier 			return -EINVAL;
9449ed24f4bSMarc Zyngier 
9459ed24f4bSMarc Zyngier 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
9469ed24f4bSMarc Zyngier 		if (!vcpu)
9479ed24f4bSMarc Zyngier 			return -EINVAL;
9489ed24f4bSMarc Zyngier 
9499ed24f4bSMarc Zyngier 		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
9509ed24f4bSMarc Zyngier 			return -EINVAL;
9519ed24f4bSMarc Zyngier 
9529ed24f4bSMarc Zyngier 		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
9539ed24f4bSMarc Zyngier 	case KVM_ARM_IRQ_TYPE_SPI:
9549ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(kvm))
9559ed24f4bSMarc Zyngier 			return -ENXIO;
9569ed24f4bSMarc Zyngier 
9579ed24f4bSMarc Zyngier 		if (irq_num < VGIC_NR_PRIVATE_IRQS)
9589ed24f4bSMarc Zyngier 			return -EINVAL;
9599ed24f4bSMarc Zyngier 
9609ed24f4bSMarc Zyngier 		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
9619ed24f4bSMarc Zyngier 	}
9629ed24f4bSMarc Zyngier 
9639ed24f4bSMarc Zyngier 	return -EINVAL;
9649ed24f4bSMarc Zyngier }
9659ed24f4bSMarc Zyngier 
9669ed24f4bSMarc Zyngier static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
9679ed24f4bSMarc Zyngier 			       const struct kvm_vcpu_init *init)
9689ed24f4bSMarc Zyngier {
9699ed24f4bSMarc Zyngier 	unsigned int i, ret;
9709ed24f4bSMarc Zyngier 	int phys_target = kvm_target_cpu();
9719ed24f4bSMarc Zyngier 
9729ed24f4bSMarc Zyngier 	if (init->target != phys_target)
9739ed24f4bSMarc Zyngier 		return -EINVAL;
9749ed24f4bSMarc Zyngier 
9759ed24f4bSMarc Zyngier 	/*
9769ed24f4bSMarc Zyngier 	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
9779ed24f4bSMarc Zyngier 	 * use the same target.
9789ed24f4bSMarc Zyngier 	 */
9799ed24f4bSMarc Zyngier 	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
9809ed24f4bSMarc Zyngier 		return -EINVAL;
9819ed24f4bSMarc Zyngier 
9829ed24f4bSMarc Zyngier 	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
9839ed24f4bSMarc Zyngier 	for (i = 0; i < sizeof(init->features) * 8; i++) {
9849ed24f4bSMarc Zyngier 		bool set = (init->features[i / 32] & (1 << (i % 32)));
9859ed24f4bSMarc Zyngier 
9869ed24f4bSMarc Zyngier 		if (set && i >= KVM_VCPU_MAX_FEATURES)
9879ed24f4bSMarc Zyngier 			return -ENOENT;
9889ed24f4bSMarc Zyngier 
9899ed24f4bSMarc Zyngier 		/*
9909ed24f4bSMarc Zyngier 		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
9919ed24f4bSMarc Zyngier 		 * use the same feature set.
9929ed24f4bSMarc Zyngier 		 */
9939ed24f4bSMarc Zyngier 		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
9949ed24f4bSMarc Zyngier 		    test_bit(i, vcpu->arch.features) != set)
9959ed24f4bSMarc Zyngier 			return -EINVAL;
9969ed24f4bSMarc Zyngier 
9979ed24f4bSMarc Zyngier 		if (set)
9989ed24f4bSMarc Zyngier 			set_bit(i, vcpu->arch.features);
9999ed24f4bSMarc Zyngier 	}
10009ed24f4bSMarc Zyngier 
10019ed24f4bSMarc Zyngier 	vcpu->arch.target = phys_target;
10029ed24f4bSMarc Zyngier 
10039ed24f4bSMarc Zyngier 	/* Now we know what it is, we can reset it. */
10049ed24f4bSMarc Zyngier 	ret = kvm_reset_vcpu(vcpu);
10059ed24f4bSMarc Zyngier 	if (ret) {
10069ed24f4bSMarc Zyngier 		vcpu->arch.target = -1;
10079ed24f4bSMarc Zyngier 		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
10089ed24f4bSMarc Zyngier 	}
10099ed24f4bSMarc Zyngier 
10109ed24f4bSMarc Zyngier 	return ret;
10119ed24f4bSMarc Zyngier }
10129ed24f4bSMarc Zyngier 
10139ed24f4bSMarc Zyngier static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
10149ed24f4bSMarc Zyngier 					 struct kvm_vcpu_init *init)
10159ed24f4bSMarc Zyngier {
10169ed24f4bSMarc Zyngier 	int ret;
10179ed24f4bSMarc Zyngier 
10189ed24f4bSMarc Zyngier 	ret = kvm_vcpu_set_target(vcpu, init);
10199ed24f4bSMarc Zyngier 	if (ret)
10209ed24f4bSMarc Zyngier 		return ret;
10219ed24f4bSMarc Zyngier 
10229ed24f4bSMarc Zyngier 	/*
10239ed24f4bSMarc Zyngier 	 * Ensure a rebooted VM will fault in RAM pages and detect if the
10249ed24f4bSMarc Zyngier 	 * guest MMU is turned off and flush the caches as needed.
1025892713e9SZenghui Yu 	 *
10267ae2f3dbSMarc Zyngier 	 * S2FWB enforces all memory accesses to RAM being cacheable,
10277ae2f3dbSMarc Zyngier 	 * ensuring that the data side is always coherent. We still
10287ae2f3dbSMarc Zyngier 	 * need to invalidate the I-cache though, as FWB does *not*
10297ae2f3dbSMarc Zyngier 	 * imply CTR_EL0.DIC.
10309ed24f4bSMarc Zyngier 	 */
10317ae2f3dbSMarc Zyngier 	if (vcpu->arch.has_run_once) {
10327ae2f3dbSMarc Zyngier 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
10339ed24f4bSMarc Zyngier 			stage2_unmap_vm(vcpu->kvm);
10347ae2f3dbSMarc Zyngier 		else
10357ae2f3dbSMarc Zyngier 			__flush_icache_all();
10367ae2f3dbSMarc Zyngier 	}
10379ed24f4bSMarc Zyngier 
10389ed24f4bSMarc Zyngier 	vcpu_reset_hcr(vcpu);
10399ed24f4bSMarc Zyngier 
10409ed24f4bSMarc Zyngier 	/*
10419ed24f4bSMarc Zyngier 	 * Handle the "start in power-off" case.
10429ed24f4bSMarc Zyngier 	 */
10439ed24f4bSMarc Zyngier 	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
10449ed24f4bSMarc Zyngier 		vcpu_power_off(vcpu);
10459ed24f4bSMarc Zyngier 	else
10469ed24f4bSMarc Zyngier 		vcpu->arch.power_off = false;
10479ed24f4bSMarc Zyngier 
10489ed24f4bSMarc Zyngier 	return 0;
10499ed24f4bSMarc Zyngier }
10509ed24f4bSMarc Zyngier 
10519ed24f4bSMarc Zyngier static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
10529ed24f4bSMarc Zyngier 				 struct kvm_device_attr *attr)
10539ed24f4bSMarc Zyngier {
10549ed24f4bSMarc Zyngier 	int ret = -ENXIO;
10559ed24f4bSMarc Zyngier 
10569ed24f4bSMarc Zyngier 	switch (attr->group) {
10579ed24f4bSMarc Zyngier 	default:
10589ed24f4bSMarc Zyngier 		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
10599ed24f4bSMarc Zyngier 		break;
10609ed24f4bSMarc Zyngier 	}
10619ed24f4bSMarc Zyngier 
10629ed24f4bSMarc Zyngier 	return ret;
10639ed24f4bSMarc Zyngier }
10649ed24f4bSMarc Zyngier 
10659ed24f4bSMarc Zyngier static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
10669ed24f4bSMarc Zyngier 				 struct kvm_device_attr *attr)
10679ed24f4bSMarc Zyngier {
10689ed24f4bSMarc Zyngier 	int ret = -ENXIO;
10699ed24f4bSMarc Zyngier 
10709ed24f4bSMarc Zyngier 	switch (attr->group) {
10719ed24f4bSMarc Zyngier 	default:
10729ed24f4bSMarc Zyngier 		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
10739ed24f4bSMarc Zyngier 		break;
10749ed24f4bSMarc Zyngier 	}
10759ed24f4bSMarc Zyngier 
10769ed24f4bSMarc Zyngier 	return ret;
10779ed24f4bSMarc Zyngier }
10789ed24f4bSMarc Zyngier 
10799ed24f4bSMarc Zyngier static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
10809ed24f4bSMarc Zyngier 				 struct kvm_device_attr *attr)
10819ed24f4bSMarc Zyngier {
10829ed24f4bSMarc Zyngier 	int ret = -ENXIO;
10839ed24f4bSMarc Zyngier 
10849ed24f4bSMarc Zyngier 	switch (attr->group) {
10859ed24f4bSMarc Zyngier 	default:
10869ed24f4bSMarc Zyngier 		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
10879ed24f4bSMarc Zyngier 		break;
10889ed24f4bSMarc Zyngier 	}
10899ed24f4bSMarc Zyngier 
10909ed24f4bSMarc Zyngier 	return ret;
10919ed24f4bSMarc Zyngier }
10929ed24f4bSMarc Zyngier 
10939ed24f4bSMarc Zyngier static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
10949ed24f4bSMarc Zyngier 				   struct kvm_vcpu_events *events)
10959ed24f4bSMarc Zyngier {
10969ed24f4bSMarc Zyngier 	memset(events, 0, sizeof(*events));
10979ed24f4bSMarc Zyngier 
10989ed24f4bSMarc Zyngier 	return __kvm_arm_vcpu_get_events(vcpu, events);
10999ed24f4bSMarc Zyngier }
11009ed24f4bSMarc Zyngier 
11019ed24f4bSMarc Zyngier static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
11029ed24f4bSMarc Zyngier 				   struct kvm_vcpu_events *events)
11039ed24f4bSMarc Zyngier {
11049ed24f4bSMarc Zyngier 	int i;
11059ed24f4bSMarc Zyngier 
11069ed24f4bSMarc Zyngier 	/* check whether the reserved field is zero */
11079ed24f4bSMarc Zyngier 	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
11089ed24f4bSMarc Zyngier 		if (events->reserved[i])
11099ed24f4bSMarc Zyngier 			return -EINVAL;
11109ed24f4bSMarc Zyngier 
11119ed24f4bSMarc Zyngier 	/* check whether the pad field is zero */
11129ed24f4bSMarc Zyngier 	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
11139ed24f4bSMarc Zyngier 		if (events->exception.pad[i])
11149ed24f4bSMarc Zyngier 			return -EINVAL;
11159ed24f4bSMarc Zyngier 
11169ed24f4bSMarc Zyngier 	return __kvm_arm_vcpu_set_events(vcpu, events);
11179ed24f4bSMarc Zyngier }
11189ed24f4bSMarc Zyngier 
11199ed24f4bSMarc Zyngier long kvm_arch_vcpu_ioctl(struct file *filp,
11209ed24f4bSMarc Zyngier 			 unsigned int ioctl, unsigned long arg)
11219ed24f4bSMarc Zyngier {
11229ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = filp->private_data;
11239ed24f4bSMarc Zyngier 	void __user *argp = (void __user *)arg;
11249ed24f4bSMarc Zyngier 	struct kvm_device_attr attr;
11259ed24f4bSMarc Zyngier 	long r;
11269ed24f4bSMarc Zyngier 
11279ed24f4bSMarc Zyngier 	switch (ioctl) {
11289ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_INIT: {
11299ed24f4bSMarc Zyngier 		struct kvm_vcpu_init init;
11309ed24f4bSMarc Zyngier 
11319ed24f4bSMarc Zyngier 		r = -EFAULT;
11329ed24f4bSMarc Zyngier 		if (copy_from_user(&init, argp, sizeof(init)))
11339ed24f4bSMarc Zyngier 			break;
11349ed24f4bSMarc Zyngier 
11359ed24f4bSMarc Zyngier 		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
11369ed24f4bSMarc Zyngier 		break;
11379ed24f4bSMarc Zyngier 	}
11389ed24f4bSMarc Zyngier 	case KVM_SET_ONE_REG:
11399ed24f4bSMarc Zyngier 	case KVM_GET_ONE_REG: {
11409ed24f4bSMarc Zyngier 		struct kvm_one_reg reg;
11419ed24f4bSMarc Zyngier 
11429ed24f4bSMarc Zyngier 		r = -ENOEXEC;
11439ed24f4bSMarc Zyngier 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
11449ed24f4bSMarc Zyngier 			break;
11459ed24f4bSMarc Zyngier 
11469ed24f4bSMarc Zyngier 		r = -EFAULT;
11479ed24f4bSMarc Zyngier 		if (copy_from_user(&reg, argp, sizeof(reg)))
11489ed24f4bSMarc Zyngier 			break;
11499ed24f4bSMarc Zyngier 
11509ed24f4bSMarc Zyngier 		if (ioctl == KVM_SET_ONE_REG)
11519ed24f4bSMarc Zyngier 			r = kvm_arm_set_reg(vcpu, &reg);
11529ed24f4bSMarc Zyngier 		else
11539ed24f4bSMarc Zyngier 			r = kvm_arm_get_reg(vcpu, &reg);
11549ed24f4bSMarc Zyngier 		break;
11559ed24f4bSMarc Zyngier 	}
11569ed24f4bSMarc Zyngier 	case KVM_GET_REG_LIST: {
11579ed24f4bSMarc Zyngier 		struct kvm_reg_list __user *user_list = argp;
11589ed24f4bSMarc Zyngier 		struct kvm_reg_list reg_list;
11599ed24f4bSMarc Zyngier 		unsigned n;
11609ed24f4bSMarc Zyngier 
11619ed24f4bSMarc Zyngier 		r = -ENOEXEC;
11629ed24f4bSMarc Zyngier 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
11639ed24f4bSMarc Zyngier 			break;
11649ed24f4bSMarc Zyngier 
11659ed24f4bSMarc Zyngier 		r = -EPERM;
11669ed24f4bSMarc Zyngier 		if (!kvm_arm_vcpu_is_finalized(vcpu))
11679ed24f4bSMarc Zyngier 			break;
11689ed24f4bSMarc Zyngier 
11699ed24f4bSMarc Zyngier 		r = -EFAULT;
11709ed24f4bSMarc Zyngier 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
11719ed24f4bSMarc Zyngier 			break;
11729ed24f4bSMarc Zyngier 		n = reg_list.n;
11739ed24f4bSMarc Zyngier 		reg_list.n = kvm_arm_num_regs(vcpu);
11749ed24f4bSMarc Zyngier 		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
11759ed24f4bSMarc Zyngier 			break;
11769ed24f4bSMarc Zyngier 		r = -E2BIG;
11779ed24f4bSMarc Zyngier 		if (n < reg_list.n)
11789ed24f4bSMarc Zyngier 			break;
11799ed24f4bSMarc Zyngier 		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
11809ed24f4bSMarc Zyngier 		break;
11819ed24f4bSMarc Zyngier 	}
11829ed24f4bSMarc Zyngier 	case KVM_SET_DEVICE_ATTR: {
11839ed24f4bSMarc Zyngier 		r = -EFAULT;
11849ed24f4bSMarc Zyngier 		if (copy_from_user(&attr, argp, sizeof(attr)))
11859ed24f4bSMarc Zyngier 			break;
11869ed24f4bSMarc Zyngier 		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
11879ed24f4bSMarc Zyngier 		break;
11889ed24f4bSMarc Zyngier 	}
11899ed24f4bSMarc Zyngier 	case KVM_GET_DEVICE_ATTR: {
11909ed24f4bSMarc Zyngier 		r = -EFAULT;
11919ed24f4bSMarc Zyngier 		if (copy_from_user(&attr, argp, sizeof(attr)))
11929ed24f4bSMarc Zyngier 			break;
11939ed24f4bSMarc Zyngier 		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
11949ed24f4bSMarc Zyngier 		break;
11959ed24f4bSMarc Zyngier 	}
11969ed24f4bSMarc Zyngier 	case KVM_HAS_DEVICE_ATTR: {
11979ed24f4bSMarc Zyngier 		r = -EFAULT;
11989ed24f4bSMarc Zyngier 		if (copy_from_user(&attr, argp, sizeof(attr)))
11999ed24f4bSMarc Zyngier 			break;
12009ed24f4bSMarc Zyngier 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
12019ed24f4bSMarc Zyngier 		break;
12029ed24f4bSMarc Zyngier 	}
12039ed24f4bSMarc Zyngier 	case KVM_GET_VCPU_EVENTS: {
12049ed24f4bSMarc Zyngier 		struct kvm_vcpu_events events;
12059ed24f4bSMarc Zyngier 
12069ed24f4bSMarc Zyngier 		if (kvm_arm_vcpu_get_events(vcpu, &events))
12079ed24f4bSMarc Zyngier 			return -EINVAL;
12089ed24f4bSMarc Zyngier 
12099ed24f4bSMarc Zyngier 		if (copy_to_user(argp, &events, sizeof(events)))
12109ed24f4bSMarc Zyngier 			return -EFAULT;
12119ed24f4bSMarc Zyngier 
12129ed24f4bSMarc Zyngier 		return 0;
12139ed24f4bSMarc Zyngier 	}
12149ed24f4bSMarc Zyngier 	case KVM_SET_VCPU_EVENTS: {
12159ed24f4bSMarc Zyngier 		struct kvm_vcpu_events events;
12169ed24f4bSMarc Zyngier 
12179ed24f4bSMarc Zyngier 		if (copy_from_user(&events, argp, sizeof(events)))
12189ed24f4bSMarc Zyngier 			return -EFAULT;
12199ed24f4bSMarc Zyngier 
12209ed24f4bSMarc Zyngier 		return kvm_arm_vcpu_set_events(vcpu, &events);
12219ed24f4bSMarc Zyngier 	}
12229ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_FINALIZE: {
12239ed24f4bSMarc Zyngier 		int what;
12249ed24f4bSMarc Zyngier 
12259ed24f4bSMarc Zyngier 		if (!kvm_vcpu_initialized(vcpu))
12269ed24f4bSMarc Zyngier 			return -ENOEXEC;
12279ed24f4bSMarc Zyngier 
12289ed24f4bSMarc Zyngier 		if (get_user(what, (const int __user *)argp))
12299ed24f4bSMarc Zyngier 			return -EFAULT;
12309ed24f4bSMarc Zyngier 
12319ed24f4bSMarc Zyngier 		return kvm_arm_vcpu_finalize(vcpu, what);
12329ed24f4bSMarc Zyngier 	}
12339ed24f4bSMarc Zyngier 	default:
12349ed24f4bSMarc Zyngier 		r = -EINVAL;
12359ed24f4bSMarc Zyngier 	}
12369ed24f4bSMarc Zyngier 
12379ed24f4bSMarc Zyngier 	return r;
12389ed24f4bSMarc Zyngier }
12399ed24f4bSMarc Zyngier 
12409ed24f4bSMarc Zyngier void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
12419ed24f4bSMarc Zyngier {
12429ed24f4bSMarc Zyngier 
12439ed24f4bSMarc Zyngier }
12449ed24f4bSMarc Zyngier 
12459ed24f4bSMarc Zyngier void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
12469ed24f4bSMarc Zyngier 					struct kvm_memory_slot *memslot)
12479ed24f4bSMarc Zyngier {
12489ed24f4bSMarc Zyngier 	kvm_flush_remote_tlbs(kvm);
12499ed24f4bSMarc Zyngier }
12509ed24f4bSMarc Zyngier 
12519ed24f4bSMarc Zyngier static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
12529ed24f4bSMarc Zyngier 					struct kvm_arm_device_addr *dev_addr)
12539ed24f4bSMarc Zyngier {
12549ed24f4bSMarc Zyngier 	unsigned long dev_id, type;
12559ed24f4bSMarc Zyngier 
12569ed24f4bSMarc Zyngier 	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
12579ed24f4bSMarc Zyngier 		KVM_ARM_DEVICE_ID_SHIFT;
12589ed24f4bSMarc Zyngier 	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
12599ed24f4bSMarc Zyngier 		KVM_ARM_DEVICE_TYPE_SHIFT;
12609ed24f4bSMarc Zyngier 
12619ed24f4bSMarc Zyngier 	switch (dev_id) {
12629ed24f4bSMarc Zyngier 	case KVM_ARM_DEVICE_VGIC_V2:
12639ed24f4bSMarc Zyngier 		if (!vgic_present)
12649ed24f4bSMarc Zyngier 			return -ENXIO;
12659ed24f4bSMarc Zyngier 		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
12669ed24f4bSMarc Zyngier 	default:
12679ed24f4bSMarc Zyngier 		return -ENODEV;
12689ed24f4bSMarc Zyngier 	}
12699ed24f4bSMarc Zyngier }
12709ed24f4bSMarc Zyngier 
12719ed24f4bSMarc Zyngier long kvm_arch_vm_ioctl(struct file *filp,
12729ed24f4bSMarc Zyngier 		       unsigned int ioctl, unsigned long arg)
12739ed24f4bSMarc Zyngier {
12749ed24f4bSMarc Zyngier 	struct kvm *kvm = filp->private_data;
12759ed24f4bSMarc Zyngier 	void __user *argp = (void __user *)arg;
12769ed24f4bSMarc Zyngier 
12779ed24f4bSMarc Zyngier 	switch (ioctl) {
12789ed24f4bSMarc Zyngier 	case KVM_CREATE_IRQCHIP: {
12799ed24f4bSMarc Zyngier 		int ret;
12809ed24f4bSMarc Zyngier 		if (!vgic_present)
12819ed24f4bSMarc Zyngier 			return -ENXIO;
12829ed24f4bSMarc Zyngier 		mutex_lock(&kvm->lock);
12839ed24f4bSMarc Zyngier 		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
12849ed24f4bSMarc Zyngier 		mutex_unlock(&kvm->lock);
12859ed24f4bSMarc Zyngier 		return ret;
12869ed24f4bSMarc Zyngier 	}
12879ed24f4bSMarc Zyngier 	case KVM_ARM_SET_DEVICE_ADDR: {
12889ed24f4bSMarc Zyngier 		struct kvm_arm_device_addr dev_addr;
12899ed24f4bSMarc Zyngier 
12909ed24f4bSMarc Zyngier 		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
12919ed24f4bSMarc Zyngier 			return -EFAULT;
12929ed24f4bSMarc Zyngier 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
12939ed24f4bSMarc Zyngier 	}
12949ed24f4bSMarc Zyngier 	case KVM_ARM_PREFERRED_TARGET: {
12959ed24f4bSMarc Zyngier 		int err;
12969ed24f4bSMarc Zyngier 		struct kvm_vcpu_init init;
12979ed24f4bSMarc Zyngier 
12989ed24f4bSMarc Zyngier 		err = kvm_vcpu_preferred_target(&init);
12999ed24f4bSMarc Zyngier 		if (err)
13009ed24f4bSMarc Zyngier 			return err;
13019ed24f4bSMarc Zyngier 
13029ed24f4bSMarc Zyngier 		if (copy_to_user(argp, &init, sizeof(init)))
13039ed24f4bSMarc Zyngier 			return -EFAULT;
13049ed24f4bSMarc Zyngier 
13059ed24f4bSMarc Zyngier 		return 0;
13069ed24f4bSMarc Zyngier 	}
13079ed24f4bSMarc Zyngier 	default:
13089ed24f4bSMarc Zyngier 		return -EINVAL;
13099ed24f4bSMarc Zyngier 	}
13109ed24f4bSMarc Zyngier }
13119ed24f4bSMarc Zyngier 
131230c95391SDavid Brazdil static unsigned long nvhe_percpu_size(void)
131330c95391SDavid Brazdil {
131430c95391SDavid Brazdil 	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
131530c95391SDavid Brazdil 		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
131630c95391SDavid Brazdil }
131730c95391SDavid Brazdil 
131830c95391SDavid Brazdil static unsigned long nvhe_percpu_order(void)
131930c95391SDavid Brazdil {
132030c95391SDavid Brazdil 	unsigned long size = nvhe_percpu_size();
132130c95391SDavid Brazdil 
132230c95391SDavid Brazdil 	return size ? get_order(size) : 0;
132330c95391SDavid Brazdil }
132430c95391SDavid Brazdil 
13259ef2b48bSWill Deacon static int kvm_map_vectors(void)
13269ef2b48bSWill Deacon {
13279ef2b48bSWill Deacon 	/*
13289ef2b48bSWill Deacon 	 * SV2  = ARM64_SPECTRE_V2
13299ef2b48bSWill Deacon 	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
13309ef2b48bSWill Deacon 	 *
13319ef2b48bSWill Deacon 	 * !SV2 + !HEL2 -> use direct vectors
13329ef2b48bSWill Deacon 	 *  SV2 + !HEL2 -> use hardened vectors in place
13339ef2b48bSWill Deacon 	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
13349ef2b48bSWill Deacon 	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
13359ef2b48bSWill Deacon 	 */
13369ef2b48bSWill Deacon 	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
13379ef2b48bSWill Deacon 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
13389ef2b48bSWill Deacon 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
13399ef2b48bSWill Deacon 	}
13409ef2b48bSWill Deacon 
13419ef2b48bSWill Deacon 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
13429ef2b48bSWill Deacon 		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
13439ef2b48bSWill Deacon 		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
13449ef2b48bSWill Deacon 
13459ef2b48bSWill Deacon 		/*
13469ef2b48bSWill Deacon 		 * Always allocate a spare vector slot, as we don't
13479ef2b48bSWill Deacon 		 * know yet which CPUs have a BP hardening slot that
13489ef2b48bSWill Deacon 		 * we can reuse.
13499ef2b48bSWill Deacon 		 */
13509ef2b48bSWill Deacon 		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
13519ef2b48bSWill Deacon 		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
13529ef2b48bSWill Deacon 		return create_hyp_exec_mappings(vect_pa, size,
13539ef2b48bSWill Deacon 						&__kvm_bp_vect_base);
13549ef2b48bSWill Deacon 	}
13559ef2b48bSWill Deacon 
13569ef2b48bSWill Deacon 	return 0;
13579ef2b48bSWill Deacon }
13589ef2b48bSWill Deacon 
13599ed24f4bSMarc Zyngier static void cpu_init_hyp_mode(void)
13609ed24f4bSMarc Zyngier {
13619ed24f4bSMarc Zyngier 	phys_addr_t pgd_ptr;
13629ed24f4bSMarc Zyngier 	unsigned long hyp_stack_ptr;
13639ed24f4bSMarc Zyngier 	unsigned long vector_ptr;
136471b3ec5fSDavid Brazdil 	unsigned long tpidr_el2;
136504e4caa8SAndrew Scull 	struct arm_smccc_res res;
13669ed24f4bSMarc Zyngier 
13679ed24f4bSMarc Zyngier 	/* Switch from the HYP stub to our own HYP init vector */
13689ed24f4bSMarc Zyngier 	__hyp_set_vectors(kvm_get_idmap_vector());
13699ed24f4bSMarc Zyngier 
137071b3ec5fSDavid Brazdil 	/*
137171b3ec5fSDavid Brazdil 	 * Calculate the raw per-cpu offset without a translation from the
137271b3ec5fSDavid Brazdil 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
137371b3ec5fSDavid Brazdil 	 * so that we can use adr_l to access per-cpu variables in EL2.
137471b3ec5fSDavid Brazdil 	 */
137530c95391SDavid Brazdil 	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
137630c95391SDavid Brazdil 		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
137771b3ec5fSDavid Brazdil 
13789ed24f4bSMarc Zyngier 	pgd_ptr = kvm_mmu_get_httbr();
137971b3ec5fSDavid Brazdil 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
13805dc33bd1SAndrew Scull 	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
13816e3bfbb2SAndrew Scull 	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
13829ed24f4bSMarc Zyngier 
138371b3ec5fSDavid Brazdil 	/*
138471b3ec5fSDavid Brazdil 	 * Call initialization code, and switch to the full blown HYP code.
138571b3ec5fSDavid Brazdil 	 * If the cpucaps haven't been finalized yet, something has gone very
138671b3ec5fSDavid Brazdil 	 * wrong, and hyp will crash and burn when it uses any
138771b3ec5fSDavid Brazdil 	 * cpus_have_const_cap() wrapper.
138871b3ec5fSDavid Brazdil 	 */
138971b3ec5fSDavid Brazdil 	BUG_ON(!system_capabilities_finalized());
139004e4caa8SAndrew Scull 	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
139104e4caa8SAndrew Scull 			  pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
139204e4caa8SAndrew Scull 	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
139371b3ec5fSDavid Brazdil 
139471b3ec5fSDavid Brazdil 	/*
139571b3ec5fSDavid Brazdil 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
139671b3ec5fSDavid Brazdil 	 * at EL2.
139771b3ec5fSDavid Brazdil 	 */
139871b3ec5fSDavid Brazdil 	if (this_cpu_has_cap(ARM64_SSBS) &&
1399d63d975aSMarc Zyngier 	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
140013aeb9b4SDavid Brazdil 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
140171b3ec5fSDavid Brazdil 	}
14029ed24f4bSMarc Zyngier }
14039ed24f4bSMarc Zyngier 
14049ed24f4bSMarc Zyngier static void cpu_hyp_reset(void)
14059ed24f4bSMarc Zyngier {
14069ed24f4bSMarc Zyngier 	if (!is_kernel_in_hyp_mode())
14079ed24f4bSMarc Zyngier 		__hyp_reset_vectors();
14089ed24f4bSMarc Zyngier }
14099ed24f4bSMarc Zyngier 
14109ed24f4bSMarc Zyngier static void cpu_hyp_reinit(void)
14119ed24f4bSMarc Zyngier {
14122a1198c9SDavid Brazdil 	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
14139ed24f4bSMarc Zyngier 
14149ed24f4bSMarc Zyngier 	cpu_hyp_reset();
14159ed24f4bSMarc Zyngier 
141614ef9d04SMarc Zyngier 	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
1417a0e47952SAndrew Scull 
14189ed24f4bSMarc Zyngier 	if (is_kernel_in_hyp_mode())
14199ed24f4bSMarc Zyngier 		kvm_timer_init_vhe();
14209ed24f4bSMarc Zyngier 	else
14219ed24f4bSMarc Zyngier 		cpu_init_hyp_mode();
14229ed24f4bSMarc Zyngier 
14239ed24f4bSMarc Zyngier 	kvm_arm_init_debug();
14249ed24f4bSMarc Zyngier 
14259ed24f4bSMarc Zyngier 	if (vgic_present)
14269ed24f4bSMarc Zyngier 		kvm_vgic_init_cpu_hardware();
14279ed24f4bSMarc Zyngier }
14289ed24f4bSMarc Zyngier 
14299ed24f4bSMarc Zyngier static void _kvm_arch_hardware_enable(void *discard)
14309ed24f4bSMarc Zyngier {
14319ed24f4bSMarc Zyngier 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
14329ed24f4bSMarc Zyngier 		cpu_hyp_reinit();
14339ed24f4bSMarc Zyngier 		__this_cpu_write(kvm_arm_hardware_enabled, 1);
14349ed24f4bSMarc Zyngier 	}
14359ed24f4bSMarc Zyngier }
14369ed24f4bSMarc Zyngier 
14379ed24f4bSMarc Zyngier int kvm_arch_hardware_enable(void)
14389ed24f4bSMarc Zyngier {
14399ed24f4bSMarc Zyngier 	_kvm_arch_hardware_enable(NULL);
14409ed24f4bSMarc Zyngier 	return 0;
14419ed24f4bSMarc Zyngier }
14429ed24f4bSMarc Zyngier 
14439ed24f4bSMarc Zyngier static void _kvm_arch_hardware_disable(void *discard)
14449ed24f4bSMarc Zyngier {
14459ed24f4bSMarc Zyngier 	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
14469ed24f4bSMarc Zyngier 		cpu_hyp_reset();
14479ed24f4bSMarc Zyngier 		__this_cpu_write(kvm_arm_hardware_enabled, 0);
14489ed24f4bSMarc Zyngier 	}
14499ed24f4bSMarc Zyngier }
14509ed24f4bSMarc Zyngier 
14519ed24f4bSMarc Zyngier void kvm_arch_hardware_disable(void)
14529ed24f4bSMarc Zyngier {
14539ed24f4bSMarc Zyngier 	_kvm_arch_hardware_disable(NULL);
14549ed24f4bSMarc Zyngier }
14559ed24f4bSMarc Zyngier 
14569ed24f4bSMarc Zyngier #ifdef CONFIG_CPU_PM
14579ed24f4bSMarc Zyngier static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
14589ed24f4bSMarc Zyngier 				    unsigned long cmd,
14599ed24f4bSMarc Zyngier 				    void *v)
14609ed24f4bSMarc Zyngier {
14619ed24f4bSMarc Zyngier 	/*
14629ed24f4bSMarc Zyngier 	 * kvm_arm_hardware_enabled is left with its old value over
14639ed24f4bSMarc Zyngier 	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
14649ed24f4bSMarc Zyngier 	 * re-enable hyp.
14659ed24f4bSMarc Zyngier 	 */
14669ed24f4bSMarc Zyngier 	switch (cmd) {
14679ed24f4bSMarc Zyngier 	case CPU_PM_ENTER:
14689ed24f4bSMarc Zyngier 		if (__this_cpu_read(kvm_arm_hardware_enabled))
14699ed24f4bSMarc Zyngier 			/*
14709ed24f4bSMarc Zyngier 			 * don't update kvm_arm_hardware_enabled here
14719ed24f4bSMarc Zyngier 			 * so that the hardware will be re-enabled
14729ed24f4bSMarc Zyngier 			 * when we resume. See below.
14739ed24f4bSMarc Zyngier 			 */
14749ed24f4bSMarc Zyngier 			cpu_hyp_reset();
14759ed24f4bSMarc Zyngier 
14769ed24f4bSMarc Zyngier 		return NOTIFY_OK;
14779ed24f4bSMarc Zyngier 	case CPU_PM_ENTER_FAILED:
14789ed24f4bSMarc Zyngier 	case CPU_PM_EXIT:
14799ed24f4bSMarc Zyngier 		if (__this_cpu_read(kvm_arm_hardware_enabled))
14809ed24f4bSMarc Zyngier 			/* The hardware was enabled before suspend. */
14819ed24f4bSMarc Zyngier 			cpu_hyp_reinit();
14829ed24f4bSMarc Zyngier 
14839ed24f4bSMarc Zyngier 		return NOTIFY_OK;
14849ed24f4bSMarc Zyngier 
14859ed24f4bSMarc Zyngier 	default:
14869ed24f4bSMarc Zyngier 		return NOTIFY_DONE;
14879ed24f4bSMarc Zyngier 	}
14889ed24f4bSMarc Zyngier }
14899ed24f4bSMarc Zyngier 
14909ed24f4bSMarc Zyngier static struct notifier_block hyp_init_cpu_pm_nb = {
14919ed24f4bSMarc Zyngier 	.notifier_call = hyp_init_cpu_pm_notifier,
14929ed24f4bSMarc Zyngier };
14939ed24f4bSMarc Zyngier 
14949ed24f4bSMarc Zyngier static void __init hyp_cpu_pm_init(void)
14959ed24f4bSMarc Zyngier {
14969ed24f4bSMarc Zyngier 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
14979ed24f4bSMarc Zyngier }
14989ed24f4bSMarc Zyngier static void __init hyp_cpu_pm_exit(void)
14999ed24f4bSMarc Zyngier {
15009ed24f4bSMarc Zyngier 	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
15019ed24f4bSMarc Zyngier }
15029ed24f4bSMarc Zyngier #else
15039ed24f4bSMarc Zyngier static inline void hyp_cpu_pm_init(void)
15049ed24f4bSMarc Zyngier {
15059ed24f4bSMarc Zyngier }
15069ed24f4bSMarc Zyngier static inline void hyp_cpu_pm_exit(void)
15079ed24f4bSMarc Zyngier {
15089ed24f4bSMarc Zyngier }
15099ed24f4bSMarc Zyngier #endif
15109ed24f4bSMarc Zyngier 
15119ed24f4bSMarc Zyngier static int init_common_resources(void)
15129ed24f4bSMarc Zyngier {
1513039aeb9dSLinus Torvalds 	return kvm_set_ipa_limit();
15149ed24f4bSMarc Zyngier }
15159ed24f4bSMarc Zyngier 
15169ed24f4bSMarc Zyngier static int init_subsystems(void)
15179ed24f4bSMarc Zyngier {
15189ed24f4bSMarc Zyngier 	int err = 0;
15199ed24f4bSMarc Zyngier 
15209ed24f4bSMarc Zyngier 	/*
15219ed24f4bSMarc Zyngier 	 * Enable hardware so that subsystem initialisation can access EL2.
15229ed24f4bSMarc Zyngier 	 */
15239ed24f4bSMarc Zyngier 	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
15249ed24f4bSMarc Zyngier 
15259ed24f4bSMarc Zyngier 	/*
15269ed24f4bSMarc Zyngier 	 * Register CPU lower-power notifier
15279ed24f4bSMarc Zyngier 	 */
15289ed24f4bSMarc Zyngier 	hyp_cpu_pm_init();
15299ed24f4bSMarc Zyngier 
15309ed24f4bSMarc Zyngier 	/*
15319ed24f4bSMarc Zyngier 	 * Init HYP view of VGIC
15329ed24f4bSMarc Zyngier 	 */
15339ed24f4bSMarc Zyngier 	err = kvm_vgic_hyp_init();
15349ed24f4bSMarc Zyngier 	switch (err) {
15359ed24f4bSMarc Zyngier 	case 0:
15369ed24f4bSMarc Zyngier 		vgic_present = true;
15379ed24f4bSMarc Zyngier 		break;
15389ed24f4bSMarc Zyngier 	case -ENODEV:
15399ed24f4bSMarc Zyngier 	case -ENXIO:
15409ed24f4bSMarc Zyngier 		vgic_present = false;
15419ed24f4bSMarc Zyngier 		err = 0;
15429ed24f4bSMarc Zyngier 		break;
15439ed24f4bSMarc Zyngier 	default:
15449ed24f4bSMarc Zyngier 		goto out;
15459ed24f4bSMarc Zyngier 	}
15469ed24f4bSMarc Zyngier 
15479ed24f4bSMarc Zyngier 	/*
15489ed24f4bSMarc Zyngier 	 * Init HYP architected timer support
15499ed24f4bSMarc Zyngier 	 */
15509ed24f4bSMarc Zyngier 	err = kvm_timer_hyp_init(vgic_present);
15519ed24f4bSMarc Zyngier 	if (err)
15529ed24f4bSMarc Zyngier 		goto out;
15539ed24f4bSMarc Zyngier 
15549ed24f4bSMarc Zyngier 	kvm_perf_init();
15559ed24f4bSMarc Zyngier 	kvm_coproc_table_init();
15569ed24f4bSMarc Zyngier 
15579ed24f4bSMarc Zyngier out:
15589ed24f4bSMarc Zyngier 	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
15599ed24f4bSMarc Zyngier 
15609ed24f4bSMarc Zyngier 	return err;
15619ed24f4bSMarc Zyngier }
15629ed24f4bSMarc Zyngier 
15639ed24f4bSMarc Zyngier static void teardown_hyp_mode(void)
15649ed24f4bSMarc Zyngier {
15659ed24f4bSMarc Zyngier 	int cpu;
15669ed24f4bSMarc Zyngier 
15679ed24f4bSMarc Zyngier 	free_hyp_pgds();
156830c95391SDavid Brazdil 	for_each_possible_cpu(cpu) {
15699ed24f4bSMarc Zyngier 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
157030c95391SDavid Brazdil 		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
157130c95391SDavid Brazdil 	}
15729ed24f4bSMarc Zyngier }
15739ed24f4bSMarc Zyngier 
15749ed24f4bSMarc Zyngier /**
15759ed24f4bSMarc Zyngier  * Inits Hyp-mode on all online CPUs
15769ed24f4bSMarc Zyngier  */
15779ed24f4bSMarc Zyngier static int init_hyp_mode(void)
15789ed24f4bSMarc Zyngier {
15799ed24f4bSMarc Zyngier 	int cpu;
15809ed24f4bSMarc Zyngier 	int err = 0;
15819ed24f4bSMarc Zyngier 
15829ed24f4bSMarc Zyngier 	/*
15839ed24f4bSMarc Zyngier 	 * Allocate Hyp PGD and setup Hyp identity mapping
15849ed24f4bSMarc Zyngier 	 */
15859ed24f4bSMarc Zyngier 	err = kvm_mmu_init();
15869ed24f4bSMarc Zyngier 	if (err)
15879ed24f4bSMarc Zyngier 		goto out_err;
15889ed24f4bSMarc Zyngier 
15899ed24f4bSMarc Zyngier 	/*
15909ed24f4bSMarc Zyngier 	 * Allocate stack pages for Hypervisor-mode
15919ed24f4bSMarc Zyngier 	 */
15929ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
15939ed24f4bSMarc Zyngier 		unsigned long stack_page;
15949ed24f4bSMarc Zyngier 
15959ed24f4bSMarc Zyngier 		stack_page = __get_free_page(GFP_KERNEL);
15969ed24f4bSMarc Zyngier 		if (!stack_page) {
15979ed24f4bSMarc Zyngier 			err = -ENOMEM;
15989ed24f4bSMarc Zyngier 			goto out_err;
15999ed24f4bSMarc Zyngier 		}
16009ed24f4bSMarc Zyngier 
16019ed24f4bSMarc Zyngier 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
16029ed24f4bSMarc Zyngier 	}
16039ed24f4bSMarc Zyngier 
16049ed24f4bSMarc Zyngier 	/*
160530c95391SDavid Brazdil 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
160630c95391SDavid Brazdil 	 */
160730c95391SDavid Brazdil 	for_each_possible_cpu(cpu) {
160830c95391SDavid Brazdil 		struct page *page;
160930c95391SDavid Brazdil 		void *page_addr;
161030c95391SDavid Brazdil 
161130c95391SDavid Brazdil 		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
161230c95391SDavid Brazdil 		if (!page) {
161330c95391SDavid Brazdil 			err = -ENOMEM;
161430c95391SDavid Brazdil 			goto out_err;
161530c95391SDavid Brazdil 		}
161630c95391SDavid Brazdil 
161730c95391SDavid Brazdil 		page_addr = page_address(page);
161830c95391SDavid Brazdil 		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
161930c95391SDavid Brazdil 		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
162030c95391SDavid Brazdil 	}
162130c95391SDavid Brazdil 
162230c95391SDavid Brazdil 	/*
16239ed24f4bSMarc Zyngier 	 * Map the Hyp-code called directly from the host
16249ed24f4bSMarc Zyngier 	 */
16259ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
16269ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
16279ed24f4bSMarc Zyngier 	if (err) {
16289ed24f4bSMarc Zyngier 		kvm_err("Cannot map world-switch code\n");
16299ed24f4bSMarc Zyngier 		goto out_err;
16309ed24f4bSMarc Zyngier 	}
16319ed24f4bSMarc Zyngier 
16329ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
16339ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
16349ed24f4bSMarc Zyngier 	if (err) {
16359ed24f4bSMarc Zyngier 		kvm_err("Cannot map rodata section\n");
16369ed24f4bSMarc Zyngier 		goto out_err;
16379ed24f4bSMarc Zyngier 	}
16389ed24f4bSMarc Zyngier 
16399ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
16409ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
16419ed24f4bSMarc Zyngier 	if (err) {
16429ed24f4bSMarc Zyngier 		kvm_err("Cannot map bss section\n");
16439ed24f4bSMarc Zyngier 		goto out_err;
16449ed24f4bSMarc Zyngier 	}
16459ed24f4bSMarc Zyngier 
16469ed24f4bSMarc Zyngier 	err = kvm_map_vectors();
16479ed24f4bSMarc Zyngier 	if (err) {
16489ed24f4bSMarc Zyngier 		kvm_err("Cannot map vectors\n");
16499ed24f4bSMarc Zyngier 		goto out_err;
16509ed24f4bSMarc Zyngier 	}
16519ed24f4bSMarc Zyngier 
16529ed24f4bSMarc Zyngier 	/*
16539ed24f4bSMarc Zyngier 	 * Map the Hyp stack pages
16549ed24f4bSMarc Zyngier 	 */
16559ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
16569ed24f4bSMarc Zyngier 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
16579ed24f4bSMarc Zyngier 		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
16589ed24f4bSMarc Zyngier 					  PAGE_HYP);
16599ed24f4bSMarc Zyngier 
16609ed24f4bSMarc Zyngier 		if (err) {
16619ed24f4bSMarc Zyngier 			kvm_err("Cannot map hyp stack\n");
16629ed24f4bSMarc Zyngier 			goto out_err;
16639ed24f4bSMarc Zyngier 		}
16649ed24f4bSMarc Zyngier 	}
16659ed24f4bSMarc Zyngier 
166630c95391SDavid Brazdil 	/*
166730c95391SDavid Brazdil 	 * Map Hyp percpu pages
166830c95391SDavid Brazdil 	 */
16699ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
167030c95391SDavid Brazdil 		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
167130c95391SDavid Brazdil 		char *percpu_end = percpu_begin + nvhe_percpu_size();
16729ed24f4bSMarc Zyngier 
167330c95391SDavid Brazdil 		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
16749ed24f4bSMarc Zyngier 
16759ed24f4bSMarc Zyngier 		if (err) {
167630c95391SDavid Brazdil 			kvm_err("Cannot map hyp percpu region\n");
16776e3bfbb2SAndrew Scull 			goto out_err;
16786e3bfbb2SAndrew Scull 		}
16799ed24f4bSMarc Zyngier 	}
16809ed24f4bSMarc Zyngier 
16819ed24f4bSMarc Zyngier 	return 0;
16829ed24f4bSMarc Zyngier 
16839ed24f4bSMarc Zyngier out_err:
16849ed24f4bSMarc Zyngier 	teardown_hyp_mode();
16859ed24f4bSMarc Zyngier 	kvm_err("error initializing Hyp mode: %d\n", err);
16869ed24f4bSMarc Zyngier 	return err;
16879ed24f4bSMarc Zyngier }
16889ed24f4bSMarc Zyngier 
16899ed24f4bSMarc Zyngier static void check_kvm_target_cpu(void *ret)
16909ed24f4bSMarc Zyngier {
16919ed24f4bSMarc Zyngier 	*(int *)ret = kvm_target_cpu();
16929ed24f4bSMarc Zyngier }
16939ed24f4bSMarc Zyngier 
16949ed24f4bSMarc Zyngier struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
16959ed24f4bSMarc Zyngier {
16969ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
16979ed24f4bSMarc Zyngier 	int i;
16989ed24f4bSMarc Zyngier 
16999ed24f4bSMarc Zyngier 	mpidr &= MPIDR_HWID_BITMASK;
17009ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
17019ed24f4bSMarc Zyngier 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
17029ed24f4bSMarc Zyngier 			return vcpu;
17039ed24f4bSMarc Zyngier 	}
17049ed24f4bSMarc Zyngier 	return NULL;
17059ed24f4bSMarc Zyngier }
17069ed24f4bSMarc Zyngier 
17079ed24f4bSMarc Zyngier bool kvm_arch_has_irq_bypass(void)
17089ed24f4bSMarc Zyngier {
17099ed24f4bSMarc Zyngier 	return true;
17109ed24f4bSMarc Zyngier }
17119ed24f4bSMarc Zyngier 
17129ed24f4bSMarc Zyngier int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
17139ed24f4bSMarc Zyngier 				      struct irq_bypass_producer *prod)
17149ed24f4bSMarc Zyngier {
17159ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17169ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17179ed24f4bSMarc Zyngier 
17189ed24f4bSMarc Zyngier 	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
17199ed24f4bSMarc Zyngier 					  &irqfd->irq_entry);
17209ed24f4bSMarc Zyngier }
17219ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
17229ed24f4bSMarc Zyngier 				      struct irq_bypass_producer *prod)
17239ed24f4bSMarc Zyngier {
17249ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17259ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17269ed24f4bSMarc Zyngier 
17279ed24f4bSMarc Zyngier 	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
17289ed24f4bSMarc Zyngier 				     &irqfd->irq_entry);
17299ed24f4bSMarc Zyngier }
17309ed24f4bSMarc Zyngier 
17319ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
17329ed24f4bSMarc Zyngier {
17339ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17349ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17359ed24f4bSMarc Zyngier 
17369ed24f4bSMarc Zyngier 	kvm_arm_halt_guest(irqfd->kvm);
17379ed24f4bSMarc Zyngier }
17389ed24f4bSMarc Zyngier 
17399ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
17409ed24f4bSMarc Zyngier {
17419ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17429ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17439ed24f4bSMarc Zyngier 
17449ed24f4bSMarc Zyngier 	kvm_arm_resume_guest(irqfd->kvm);
17459ed24f4bSMarc Zyngier }
17469ed24f4bSMarc Zyngier 
17479ed24f4bSMarc Zyngier /**
17489ed24f4bSMarc Zyngier  * Initialize Hyp-mode and memory mappings on all CPUs.
17499ed24f4bSMarc Zyngier  */
17509ed24f4bSMarc Zyngier int kvm_arch_init(void *opaque)
17519ed24f4bSMarc Zyngier {
17529ed24f4bSMarc Zyngier 	int err;
17539ed24f4bSMarc Zyngier 	int ret, cpu;
17549ed24f4bSMarc Zyngier 	bool in_hyp_mode;
17559ed24f4bSMarc Zyngier 
17569ed24f4bSMarc Zyngier 	if (!is_hyp_mode_available()) {
17579ed24f4bSMarc Zyngier 		kvm_info("HYP mode not available\n");
17589ed24f4bSMarc Zyngier 		return -ENODEV;
17599ed24f4bSMarc Zyngier 	}
17609ed24f4bSMarc Zyngier 
17619ed24f4bSMarc Zyngier 	in_hyp_mode = is_kernel_in_hyp_mode();
17629ed24f4bSMarc Zyngier 
17639ed24f4bSMarc Zyngier 	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
17649ed24f4bSMarc Zyngier 		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
17659ed24f4bSMarc Zyngier 		return -ENODEV;
17669ed24f4bSMarc Zyngier 	}
17679ed24f4bSMarc Zyngier 
176896d389caSRob Herring 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
176996d389caSRob Herring 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
1770abf532ccSRob Herring 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
1771abf532ccSRob Herring 			 "Only trusted guests should be used on this system.\n");
1772abf532ccSRob Herring 
17739ed24f4bSMarc Zyngier 	for_each_online_cpu(cpu) {
17749ed24f4bSMarc Zyngier 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
17759ed24f4bSMarc Zyngier 		if (ret < 0) {
17769ed24f4bSMarc Zyngier 			kvm_err("Error, CPU %d not supported!\n", cpu);
17779ed24f4bSMarc Zyngier 			return -ENODEV;
17789ed24f4bSMarc Zyngier 		}
17799ed24f4bSMarc Zyngier 	}
17809ed24f4bSMarc Zyngier 
17819ed24f4bSMarc Zyngier 	err = init_common_resources();
17829ed24f4bSMarc Zyngier 	if (err)
17839ed24f4bSMarc Zyngier 		return err;
17849ed24f4bSMarc Zyngier 
17859ed24f4bSMarc Zyngier 	err = kvm_arm_init_sve();
17869ed24f4bSMarc Zyngier 	if (err)
17879ed24f4bSMarc Zyngier 		return err;
17889ed24f4bSMarc Zyngier 
17899ed24f4bSMarc Zyngier 	if (!in_hyp_mode) {
17909ed24f4bSMarc Zyngier 		err = init_hyp_mode();
17919ed24f4bSMarc Zyngier 		if (err)
17929ed24f4bSMarc Zyngier 			goto out_err;
17939ed24f4bSMarc Zyngier 	}
17949ed24f4bSMarc Zyngier 
17959ed24f4bSMarc Zyngier 	err = init_subsystems();
17969ed24f4bSMarc Zyngier 	if (err)
17979ed24f4bSMarc Zyngier 		goto out_hyp;
17989ed24f4bSMarc Zyngier 
17999ed24f4bSMarc Zyngier 	if (in_hyp_mode)
18009ed24f4bSMarc Zyngier 		kvm_info("VHE mode initialized successfully\n");
18019ed24f4bSMarc Zyngier 	else
18029ed24f4bSMarc Zyngier 		kvm_info("Hyp mode initialized successfully\n");
18039ed24f4bSMarc Zyngier 
18049ed24f4bSMarc Zyngier 	return 0;
18059ed24f4bSMarc Zyngier 
18069ed24f4bSMarc Zyngier out_hyp:
18079ed24f4bSMarc Zyngier 	hyp_cpu_pm_exit();
18089ed24f4bSMarc Zyngier 	if (!in_hyp_mode)
18099ed24f4bSMarc Zyngier 		teardown_hyp_mode();
18109ed24f4bSMarc Zyngier out_err:
18119ed24f4bSMarc Zyngier 	return err;
18129ed24f4bSMarc Zyngier }
18139ed24f4bSMarc Zyngier 
18149ed24f4bSMarc Zyngier /* NOP: Compiling as a module not supported */
18159ed24f4bSMarc Zyngier void kvm_arch_exit(void)
18169ed24f4bSMarc Zyngier {
18179ed24f4bSMarc Zyngier 	kvm_perf_teardown();
18189ed24f4bSMarc Zyngier }
18199ed24f4bSMarc Zyngier 
18209ed24f4bSMarc Zyngier static int arm_init(void)
18219ed24f4bSMarc Zyngier {
18229ed24f4bSMarc Zyngier 	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
18239ed24f4bSMarc Zyngier 	return rc;
18249ed24f4bSMarc Zyngier }
18259ed24f4bSMarc Zyngier 
18269ed24f4bSMarc Zyngier module_init(arm_init);
1827