// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
extern u32 kvm_nvhe_sym(kvm_host_psci_version);
extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
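
/*
 * Illustrative userspace sketch (an assumption about typical VMM usage,
 * not part of this file): the NISV-to-user capability handled above is
 * enabled with the generic KVM_ENABLE_CAP ioctl on the VM file
 * descriptor, where vm_fd comes from KVM_CREATE_VM:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_NISV_TO_USER,	// .flags stays 0,
 *	};					// as checked above
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */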
1079ed24f4bSMarc Zyngier 
1085107000fSMarc Zyngier static int kvm_arm_default_max_vcpus(void)
1095107000fSMarc Zyngier {
1105107000fSMarc Zyngier 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
1115107000fSMarc Zyngier }
1125107000fSMarc Zyngier 
1139ed24f4bSMarc Zyngier /**
1149ed24f4bSMarc Zyngier  * kvm_arch_init_vm - initializes a VM data structure
1159ed24f4bSMarc Zyngier  * @kvm:	pointer to the KVM struct
1169ed24f4bSMarc Zyngier  */
1179ed24f4bSMarc Zyngier int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1189ed24f4bSMarc Zyngier {
119a0e50aa3SChristoffer Dall 	int ret;
1209ed24f4bSMarc Zyngier 
1219ed24f4bSMarc Zyngier 	ret = kvm_arm_setup_stage2(kvm, type);
1229ed24f4bSMarc Zyngier 	if (ret)
1239ed24f4bSMarc Zyngier 		return ret;
1249ed24f4bSMarc Zyngier 
125a0e50aa3SChristoffer Dall 	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
1269ed24f4bSMarc Zyngier 	if (ret)
127a0e50aa3SChristoffer Dall 		return ret;
1289ed24f4bSMarc Zyngier 
1299ed24f4bSMarc Zyngier 	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
1309ed24f4bSMarc Zyngier 	if (ret)
1319ed24f4bSMarc Zyngier 		goto out_free_stage2_pgd;
1329ed24f4bSMarc Zyngier 
1339ed24f4bSMarc Zyngier 	kvm_vgic_early_init(kvm);
1349ed24f4bSMarc Zyngier 
1359ed24f4bSMarc Zyngier 	/* The maximum number of VCPUs is limited by the host's GIC model */
1365107000fSMarc Zyngier 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
1379ed24f4bSMarc Zyngier 
1389ed24f4bSMarc Zyngier 	return ret;
1399ed24f4bSMarc Zyngier out_free_stage2_pgd:
140a0e50aa3SChristoffer Dall 	kvm_free_stage2_pgd(&kvm->arch.mmu);
1419ed24f4bSMarc Zyngier 	return ret;
1429ed24f4bSMarc Zyngier }
1439ed24f4bSMarc Zyngier 
1449ed24f4bSMarc Zyngier vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1459ed24f4bSMarc Zyngier {
1469ed24f4bSMarc Zyngier 	return VM_FAULT_SIGBUS;
1479ed24f4bSMarc Zyngier }
1489ed24f4bSMarc Zyngier 
1499ed24f4bSMarc Zyngier 
1509ed24f4bSMarc Zyngier /**
1519ed24f4bSMarc Zyngier  * kvm_arch_destroy_vm - destroy the VM data structure
1529ed24f4bSMarc Zyngier  * @kvm:	pointer to the KVM struct
1539ed24f4bSMarc Zyngier  */
1549ed24f4bSMarc Zyngier void kvm_arch_destroy_vm(struct kvm *kvm)
1559ed24f4bSMarc Zyngier {
1569ed24f4bSMarc Zyngier 	int i;
1579ed24f4bSMarc Zyngier 
158d7eec236SMarc Zyngier 	bitmap_free(kvm->arch.pmu_filter);
159d7eec236SMarc Zyngier 
1609ed24f4bSMarc Zyngier 	kvm_vgic_destroy(kvm);
1619ed24f4bSMarc Zyngier 
1629ed24f4bSMarc Zyngier 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
1639ed24f4bSMarc Zyngier 		if (kvm->vcpus[i]) {
1649ed24f4bSMarc Zyngier 			kvm_vcpu_destroy(kvm->vcpus[i]);
1659ed24f4bSMarc Zyngier 			kvm->vcpus[i] = NULL;
1669ed24f4bSMarc Zyngier 		}
1679ed24f4bSMarc Zyngier 	}
1689ed24f4bSMarc Zyngier 	atomic_set(&kvm->online_vcpus, 0);
1699ed24f4bSMarc Zyngier }
1709ed24f4bSMarc Zyngier 
1719ed24f4bSMarc Zyngier int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1729ed24f4bSMarc Zyngier {
1739ed24f4bSMarc Zyngier 	int r;
1749ed24f4bSMarc Zyngier 	switch (ext) {
1759ed24f4bSMarc Zyngier 	case KVM_CAP_IRQCHIP:
1769ed24f4bSMarc Zyngier 		r = vgic_present;
1779ed24f4bSMarc Zyngier 		break;
1789ed24f4bSMarc Zyngier 	case KVM_CAP_IOEVENTFD:
1799ed24f4bSMarc Zyngier 	case KVM_CAP_DEVICE_CTRL:
1809ed24f4bSMarc Zyngier 	case KVM_CAP_USER_MEMORY:
1819ed24f4bSMarc Zyngier 	case KVM_CAP_SYNC_MMU:
1829ed24f4bSMarc Zyngier 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
1839ed24f4bSMarc Zyngier 	case KVM_CAP_ONE_REG:
1849ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_PSCI:
1859ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_PSCI_0_2:
1869ed24f4bSMarc Zyngier 	case KVM_CAP_READONLY_MEM:
1879ed24f4bSMarc Zyngier 	case KVM_CAP_MP_STATE:
1889ed24f4bSMarc Zyngier 	case KVM_CAP_IMMEDIATE_EXIT:
1899ed24f4bSMarc Zyngier 	case KVM_CAP_VCPU_EVENTS:
1909ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
1919ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_NISV_TO_USER:
1929ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_INJECT_EXT_DABT:
1939ed24f4bSMarc Zyngier 		r = 1;
1949ed24f4bSMarc Zyngier 		break;
1959ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
1969ed24f4bSMarc Zyngier 		r = 1;
1979ed24f4bSMarc Zyngier 		break;
1989ed24f4bSMarc Zyngier 	case KVM_CAP_NR_VCPUS:
1999ed24f4bSMarc Zyngier 		r = num_online_cpus();
2009ed24f4bSMarc Zyngier 		break;
2019ed24f4bSMarc Zyngier 	case KVM_CAP_MAX_VCPUS:
2029ed24f4bSMarc Zyngier 	case KVM_CAP_MAX_VCPU_ID:
2035107000fSMarc Zyngier 		if (kvm)
2045107000fSMarc Zyngier 			r = kvm->arch.max_vcpus;
2055107000fSMarc Zyngier 		else
2065107000fSMarc Zyngier 			r = kvm_arm_default_max_vcpus();
2079ed24f4bSMarc Zyngier 		break;
2089ed24f4bSMarc Zyngier 	case KVM_CAP_MSI_DEVID:
2099ed24f4bSMarc Zyngier 		if (!kvm)
2109ed24f4bSMarc Zyngier 			r = -EINVAL;
2119ed24f4bSMarc Zyngier 		else
2129ed24f4bSMarc Zyngier 			r = kvm->arch.vgic.msis_require_devid;
2139ed24f4bSMarc Zyngier 		break;
2149ed24f4bSMarc Zyngier 	case KVM_CAP_ARM_USER_IRQ:
2159ed24f4bSMarc Zyngier 		/*
2169ed24f4bSMarc Zyngier 		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
2179ed24f4bSMarc Zyngier 		 * (bump this number if adding more devices)
2189ed24f4bSMarc Zyngier 		 */
2199ed24f4bSMarc Zyngier 		r = 1;
2209ed24f4bSMarc Zyngier 		break;
221004a0124SAndrew Jones 	case KVM_CAP_STEAL_TIME:
222004a0124SAndrew Jones 		r = kvm_arm_pvtime_supported();
223004a0124SAndrew Jones 		break;
2249ed24f4bSMarc Zyngier 	default:
2259ed24f4bSMarc Zyngier 		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
2269ed24f4bSMarc Zyngier 		break;
2279ed24f4bSMarc Zyngier 	}
2289ed24f4bSMarc Zyngier 	return r;
2299ed24f4bSMarc Zyngier }
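
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the extension checks above are reached through KVM_CHECK_EXTENSION.
 * Probing on the VM fd lets the per-VM cases (e.g. KVM_CAP_MAX_VCPUS)
 * report the GIC-derived limit rather than the compile-time default:
 *
 *	int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *
 *	if (max_vcpus < 0)
 *		err(1, "KVM_CHECK_EXTENSION");
 */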

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
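
/*
 * Illustrative userspace sketch (an assumption about VMM usage): the
 * mp_state accessors above back the KVM_{GET,SET}_MP_STATE vcpu ioctls,
 * e.g. powering a vcpu off from the VMM:
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
 *		err(1, "KVM_SET_MP_STATE");
 */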

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}
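
/*
 * Worked example of the rollover arithmetic above, assuming 8-bit VMIDs
 * (kvm_get_vmid_bits() == 8, so the mask is 0xff): generation G hands
 * out VMIDs 1..255; the 256th allocation wraps kvm_next_vmid to 0, so
 * the next caller bumps kvm_vmid_gen to G+1, forces all guests out and
 * flushes the TLBs, then restarts at VMID 1. Any VM still holding a
 * generation-G VMID fails need_new_vmid_gen() on its next entry and
 * re-allocates under kvm_vmid_lock.
 */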

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
	}
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
	}
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space, in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest.  We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that is not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu->arch.target = -1;
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}
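
/*
 * Illustrative userspace sketch (an assumption, not kernel code): the
 * canonical caller of the function above is a VMM run loop that mmaps
 * the shared kvm_run structure and re-enters until it sees an exit it
 * must handle itself. handle_mmio() is a hypothetical VMM helper:
 *
 *	int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			err(1, "KVM_RUN");
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			handle_mmio(run);	// emulate; the KVM_EXIT_MMIO
 *			break;			// branch above completes it
 *		default:
 *			break;
 *		}
 *	}
 */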

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}
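
/*
 * Illustrative encoding of the irq field decoded above (an assumption
 * about VMM usage of KVM_IRQ_LINE): type, vcpu index and number are
 * packed into one 32-bit word, e.g. asserting SPI 32 on an in-kernel
 * vgic:
 *
 *	struct kvm_irq_level level = {
 *		.irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
 *			 (32 << KVM_ARM_IRQ_NUM_SHIFT),
 *		.level = 1,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_IRQ_LINE, &level) < 0)
 *		err(1, "KVM_IRQ_LINE");
 */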

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int phys_target = kvm_target_cpu();
	int ret;

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu->arch.has_run_once) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}
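
/*
 * Illustrative userspace sketch (an assumption about VMM usage): the
 * usual way to satisfy kvm_vcpu_set_target() is to ask the kernel for
 * the preferred target and feed it straight back, optionally setting
 * feature bits such as KVM_ARM_VCPU_POWER_OFF first:
 *
 *	struct kvm_vcpu_init init;
 *
 *	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
 *		err(1, "KVM_ARM_PREFERRED_TARGET");
 *	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
 *		err(1, "KVM_ARM_VCPU_INIT");
 */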

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}
12209ed24f4bSMarc Zyngier 
12219ed24f4bSMarc Zyngier void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
12229ed24f4bSMarc Zyngier {
12249ed24f4bSMarc Zyngier }
12259ed24f4bSMarc Zyngier 
12269ed24f4bSMarc Zyngier void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
12279ed24f4bSMarc Zyngier 					struct kvm_memory_slot *memslot)
12289ed24f4bSMarc Zyngier {
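	/*
	 * No ranged stage-2 TLB invalidation helper is wired up here, so
	 * drop all of the VM's TLB entries rather than just those covering
	 * the memslot.
	 */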
12299ed24f4bSMarc Zyngier 	kvm_flush_remote_tlbs(kvm);
12309ed24f4bSMarc Zyngier }
12319ed24f4bSMarc Zyngier 
12329ed24f4bSMarc Zyngier static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
12339ed24f4bSMarc Zyngier 					struct kvm_arm_device_addr *dev_addr)
12349ed24f4bSMarc Zyngier {
12359ed24f4bSMarc Zyngier 	unsigned long dev_id, type;
12369ed24f4bSMarc Zyngier 
12379ed24f4bSMarc Zyngier 	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
12389ed24f4bSMarc Zyngier 		KVM_ARM_DEVICE_ID_SHIFT;
12399ed24f4bSMarc Zyngier 	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
12409ed24f4bSMarc Zyngier 		KVM_ARM_DEVICE_TYPE_SHIFT;
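	/*
	 * The id field packs both values; at the time of writing the device
	 * id sits above KVM_ARM_DEVICE_ID_SHIFT and the address type below
	 * it, so e.g. KVM_ARM_DEVICE_VGIC_V2 combined with
	 * KVM_VGIC_V2_ADDR_TYPE_DIST selects the GICv2 distributor base.
	 */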
12419ed24f4bSMarc Zyngier 
12429ed24f4bSMarc Zyngier 	switch (dev_id) {
12439ed24f4bSMarc Zyngier 	case KVM_ARM_DEVICE_VGIC_V2:
12449ed24f4bSMarc Zyngier 		if (!vgic_present)
12459ed24f4bSMarc Zyngier 			return -ENXIO;
12469ed24f4bSMarc Zyngier 		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
12479ed24f4bSMarc Zyngier 	default:
12489ed24f4bSMarc Zyngier 		return -ENODEV;
12499ed24f4bSMarc Zyngier 	}
12509ed24f4bSMarc Zyngier }
12519ed24f4bSMarc Zyngier 
12529ed24f4bSMarc Zyngier long kvm_arch_vm_ioctl(struct file *filp,
12539ed24f4bSMarc Zyngier 		       unsigned int ioctl, unsigned long arg)
12549ed24f4bSMarc Zyngier {
12559ed24f4bSMarc Zyngier 	struct kvm *kvm = filp->private_data;
12569ed24f4bSMarc Zyngier 	void __user *argp = (void __user *)arg;
12579ed24f4bSMarc Zyngier 
12589ed24f4bSMarc Zyngier 	switch (ioctl) {
12599ed24f4bSMarc Zyngier 	case KVM_CREATE_IRQCHIP: {
12609ed24f4bSMarc Zyngier 		int ret;
12619ed24f4bSMarc Zyngier 		if (!vgic_present)
12629ed24f4bSMarc Zyngier 			return -ENXIO;
12639ed24f4bSMarc Zyngier 		mutex_lock(&kvm->lock);
12649ed24f4bSMarc Zyngier 		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
12659ed24f4bSMarc Zyngier 		mutex_unlock(&kvm->lock);
12669ed24f4bSMarc Zyngier 		return ret;
12679ed24f4bSMarc Zyngier 	}
12689ed24f4bSMarc Zyngier 	case KVM_ARM_SET_DEVICE_ADDR: {
12699ed24f4bSMarc Zyngier 		struct kvm_arm_device_addr dev_addr;
12709ed24f4bSMarc Zyngier 
12719ed24f4bSMarc Zyngier 		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
12729ed24f4bSMarc Zyngier 			return -EFAULT;
12739ed24f4bSMarc Zyngier 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
12749ed24f4bSMarc Zyngier 	}
12759ed24f4bSMarc Zyngier 	case KVM_ARM_PREFERRED_TARGET: {
12769ed24f4bSMarc Zyngier 		int err;
12779ed24f4bSMarc Zyngier 		struct kvm_vcpu_init init;
12789ed24f4bSMarc Zyngier 
12799ed24f4bSMarc Zyngier 		err = kvm_vcpu_preferred_target(&init);
12809ed24f4bSMarc Zyngier 		if (err)
12819ed24f4bSMarc Zyngier 			return err;
12829ed24f4bSMarc Zyngier 
12839ed24f4bSMarc Zyngier 		if (copy_to_user(argp, &init, sizeof(init)))
12849ed24f4bSMarc Zyngier 			return -EFAULT;
12859ed24f4bSMarc Zyngier 
12869ed24f4bSMarc Zyngier 		return 0;
12879ed24f4bSMarc Zyngier 	}
12889ed24f4bSMarc Zyngier 	default:
12899ed24f4bSMarc Zyngier 		return -EINVAL;
12909ed24f4bSMarc Zyngier 	}
12919ed24f4bSMarc Zyngier }
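/*
 * Illustrative flow for KVM_ARM_PREFERRED_TARGET (a sketch; vm_fd and
 * vcpu_fd are hypothetical file descriptors for the VM and one vcpu):
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * i.e. the preferred target is queried once per VM and then fed back in
 * to initialise each vcpu.
 */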
12929ed24f4bSMarc Zyngier 
129330c95391SDavid Brazdil static unsigned long nvhe_percpu_size(void)
129430c95391SDavid Brazdil {
129530c95391SDavid Brazdil 	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
129630c95391SDavid Brazdil 		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
129730c95391SDavid Brazdil }
129830c95391SDavid Brazdil 
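/*
 * get_order() rounds up to a whole power-of-two number of pages, so e.g.
 * a 5 KiB per-cpu image with 4 KiB pages maps to order 1 (two pages).
 */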
129930c95391SDavid Brazdil static unsigned long nvhe_percpu_order(void)
130030c95391SDavid Brazdil {
130130c95391SDavid Brazdil 	unsigned long size = nvhe_percpu_size();
130230c95391SDavid Brazdil 
130330c95391SDavid Brazdil 	return size ? get_order(size) : 0;
130430c95391SDavid Brazdil }
130530c95391SDavid Brazdil 
13069ef2b48bSWill Deacon static int kvm_map_vectors(void)
13079ef2b48bSWill Deacon {
13089ef2b48bSWill Deacon 	/*
13099ef2b48bSWill Deacon 	 * SV2  = ARM64_SPECTRE_V2
13109ef2b48bSWill Deacon 	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
13119ef2b48bSWill Deacon 	 *
13129ef2b48bSWill Deacon 	 * !SV2 + !HEL2 -> use direct vectors
13139ef2b48bSWill Deacon 	 *  SV2 + !HEL2 -> use hardened vectors in place
13149ef2b48bSWill Deacon 	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
13159ef2b48bSWill Deacon 	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
13169ef2b48bSWill Deacon 	 */
13179ef2b48bSWill Deacon 	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
13189ef2b48bSWill Deacon 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
13199ef2b48bSWill Deacon 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
13209ef2b48bSWill Deacon 	}
13219ef2b48bSWill Deacon 
13229ef2b48bSWill Deacon 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
13239ef2b48bSWill Deacon 		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
13249ef2b48bSWill Deacon 		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
13259ef2b48bSWill Deacon 
13269ef2b48bSWill Deacon 		/*
13279ef2b48bSWill Deacon 		 * Always allocate a spare vector slot, as we don't
13289ef2b48bSWill Deacon 		 * know yet which CPUs have a BP hardening slot that
13299ef2b48bSWill Deacon 		 * we can reuse.
13309ef2b48bSWill Deacon 		 */
13319ef2b48bSWill Deacon 		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
13329ef2b48bSWill Deacon 		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
13339ef2b48bSWill Deacon 		return create_hyp_exec_mappings(vect_pa, size,
13349ef2b48bSWill Deacon 						&__kvm_bp_vect_base);
13359ef2b48bSWill Deacon 	}
13369ef2b48bSWill Deacon 
13379ef2b48bSWill Deacon 	return 0;
13389ef2b48bSWill Deacon }
13399ef2b48bSWill Deacon 
13409ed24f4bSMarc Zyngier static void cpu_init_hyp_mode(void)
13419ed24f4bSMarc Zyngier {
134263fec243SDavid Brazdil 	struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params);
134304e4caa8SAndrew Scull 	struct arm_smccc_res res;
1344d3e1086cSDavid Brazdil 	unsigned long tcr;
13459ed24f4bSMarc Zyngier 
13469ed24f4bSMarc Zyngier 	/* Switch from the HYP stub to our own HYP init vector */
13479ed24f4bSMarc Zyngier 	__hyp_set_vectors(kvm_get_idmap_vector());
13489ed24f4bSMarc Zyngier 
134971b3ec5fSDavid Brazdil 	/*
135071b3ec5fSDavid Brazdil 	 * Calculate the raw per-cpu offset without a translation from the
135171b3ec5fSDavid Brazdil 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
135271b3ec5fSDavid Brazdil 	 * so that we can use adr_l to access per-cpu variables in EL2.
135371b3ec5fSDavid Brazdil 	 */
135463fec243SDavid Brazdil 	params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
135530c95391SDavid Brazdil 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
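	/*
	 * Hyp code can then reach its per-cpu data with a sequence roughly
	 * like the following sketch:
	 *
	 *	adr_l	x0, <per-cpu symbol>
	 *	mrs	x1, tpidr_el2
	 *	add	x0, x0, x1
	 */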
135671b3ec5fSDavid Brazdil 
1357d3e1086cSDavid Brazdil 	params->mair_el2 = read_sysreg(mair_el1);
1358d3e1086cSDavid Brazdil 
1359d3e1086cSDavid Brazdil 	/*
1360d3e1086cSDavid Brazdil 	 * The ID map may be configured to use an extended virtual address
1361d3e1086cSDavid Brazdil 	 * range. This is only the case if system RAM is out of range for the
1362d3e1086cSDavid Brazdil 	 * currently configured page size and VA_BITS, in which case we will
1363d3e1086cSDavid Brazdil 	 * also need the extended virtual range for the HYP ID map, or we won't
1364d3e1086cSDavid Brazdil 	 * be able to enable the EL2 MMU.
1365d3e1086cSDavid Brazdil 	 *
1366d3e1086cSDavid Brazdil 	 * However, at EL2, there is only one TTBR register, and we can't switch
1367d3e1086cSDavid Brazdil 	 * between translation tables *and* update TCR_EL2.T0SZ at the same
1368d3e1086cSDavid Brazdil 	 * time. Bottom line: we need to use the extended range with *both* our
1369d3e1086cSDavid Brazdil 	 * translation tables.
1370d3e1086cSDavid Brazdil 	 *
1371d3e1086cSDavid Brazdil 	 * So use the same T0SZ value we use for the ID map.
1372d3e1086cSDavid Brazdil 	 */
1373d3e1086cSDavid Brazdil 	tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
1374d3e1086cSDavid Brazdil 	tcr &= ~TCR_T0SZ_MASK;
1375d3e1086cSDavid Brazdil 	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
1376d3e1086cSDavid Brazdil 	params->tcr_el2 = tcr;
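	/*
	 * For instance, if the ID map had to be extended to 52-bit VAs
	 * (idmap_t0sz encoding T0SZ = 12) while the kernel runs with 48-bit
	 * VA_BITS, EL2 inherits T0SZ = 12 here so that both sets of
	 * translation tables cover the ID map.
	 */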
1377d3e1086cSDavid Brazdil 
137863fec243SDavid Brazdil 	params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE);
137963fec243SDavid Brazdil 	params->pgd_pa = kvm_mmu_get_httbr();
138063fec243SDavid Brazdil 
138163fec243SDavid Brazdil 	/*
138263fec243SDavid Brazdil 	 * Flush the init params from the data cache because the struct will
138363fec243SDavid Brazdil 	 * be read while the MMU is off.
138463fec243SDavid Brazdil 	 */
138563fec243SDavid Brazdil 	kvm_flush_dcache_to_poc(params, sizeof(*params));
13869ed24f4bSMarc Zyngier 
138771b3ec5fSDavid Brazdil 	/*
138871b3ec5fSDavid Brazdil 	 * Call initialization code, and switch to the full blown HYP code.
138971b3ec5fSDavid Brazdil 	 * If the cpucaps haven't been finalized yet, something has gone very
139071b3ec5fSDavid Brazdil 	 * wrong, and hyp will crash and burn when it uses any
139171b3ec5fSDavid Brazdil 	 * cpus_have_const_cap() wrapper.
139271b3ec5fSDavid Brazdil 	 */
139371b3ec5fSDavid Brazdil 	BUG_ON(!system_capabilities_finalized());
139463fec243SDavid Brazdil 	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
139504e4caa8SAndrew Scull 	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
139671b3ec5fSDavid Brazdil 
139771b3ec5fSDavid Brazdil 	/*
139871b3ec5fSDavid Brazdil 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
139971b3ec5fSDavid Brazdil 	 * at EL2.
140071b3ec5fSDavid Brazdil 	 */
140171b3ec5fSDavid Brazdil 	if (this_cpu_has_cap(ARM64_SSBS) &&
1402d63d975aSMarc Zyngier 	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
140313aeb9b4SDavid Brazdil 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
140471b3ec5fSDavid Brazdil 	}
14059ed24f4bSMarc Zyngier }
14069ed24f4bSMarc Zyngier 
14079ed24f4bSMarc Zyngier static void cpu_hyp_reset(void)
14089ed24f4bSMarc Zyngier {
14099ed24f4bSMarc Zyngier 	if (!is_kernel_in_hyp_mode())
14109ed24f4bSMarc Zyngier 		__hyp_reset_vectors();
14119ed24f4bSMarc Zyngier }
14129ed24f4bSMarc Zyngier 
14139ed24f4bSMarc Zyngier static void cpu_hyp_reinit(void)
14149ed24f4bSMarc Zyngier {
14152a1198c9SDavid Brazdil 	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
14169ed24f4bSMarc Zyngier 
14179ed24f4bSMarc Zyngier 	cpu_hyp_reset();
14189ed24f4bSMarc Zyngier 
141914ef9d04SMarc Zyngier 	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
1420a0e47952SAndrew Scull 
14219ed24f4bSMarc Zyngier 	if (is_kernel_in_hyp_mode())
14229ed24f4bSMarc Zyngier 		kvm_timer_init_vhe();
14239ed24f4bSMarc Zyngier 	else
14249ed24f4bSMarc Zyngier 		cpu_init_hyp_mode();
14259ed24f4bSMarc Zyngier 
14269ed24f4bSMarc Zyngier 	kvm_arm_init_debug();
14279ed24f4bSMarc Zyngier 
14289ed24f4bSMarc Zyngier 	if (vgic_present)
14299ed24f4bSMarc Zyngier 		kvm_vgic_init_cpu_hardware();
14309ed24f4bSMarc Zyngier }
14319ed24f4bSMarc Zyngier 
14329ed24f4bSMarc Zyngier static void _kvm_arch_hardware_enable(void *discard)
14339ed24f4bSMarc Zyngier {
14349ed24f4bSMarc Zyngier 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
14359ed24f4bSMarc Zyngier 		cpu_hyp_reinit();
14369ed24f4bSMarc Zyngier 		__this_cpu_write(kvm_arm_hardware_enabled, 1);
14379ed24f4bSMarc Zyngier 	}
14389ed24f4bSMarc Zyngier }
14399ed24f4bSMarc Zyngier 
14409ed24f4bSMarc Zyngier int kvm_arch_hardware_enable(void)
14419ed24f4bSMarc Zyngier {
14429ed24f4bSMarc Zyngier 	_kvm_arch_hardware_enable(NULL);
14439ed24f4bSMarc Zyngier 	return 0;
14449ed24f4bSMarc Zyngier }
14459ed24f4bSMarc Zyngier 
14469ed24f4bSMarc Zyngier static void _kvm_arch_hardware_disable(void *discard)
14479ed24f4bSMarc Zyngier {
14489ed24f4bSMarc Zyngier 	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
14499ed24f4bSMarc Zyngier 		cpu_hyp_reset();
14509ed24f4bSMarc Zyngier 		__this_cpu_write(kvm_arm_hardware_enabled, 0);
14519ed24f4bSMarc Zyngier 	}
14529ed24f4bSMarc Zyngier }
14539ed24f4bSMarc Zyngier 
14549ed24f4bSMarc Zyngier void kvm_arch_hardware_disable(void)
14559ed24f4bSMarc Zyngier {
14569ed24f4bSMarc Zyngier 	_kvm_arch_hardware_disable(NULL);
14579ed24f4bSMarc Zyngier }
14589ed24f4bSMarc Zyngier 
14599ed24f4bSMarc Zyngier #ifdef CONFIG_CPU_PM
14609ed24f4bSMarc Zyngier static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
14619ed24f4bSMarc Zyngier 				    unsigned long cmd,
14629ed24f4bSMarc Zyngier 				    void *v)
14639ed24f4bSMarc Zyngier {
14649ed24f4bSMarc Zyngier 	/*
14659ed24f4bSMarc Zyngier 	 * kvm_arm_hardware_enabled is left with its old value over
14669ed24f4bSMarc Zyngier 	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
14679ed24f4bSMarc Zyngier 	 * re-enable hyp.
14689ed24f4bSMarc Zyngier 	 */
14699ed24f4bSMarc Zyngier 	switch (cmd) {
14709ed24f4bSMarc Zyngier 	case CPU_PM_ENTER:
14719ed24f4bSMarc Zyngier 		if (__this_cpu_read(kvm_arm_hardware_enabled))
14729ed24f4bSMarc Zyngier 			/*
14739ed24f4bSMarc Zyngier 			 * don't update kvm_arm_hardware_enabled here
14749ed24f4bSMarc Zyngier 			 * so that the hardware will be re-enabled
14759ed24f4bSMarc Zyngier 			 * when we resume. See below.
14769ed24f4bSMarc Zyngier 			 */
14779ed24f4bSMarc Zyngier 			cpu_hyp_reset();
14789ed24f4bSMarc Zyngier 
14799ed24f4bSMarc Zyngier 		return NOTIFY_OK;
14809ed24f4bSMarc Zyngier 	case CPU_PM_ENTER_FAILED:
14819ed24f4bSMarc Zyngier 	case CPU_PM_EXIT:
14829ed24f4bSMarc Zyngier 		if (__this_cpu_read(kvm_arm_hardware_enabled))
14839ed24f4bSMarc Zyngier 			/* The hardware was enabled before suspend. */
14849ed24f4bSMarc Zyngier 			cpu_hyp_reinit();
14859ed24f4bSMarc Zyngier 
14869ed24f4bSMarc Zyngier 		return NOTIFY_OK;
14879ed24f4bSMarc Zyngier 
14889ed24f4bSMarc Zyngier 	default:
14899ed24f4bSMarc Zyngier 		return NOTIFY_DONE;
14909ed24f4bSMarc Zyngier 	}
14919ed24f4bSMarc Zyngier }
14929ed24f4bSMarc Zyngier 
14939ed24f4bSMarc Zyngier static struct notifier_block hyp_init_cpu_pm_nb = {
14949ed24f4bSMarc Zyngier 	.notifier_call = hyp_init_cpu_pm_notifier,
14959ed24f4bSMarc Zyngier };
14969ed24f4bSMarc Zyngier 
14979ed24f4bSMarc Zyngier static void __init hyp_cpu_pm_init(void)
14989ed24f4bSMarc Zyngier {
14999ed24f4bSMarc Zyngier 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
15009ed24f4bSMarc Zyngier }
15019ed24f4bSMarc Zyngier static void __init hyp_cpu_pm_exit(void)
15029ed24f4bSMarc Zyngier {
15039ed24f4bSMarc Zyngier 	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
15049ed24f4bSMarc Zyngier }
15059ed24f4bSMarc Zyngier #else
15069ed24f4bSMarc Zyngier static inline void hyp_cpu_pm_init(void)
15079ed24f4bSMarc Zyngier {
15089ed24f4bSMarc Zyngier }
15099ed24f4bSMarc Zyngier static inline void hyp_cpu_pm_exit(void)
15109ed24f4bSMarc Zyngier {
15119ed24f4bSMarc Zyngier }
15129ed24f4bSMarc Zyngier #endif
15139ed24f4bSMarc Zyngier 
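/*
 * The hyp copy of the MPIDR <-> logical id map below lets EL2 code (e.g.
 * the PSCI relay handling CPU_ON) translate an MPIDR without having to
 * read kernel memory.
 */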
151494f5e8a4SDavid Brazdil static void init_cpu_logical_map(void)
151594f5e8a4SDavid Brazdil {
151694f5e8a4SDavid Brazdil 	unsigned int cpu;
151794f5e8a4SDavid Brazdil 
151894f5e8a4SDavid Brazdil 	/*
151994f5e8a4SDavid Brazdil 	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
152194f5e8a4SDavid Brazdil 	 * Only copy the set of online CPUs whose features have been checked
152194f5e8a4SDavid Brazdil 	 * against the finalized system capabilities. The hypervisor will not
152294f5e8a4SDavid Brazdil 	 * allow any other CPUs from the `possible` set to boot.
152394f5e8a4SDavid Brazdil 	 */
152494f5e8a4SDavid Brazdil 	for_each_online_cpu(cpu)
152594f5e8a4SDavid Brazdil 		kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
152694f5e8a4SDavid Brazdil }
152794f5e8a4SDavid Brazdil 
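/*
 * Record the host's PSCI version and, for PSCI 0.1 where they are
 * DT-provided rather than architecturally fixed, the function ids, so
 * that the nVHE PSCI relay can proxy host calls once protected mode
 * takes over.
 */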
1528*eeeee719SDavid Brazdil static bool init_psci_relay(void)
1529*eeeee719SDavid Brazdil {
1530*eeeee719SDavid Brazdil 	/*
1531*eeeee719SDavid Brazdil 	 * If PSCI has not been initialized, protected KVM cannot install
1532*eeeee719SDavid Brazdil 	 * itself on newly booted CPUs.
1533*eeeee719SDavid Brazdil 	 */
1534*eeeee719SDavid Brazdil 	if (!psci_ops.get_version) {
1535*eeeee719SDavid Brazdil 		kvm_err("Cannot initialize protected mode without PSCI\n");
1536*eeeee719SDavid Brazdil 		return false;
1537*eeeee719SDavid Brazdil 	}
1538*eeeee719SDavid Brazdil 
1539*eeeee719SDavid Brazdil 	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
1540*eeeee719SDavid Brazdil 	kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
1541*eeeee719SDavid Brazdil 	return true;
1542*eeeee719SDavid Brazdil }
1543*eeeee719SDavid Brazdil 
15449ed24f4bSMarc Zyngier static int init_common_resources(void)
15459ed24f4bSMarc Zyngier {
1546039aeb9dSLinus Torvalds 	return kvm_set_ipa_limit();
15479ed24f4bSMarc Zyngier }
15489ed24f4bSMarc Zyngier 
15499ed24f4bSMarc Zyngier static int init_subsystems(void)
15509ed24f4bSMarc Zyngier {
15519ed24f4bSMarc Zyngier 	int err = 0;
15529ed24f4bSMarc Zyngier 
15539ed24f4bSMarc Zyngier 	/*
15549ed24f4bSMarc Zyngier 	 * Enable hardware so that subsystem initialisation can access EL2.
15559ed24f4bSMarc Zyngier 	 */
15569ed24f4bSMarc Zyngier 	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
15579ed24f4bSMarc Zyngier 
15589ed24f4bSMarc Zyngier 	/*
15599ed24f4bSMarc Zyngier 	 * Register the CPU low-power notifier
15609ed24f4bSMarc Zyngier 	 */
15619ed24f4bSMarc Zyngier 	hyp_cpu_pm_init();
15629ed24f4bSMarc Zyngier 
15639ed24f4bSMarc Zyngier 	/*
15649ed24f4bSMarc Zyngier 	 * Init HYP view of VGIC
15659ed24f4bSMarc Zyngier 	 */
15669ed24f4bSMarc Zyngier 	err = kvm_vgic_hyp_init();
15679ed24f4bSMarc Zyngier 	switch (err) {
15689ed24f4bSMarc Zyngier 	case 0:
15699ed24f4bSMarc Zyngier 		vgic_present = true;
15709ed24f4bSMarc Zyngier 		break;
15719ed24f4bSMarc Zyngier 	case -ENODEV:
15729ed24f4bSMarc Zyngier 	case -ENXIO:
15739ed24f4bSMarc Zyngier 		vgic_present = false;
15749ed24f4bSMarc Zyngier 		err = 0;
15759ed24f4bSMarc Zyngier 		break;
15769ed24f4bSMarc Zyngier 	default:
15779ed24f4bSMarc Zyngier 		goto out;
15789ed24f4bSMarc Zyngier 	}
15799ed24f4bSMarc Zyngier 
15809ed24f4bSMarc Zyngier 	/*
15819ed24f4bSMarc Zyngier 	 * Init HYP architected timer support
15829ed24f4bSMarc Zyngier 	 */
15839ed24f4bSMarc Zyngier 	err = kvm_timer_hyp_init(vgic_present);
15849ed24f4bSMarc Zyngier 	if (err)
15859ed24f4bSMarc Zyngier 		goto out;
15869ed24f4bSMarc Zyngier 
15879ed24f4bSMarc Zyngier 	kvm_perf_init();
15889ed24f4bSMarc Zyngier 	kvm_coproc_table_init();
15899ed24f4bSMarc Zyngier 
15909ed24f4bSMarc Zyngier out:
15919ed24f4bSMarc Zyngier 	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
15929ed24f4bSMarc Zyngier 
15939ed24f4bSMarc Zyngier 	return err;
15949ed24f4bSMarc Zyngier }
15959ed24f4bSMarc Zyngier 
15969ed24f4bSMarc Zyngier static void teardown_hyp_mode(void)
15979ed24f4bSMarc Zyngier {
15989ed24f4bSMarc Zyngier 	int cpu;
15999ed24f4bSMarc Zyngier 
16009ed24f4bSMarc Zyngier 	free_hyp_pgds();
160130c95391SDavid Brazdil 	for_each_possible_cpu(cpu) {
16029ed24f4bSMarc Zyngier 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
160330c95391SDavid Brazdil 		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
160430c95391SDavid Brazdil 	}
16059ed24f4bSMarc Zyngier }
16069ed24f4bSMarc Zyngier 
16079ed24f4bSMarc Zyngier /**
16089ed24f4bSMarc Zyngier  * init_hyp_mode - Inits Hyp-mode on all online CPUs
16099ed24f4bSMarc Zyngier  */
16109ed24f4bSMarc Zyngier static int init_hyp_mode(void)
16119ed24f4bSMarc Zyngier {
16129ed24f4bSMarc Zyngier 	int cpu;
16139ed24f4bSMarc Zyngier 	int err = 0;
16149ed24f4bSMarc Zyngier 
16159ed24f4bSMarc Zyngier 	/*
16169ed24f4bSMarc Zyngier 	 * Allocate Hyp PGD and setup Hyp identity mapping
16179ed24f4bSMarc Zyngier 	 * Allocate the Hyp PGD and set up the Hyp identity mapping
16189ed24f4bSMarc Zyngier 	err = kvm_mmu_init();
16199ed24f4bSMarc Zyngier 	if (err)
16209ed24f4bSMarc Zyngier 		goto out_err;
16219ed24f4bSMarc Zyngier 
16229ed24f4bSMarc Zyngier 	/*
16239ed24f4bSMarc Zyngier 	 * Allocate stack pages for Hypervisor-mode
16249ed24f4bSMarc Zyngier 	 */
16259ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
16269ed24f4bSMarc Zyngier 		unsigned long stack_page;
16279ed24f4bSMarc Zyngier 
16289ed24f4bSMarc Zyngier 		stack_page = __get_free_page(GFP_KERNEL);
16299ed24f4bSMarc Zyngier 		if (!stack_page) {
16309ed24f4bSMarc Zyngier 			err = -ENOMEM;
16319ed24f4bSMarc Zyngier 			goto out_err;
16329ed24f4bSMarc Zyngier 		}
16339ed24f4bSMarc Zyngier 
16349ed24f4bSMarc Zyngier 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
16359ed24f4bSMarc Zyngier 	}
16369ed24f4bSMarc Zyngier 
16379ed24f4bSMarc Zyngier 	/*
163830c95391SDavid Brazdil 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
163930c95391SDavid Brazdil 	 */
164030c95391SDavid Brazdil 	for_each_possible_cpu(cpu) {
164130c95391SDavid Brazdil 		struct page *page;
164230c95391SDavid Brazdil 		void *page_addr;
164330c95391SDavid Brazdil 
164430c95391SDavid Brazdil 		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
164530c95391SDavid Brazdil 		if (!page) {
164630c95391SDavid Brazdil 			err = -ENOMEM;
164730c95391SDavid Brazdil 			goto out_err;
164830c95391SDavid Brazdil 		}
164930c95391SDavid Brazdil 
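		/*
		 * Give each CPU its own copy of the initial nVHE per-cpu
		 * data; hyp code will use these private pages rather than
		 * the kernel's own per-cpu area.
		 */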
165030c95391SDavid Brazdil 		page_addr = page_address(page);
165130c95391SDavid Brazdil 		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
165230c95391SDavid Brazdil 		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
165330c95391SDavid Brazdil 	}
165430c95391SDavid Brazdil 
165530c95391SDavid Brazdil 	/*
16569ed24f4bSMarc Zyngier 	 * Map the Hyp-code called directly from the host
16579ed24f4bSMarc Zyngier 	 */
16589ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
16599ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
16609ed24f4bSMarc Zyngier 	if (err) {
16619ed24f4bSMarc Zyngier 		kvm_err("Cannot map world-switch code\n");
16629ed24f4bSMarc Zyngier 		goto out_err;
16639ed24f4bSMarc Zyngier 	}
16649ed24f4bSMarc Zyngier 
16652d7bf218SDavid Brazdil 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
16662d7bf218SDavid Brazdil 				  kvm_ksym_ref(__hyp_data_ro_after_init_end),
16672d7bf218SDavid Brazdil 				  PAGE_HYP_RO);
16682d7bf218SDavid Brazdil 	if (err) {
16692d7bf218SDavid Brazdil 		kvm_err("Cannot map .hyp.data..ro_after_init section\n");
16702d7bf218SDavid Brazdil 		goto out_err;
16712d7bf218SDavid Brazdil 	}
16722d7bf218SDavid Brazdil 
16739ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
16749ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
16759ed24f4bSMarc Zyngier 	if (err) {
16769ed24f4bSMarc Zyngier 		kvm_err("Cannot map rodata section\n");
16779ed24f4bSMarc Zyngier 		goto out_err;
16789ed24f4bSMarc Zyngier 	}
16799ed24f4bSMarc Zyngier 
16809ed24f4bSMarc Zyngier 	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
16819ed24f4bSMarc Zyngier 				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
16829ed24f4bSMarc Zyngier 	if (err) {
16839ed24f4bSMarc Zyngier 		kvm_err("Cannot map bss section\n");
16849ed24f4bSMarc Zyngier 		goto out_err;
16859ed24f4bSMarc Zyngier 	}
16869ed24f4bSMarc Zyngier 
16879ed24f4bSMarc Zyngier 	err = kvm_map_vectors();
16889ed24f4bSMarc Zyngier 	if (err) {
16899ed24f4bSMarc Zyngier 		kvm_err("Cannot map vectors\n");
16909ed24f4bSMarc Zyngier 		goto out_err;
16919ed24f4bSMarc Zyngier 	}
16929ed24f4bSMarc Zyngier 
16939ed24f4bSMarc Zyngier 	/*
16949ed24f4bSMarc Zyngier 	 * Map the Hyp stack pages
16959ed24f4bSMarc Zyngier 	 */
16969ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
16979ed24f4bSMarc Zyngier 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
16989ed24f4bSMarc Zyngier 		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
16999ed24f4bSMarc Zyngier 					  PAGE_HYP);
17009ed24f4bSMarc Zyngier 
17019ed24f4bSMarc Zyngier 		if (err) {
17029ed24f4bSMarc Zyngier 			kvm_err("Cannot map hyp stack\n");
17039ed24f4bSMarc Zyngier 			goto out_err;
17049ed24f4bSMarc Zyngier 		}
17059ed24f4bSMarc Zyngier 	}
17069ed24f4bSMarc Zyngier 
170730c95391SDavid Brazdil 	/*
170830c95391SDavid Brazdil 	 * Map Hyp percpu pages
170930c95391SDavid Brazdil 	 */
17109ed24f4bSMarc Zyngier 	for_each_possible_cpu(cpu) {
171130c95391SDavid Brazdil 		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
171230c95391SDavid Brazdil 		char *percpu_end = percpu_begin + nvhe_percpu_size();
17139ed24f4bSMarc Zyngier 
171430c95391SDavid Brazdil 		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
17159ed24f4bSMarc Zyngier 
17169ed24f4bSMarc Zyngier 		if (err) {
171730c95391SDavid Brazdil 			kvm_err("Cannot map hyp percpu region\n");
17186e3bfbb2SAndrew Scull 			goto out_err;
17196e3bfbb2SAndrew Scull 		}
17209ed24f4bSMarc Zyngier 	}
17219ed24f4bSMarc Zyngier 
1722*eeeee719SDavid Brazdil 	if (is_protected_kvm_enabled()) {
172394f5e8a4SDavid Brazdil 		init_cpu_logical_map();
172494f5e8a4SDavid Brazdil 
1725*eeeee719SDavid Brazdil 		if (!init_psci_relay())
1726*eeeee719SDavid Brazdil 			goto out_err;
1727*eeeee719SDavid Brazdil 	}
1728*eeeee719SDavid Brazdil 
17299ed24f4bSMarc Zyngier 	return 0;
17309ed24f4bSMarc Zyngier 
17319ed24f4bSMarc Zyngier out_err:
17329ed24f4bSMarc Zyngier 	teardown_hyp_mode();
17339ed24f4bSMarc Zyngier 	kvm_err("error initializing Hyp mode: %d\n", err);
17349ed24f4bSMarc Zyngier 	return err;
17359ed24f4bSMarc Zyngier }
17369ed24f4bSMarc Zyngier 
17379ed24f4bSMarc Zyngier static void check_kvm_target_cpu(void *ret)
17389ed24f4bSMarc Zyngier {
17399ed24f4bSMarc Zyngier 	*(int *)ret = kvm_target_cpu();
17409ed24f4bSMarc Zyngier }
17419ed24f4bSMarc Zyngier 
17429ed24f4bSMarc Zyngier struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
17439ed24f4bSMarc Zyngier {
17449ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
17459ed24f4bSMarc Zyngier 	int i;
17469ed24f4bSMarc Zyngier 
17479ed24f4bSMarc Zyngier 	mpidr &= MPIDR_HWID_BITMASK;
17489ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
17499ed24f4bSMarc Zyngier 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
17509ed24f4bSMarc Zyngier 			return vcpu;
17519ed24f4bSMarc Zyngier 	}
17529ed24f4bSMarc Zyngier 	return NULL;
17539ed24f4bSMarc Zyngier }
17549ed24f4bSMarc Zyngier 
17559ed24f4bSMarc Zyngier bool kvm_arch_has_irq_bypass(void)
17569ed24f4bSMarc Zyngier {
17579ed24f4bSMarc Zyngier 	return true;
17589ed24f4bSMarc Zyngier }
17599ed24f4bSMarc Zyngier 
17609ed24f4bSMarc Zyngier int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
17619ed24f4bSMarc Zyngier 				      struct irq_bypass_producer *prod)
17629ed24f4bSMarc Zyngier {
17639ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17649ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17659ed24f4bSMarc Zyngier 
17669ed24f4bSMarc Zyngier 	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
17679ed24f4bSMarc Zyngier 					  &irqfd->irq_entry);
17689ed24f4bSMarc Zyngier }
17699ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
17709ed24f4bSMarc Zyngier 				      struct irq_bypass_producer *prod)
17719ed24f4bSMarc Zyngier {
17729ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17739ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17749ed24f4bSMarc Zyngier 
17759ed24f4bSMarc Zyngier 	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
17769ed24f4bSMarc Zyngier 				     &irqfd->irq_entry);
17779ed24f4bSMarc Zyngier }
17789ed24f4bSMarc Zyngier 
17799ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
17809ed24f4bSMarc Zyngier {
17819ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17829ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17839ed24f4bSMarc Zyngier 
17849ed24f4bSMarc Zyngier 	kvm_arm_halt_guest(irqfd->kvm);
17859ed24f4bSMarc Zyngier }
17869ed24f4bSMarc Zyngier 
17879ed24f4bSMarc Zyngier void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
17889ed24f4bSMarc Zyngier {
17899ed24f4bSMarc Zyngier 	struct kvm_kernel_irqfd *irqfd =
17909ed24f4bSMarc Zyngier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
17919ed24f4bSMarc Zyngier 
17929ed24f4bSMarc Zyngier 	kvm_arm_resume_guest(irqfd->kvm);
17939ed24f4bSMarc Zyngier }
17949ed24f4bSMarc Zyngier 
17959ed24f4bSMarc Zyngier /**
17969ed24f4bSMarc Zyngier  * kvm_arch_init - Initialize Hyp-mode and memory mappings on all CPUs.
17979ed24f4bSMarc Zyngier  */
17989ed24f4bSMarc Zyngier int kvm_arch_init(void *opaque)
17999ed24f4bSMarc Zyngier {
18009ed24f4bSMarc Zyngier 	int err;
18019ed24f4bSMarc Zyngier 	int ret, cpu;
18029ed24f4bSMarc Zyngier 	bool in_hyp_mode;
18039ed24f4bSMarc Zyngier 
18049ed24f4bSMarc Zyngier 	if (!is_hyp_mode_available()) {
18059ed24f4bSMarc Zyngier 		kvm_info("HYP mode not available\n");
18069ed24f4bSMarc Zyngier 		return -ENODEV;
18079ed24f4bSMarc Zyngier 	}
18089ed24f4bSMarc Zyngier 
18099ed24f4bSMarc Zyngier 	in_hyp_mode = is_kernel_in_hyp_mode();
18109ed24f4bSMarc Zyngier 
18119ed24f4bSMarc Zyngier 	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
18129ed24f4bSMarc Zyngier 		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
18139ed24f4bSMarc Zyngier 		return -ENODEV;
18149ed24f4bSMarc Zyngier 	}
18159ed24f4bSMarc Zyngier 
181696d389caSRob Herring 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
181796d389caSRob Herring 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
1818abf532ccSRob Herring 		kvm_info("Guests without required CPU erratum workarounds can deadlock the system!\n" \
1819abf532ccSRob Herring 			 "Only trusted guests should be used on this system.\n");
1820abf532ccSRob Herring 
18219ed24f4bSMarc Zyngier 	for_each_online_cpu(cpu) {
18229ed24f4bSMarc Zyngier 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
18239ed24f4bSMarc Zyngier 		if (ret < 0) {
18249ed24f4bSMarc Zyngier 			kvm_err("Error, CPU %d not supported!\n", cpu);
18259ed24f4bSMarc Zyngier 			return -ENODEV;
18269ed24f4bSMarc Zyngier 		}
18279ed24f4bSMarc Zyngier 	}
18289ed24f4bSMarc Zyngier 
18299ed24f4bSMarc Zyngier 	err = init_common_resources();
18309ed24f4bSMarc Zyngier 	if (err)
18319ed24f4bSMarc Zyngier 		return err;
18329ed24f4bSMarc Zyngier 
18339ed24f4bSMarc Zyngier 	err = kvm_arm_init_sve();
18349ed24f4bSMarc Zyngier 	if (err)
18359ed24f4bSMarc Zyngier 		return err;
18369ed24f4bSMarc Zyngier 
18379ed24f4bSMarc Zyngier 	if (!in_hyp_mode) {
18389ed24f4bSMarc Zyngier 		err = init_hyp_mode();
18399ed24f4bSMarc Zyngier 		if (err)
18409ed24f4bSMarc Zyngier 			goto out_err;
18419ed24f4bSMarc Zyngier 	}
18429ed24f4bSMarc Zyngier 
18439ed24f4bSMarc Zyngier 	err = init_subsystems();
18449ed24f4bSMarc Zyngier 	if (err)
18459ed24f4bSMarc Zyngier 		goto out_hyp;
18469ed24f4bSMarc Zyngier 
18473eb681fbSDavid Brazdil 	if (is_protected_kvm_enabled())
18483eb681fbSDavid Brazdil 		kvm_info("Protected nVHE mode initialized successfully\n");
18493eb681fbSDavid Brazdil 	else if (in_hyp_mode)
18509ed24f4bSMarc Zyngier 		kvm_info("VHE mode initialized successfully\n");
18519ed24f4bSMarc Zyngier 	else
18529ed24f4bSMarc Zyngier 		kvm_info("Hyp mode initialized successfully\n");
18539ed24f4bSMarc Zyngier 
18549ed24f4bSMarc Zyngier 	return 0;
18559ed24f4bSMarc Zyngier 
18569ed24f4bSMarc Zyngier out_hyp:
18579ed24f4bSMarc Zyngier 	hyp_cpu_pm_exit();
18589ed24f4bSMarc Zyngier 	if (!in_hyp_mode)
18599ed24f4bSMarc Zyngier 		teardown_hyp_mode();
18609ed24f4bSMarc Zyngier out_err:
18619ed24f4bSMarc Zyngier 	return err;
18629ed24f4bSMarc Zyngier }
18639ed24f4bSMarc Zyngier 
18649ed24f4bSMarc Zyngier /* NOP: Compiling as a module not supported */
18659ed24f4bSMarc Zyngier void kvm_arch_exit(void)
18669ed24f4bSMarc Zyngier {
18679ed24f4bSMarc Zyngier 	kvm_perf_teardown();
18689ed24f4bSMarc Zyngier }
18699ed24f4bSMarc Zyngier 
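/*
 * Parse the "kvm-arm.mode" kernel command line parameter; e.g. booting
 * with kvm-arm.mode=protected selects KVM_MODE_PROTECTED.
 */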
1870d8b369c4SDavid Brazdil static int __init early_kvm_mode_cfg(char *arg)
1871d8b369c4SDavid Brazdil {
1872d8b369c4SDavid Brazdil 	if (!arg)
1873d8b369c4SDavid Brazdil 		return -EINVAL;
1874d8b369c4SDavid Brazdil 
1875d8b369c4SDavid Brazdil 	if (strcmp(arg, "protected") == 0) {
1876d8b369c4SDavid Brazdil 		kvm_mode = KVM_MODE_PROTECTED;
1877d8b369c4SDavid Brazdil 		return 0;
1878d8b369c4SDavid Brazdil 	}
1879d8b369c4SDavid Brazdil 
1880d8b369c4SDavid Brazdil 	return -EINVAL;
1881d8b369c4SDavid Brazdil }
1882d8b369c4SDavid Brazdil early_param("kvm-arm.mode", early_kvm_mode_cfg);
1883d8b369c4SDavid Brazdil 
18843eb681fbSDavid Brazdil enum kvm_mode kvm_get_mode(void)
18853eb681fbSDavid Brazdil {
18863eb681fbSDavid Brazdil 	return kvm_mode;
18873eb681fbSDavid Brazdil }
18883eb681fbSDavid Brazdil 
18899ed24f4bSMarc Zyngier static int arm_init(void)
18909ed24f4bSMarc Zyngier {
18919ed24f4bSMarc Zyngier 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
18939ed24f4bSMarc Zyngier }
18949ed24f4bSMarc Zyngier 
18959ed24f4bSMarc Zyngier module_init(arm_init);
1896