/* xref: /openbmc/linux/arch/arm64/kvm/arm.c (revision 11a163f2) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

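/*
 * Only kick (IPI) a vCPU that is still running in guest mode; if it is
 * already on its way out, it will notice any pending request itself.
 */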
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

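/*
 * Handle KVM_ENABLE_CAP on the VM fd. The only per-VM capability taken
 * here is KVM_CAP_ARM_NISV_TO_USER, which forwards non-ISV data aborts
 * to userspace instead of failing them in the kernel.
 */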
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

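/*
 * Without an in-kernel GIC the generic limit applies; with one, the
 * configured GIC model determines how many vCPUs can be addressed.
 */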
static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 * @type:	the VM type (currently encodes the requested IPA size)
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
		return ret;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	bitmap_free(kvm->arch.pmu_filter);

	kvm_vgic_destroy(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}

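/*
 * Report which capabilities are supported, either globally (kvm == NULL)
 * or for a given VM. Extensions not handled here are forwarded to the
 * arch-specific checker.
 */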
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
		r = 1;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->arch.max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	default:
		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

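/*
 * With VHE the kernel already runs at EL2, so struct kvm never needs a
 * hyp mapping and can live in vmalloc space. Without VHE it is kmalloc'd
 * so that it sits in the linear map, which create_hyp_mappings() relies
 * on when mapping it at EL2.
 */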
struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);

	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * Return: true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);

	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

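	/*
	 * Hand out the next VMID and advance the allocator. The mask below
	 * can wrap kvm_next_vmid back to zero, which is reserved for the
	 * host; the wrap is what triggers the generation bump (and global
	 * TLB flush) above on a later allocation. With 8-bit VMIDs, for
	 * example, 255 guests can share one generation.
	 */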
	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}

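/*
 * Complete the one-off setup that can only happen once userspace has
 * finished configuring the VM: map the vgic resources, enable the timer
 * and the PMU, and account for userspace-irqchip VMs.
 */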
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
	}
}

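/*
 * Put the vCPU to sleep until it is both powered on and unpaused. If we
 * were woken while still powered off or paused (e.g. by a signal),
 * re-arm the sleep request so we go back to sleep later.
 */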
static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

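/*
 * Service any requests posted against this vCPU before it (re)enters
 * the guest: sleeping, vCPU reset, stolen time updates and GICv4
 * reloads.
 */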
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
	}
}

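/*
 * For orientation, a typical userspace driver of this interface looks
 * roughly like the sketch below (error handling and the mmap of the
 * kvm_run structure elided):
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, NULL);
 *		... inspect run->exit_reason, emulate, loop ...
 *	}
 */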
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is invoked via the KVM_RUN ioctl from user space. It executes
 * VM code in a loop until the time slice for the process is used up or until
 * user space needs to perform some emulation, in which case it returns 0 with
 * the kvm_run structure filled in with the data required for the requested
 * emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest.  We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted, ticks after that point are not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}

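/*
 * Assert or deassert the virtual IRQ or FIQ line of a vCPU without an
 * in-kernel irqchip, by toggling HCR_VI/HCR_VF and kicking the vCPU so
 * the change is picked up on the next world switch.
 */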
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

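/*
 * Decode and route a KVM_IRQ_LINE request. The irq field packs
 * {irq_type, vcpu2, vcpu, irq_num} using the KVM_ARM_IRQ_* shifts and
 * masks from the uapi headers; CPU IRQ/FIQ lines are only valid without
 * an in-kernel irqchip, while PPIs and SPIs require the in-kernel vgic.
 */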
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) *
		    (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

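/*
 * Pin the vCPU to the host's CPU type and record the requested feature
 * set, enforcing that repeated KVM_ARM_VCPU_INIT calls use the same
 * target and features, before resetting the vCPU into a runnable state.
 */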
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int ret;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu->arch.has_run_once) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;

		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

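/*
 * Size (and allocation order) of the per-CPU region that has to be
 * copied and mapped at EL2 for the nVHE hypervisor, as delimited by its
 * __per_cpu_start/__per_cpu_end symbols.
 */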
static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

static int kvm_map_vectors(void)
{
	/*
	 * SV2  = ARM64_SPECTRE_V2
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !SV2 + !HEL2 -> use direct vectors
	 *  SV2 + !HEL2 -> use hardened vectors in place
	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}

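/*
 * Install the hypervisor on the current CPU: switch from the hyp stub
 * to KVM's init vectors, then issue the init hypercall with this CPU's
 * hyp page-table base, stack and vector pointers.
 */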
static void cpu_init_hyp_mode(void)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long vector_ptr;
	unsigned long tpidr_el2;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	pgd_ptr = kvm_mmu_get_httbr();
	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
			  pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

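/* Re-install the hyp stub vectors so that EL2 can be re-initialised. */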
static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

static void cpu_hyp_reinit(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	cpu_hyp_reset();

	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();
	else
		cpu_init_hyp_mode();

	kvm_arm_init_debug();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}

static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static int init_common_resources(void)
{
	return kvm_set_ipa_limit();
}

static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}

static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
	}
}

/*
 * Initializes Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	/*
	 * Map Hyp percpu pages
	 */
	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

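/*
 * Look up the vCPU whose MPIDR affinity fields match @mpidr, as used by
 * PSCI CPU_ON and the vgic to resolve target CPUs. Returns NULL when no
 * vCPU matches.
 */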
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/*
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (in_hyp_mode)
		kvm_info("VHE mode initialized successfully\n");
	else
		kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

module_init(arm_init);