xref: /openbmc/linux/arch/arm64/kvm/arch_timer.c (revision e721eb06)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

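/*
 * Read the raw physical counter. All guest timer arithmetic in this
 * file is based on this clock, with the per-VM CNTVOFF applied where
 * the virtual counter is needed.
 */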
u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

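/*
 * Work out which timers are backed directly by hardware for this
 * vcpu: with VHE both EL1 timers are, while without VHE only the
 * virtual timer is, and the physical timer is emulated with a
 * software hrtimer.
 */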
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

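/* Arm a host hrtimer to fire @ns nanoseconds from now. */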
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

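/*
 * Return the number of nanoseconds until the timer's compare value is
 * reached, or 0 if it has already expired.
 */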
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
	       !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

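/*
 * The background timer wakes a blocked vcpu thread once a guest timer
 * would have fired; kvm_timer_blocking() armed it before the vcpu
 * went to sleep.
 */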
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

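/*
 * Compute the timer's output line level. If the timer state is loaded
 * on the CPU, sample the hardware control register; otherwise derive
 * the level from the saved CVAL/CTL and the current counter.
 */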
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

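/*
 * Save the hardware state of a directly-backed timer into its context
 * and disable the hardware timer so it cannot fire while the vcpu is
 * out. The register accesses are done with interrupts disabled.
 */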
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
		ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		break;
	case TIMER_PTIMER:
		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
		ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer
	 * for the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

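/*
 * Load the saved timer state back into the hardware registers. CVAL
 * is restored before CTL, with an ISB in between, so that a
 * re-enabled timer never observes a stale compare value.
 */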
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

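/* Propagate the per-VM virtual counter offset to the hypervisor. */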
static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

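/*
 * On vcpu load, refresh the virtual line level and mirror it into the
 * active state of the hardware timer interrupt, so the host does not
 * handle an interrupt that is meant to be presented to the guest.
 */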
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress.  Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(map.direct_vtimer->cntvoff);

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

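/*
 * With a userspace irqchip, tell the run loop whether either timer
 * line has changed since the levels were last reported to userspace
 * in the kvm_run structure.
 */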
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (rcuwait_active(wait))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so there is no need to zero
	 * CNTVOFF_EL2.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	vcpu_vtimer(vcpu)->cnt_ctl = 0;
	vcpu_ptimer(vcpu)->cnt_ctl = 0;

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		vcpu_vtimer(tmp)->cntvoff = cntvoff;

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
	mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	ptimer->cntvoff = 0;

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;
}

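/* Enable the per-cpu host timer interrupts on the CPU this runs on. */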
static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	if (!kvm_timer_compute_delta(timer))
		return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
	else
		return timer->cnt_ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		/* TVAL is the 32-bit distance from the counter to CVAL */
		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer->cnt_cval;
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer->cntvoff;
		break;

	default:
		BUG();
	}

	return val;
}

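/*
 * Read a timer register on behalf of a trapped guest access. The vcpu
 * timer state is temporarily put so that the saved in-memory copy is
 * current while it is read.
 */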
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		/* TVAL writes are signed; (s32) sign-extends the 32-bit value */
		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
		break;

	case TIMER_REG_CTL:
		timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
		break;

	case TIMER_REG_CVAL:
		timer->cnt_cval = val;
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

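/*
 * One-time hypervisor timer setup: validate the timecounter, request
 * the per-cpu host timer interrupts, and register the CPU hotplug
 * callbacks that enable them on each CPU.
 */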
int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	/* First, do the virtual EL1 timer irq */

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
		if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
		    host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
			kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
				host_ptimer_irq);
			host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
		}

		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			goto out_free_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

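/*
 * Check that all vcpus agree on the timer PPI numbers, and claim
 * ownership of those lines in the vgic so no other device can use
 * them.
 */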
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq;
	int i, ret;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

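/*
 * vgic callback: sample the current level of the timer line mapped to
 * @vintid for the vcpu running on this CPU.
 */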
bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    kvm_arch_timer_get_input_level);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    kvm_arch_timer_get_input_level);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
 * bits have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}