// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

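/*
 * Accessors for the per-context timer registers. Reads and writes of
 * CTL/CVAL (and the virtual offset) are routed to the vcpu's shadow
 * sysreg file, so the same code paths serve both the virtual and the
 * physical EL1 timer.
 */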
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
	default:
		return 0;
	}
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
		break;
	default:
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
	}
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

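/*
 * Build the vcpu's timer map: with VHE both EL1 timers are backed by
 * the hardware while the guest runs; without VHE only the virtual
 * timer is direct and the physical timer is emulated with a hrtimer.
 */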
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

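/*
 * Per-CPU interrupt handler for the host vtimer/ptimer PPIs, shared by
 * both timers and registered in kvm_timer_hyp_init().
 */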
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among the guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

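/*
 * Compute the timer output level: if the context is loaded on this CPU,
 * read the hardware CTL register directly; otherwise derive the level
 * from the saved CTL/CVAL and the current counter value.
 */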
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

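/*
 * Save the hardware state of a directly mapped timer back into the
 * vcpu's shadow registers and disable it, preserving the guest's view
 * while the vcpu is not loaded.
 */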
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter in the non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so there is no need to zero the
		 * CNTVOFF_EL2 register, but this is actually useful
		 * when switching between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer
	 * set to the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

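/*
 * Counterpart of timer_save_state(): write the vcpu's shadow CVAL/CTL
 * (and CNTVOFF for the virtual timer) back into the hardware registers.
 */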
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress.  Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

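/*
 * Called from vcpu_load(): sync the expected interrupt state with the
 * GIC (or mask the host IRQ when using a userspace irqchip), restore
 * the directly mapped timers and re-evaluate the emulated ptimer.
 */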
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	unsigned long i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
}

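/*
 * Per-vcpu initialisation of the timer contexts, hrtimers and default
 * PPI numbers (27 for the vtimer, 30 for the ptimer).
 */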
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

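/*
 * Sysreg trap accessors: for a directly mapped timer the state is
 * saved/restored around the access so the shadow registers are up to
 * date; an emulated timer is accessed in place and re-evaluated on
 * writes.
 */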
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

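/*
 * Discover the host timer PPIs. When the GIC cannot deactivate the
 * interrupt on the guest's behalf, interpose a small irq_chip hierarchy
 * so the active state can be emulated and resampled in software.
 */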
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

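/*
 * System-wide initialisation: request the per-CPU vtimer (and, when
 * advertised, ptimer) interrupts and route them to the running vcpu.
 * A missing physical timer interrupt is only fatal on VHE.
 */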
int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq, ret;
	unsigned long i;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

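/*
 * Final per-vcpu enablement, performed on first run: check that all
 * vcpus agree on the timer PPIs and map the direct timers onto their
 * host interrupts in the vgic.
 */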
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
 * bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

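/*
 * Device attribute accessors backing the KVM_ARM_VCPU_TIMER_IRQ_*
 * userspace API, used to (re)configure the timer PPI numbers before
 * the vcpu timer is enabled.
 */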
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}