19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier /*
39ed24f4bSMarc Zyngier * Copyright (C) 2012 ARM Ltd.
49ed24f4bSMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com>
59ed24f4bSMarc Zyngier */
69ed24f4bSMarc Zyngier
79ed24f4bSMarc Zyngier #include <linux/cpu.h>
89ed24f4bSMarc Zyngier #include <linux/kvm.h>
99ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
109ed24f4bSMarc Zyngier #include <linux/interrupt.h>
119ed24f4bSMarc Zyngier #include <linux/irq.h>
125f592296SMarc Zyngier #include <linux/irqdomain.h>
139ed24f4bSMarc Zyngier #include <linux/uaccess.h>
149ed24f4bSMarc Zyngier
159ed24f4bSMarc Zyngier #include <clocksource/arm_arch_timer.h>
169ed24f4bSMarc Zyngier #include <asm/arch_timer.h>
179ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h>
189ed24f4bSMarc Zyngier #include <asm/kvm_hyp.h>
1981dc9504SMarc Zyngier #include <asm/kvm_nested.h>
209ed24f4bSMarc Zyngier
219ed24f4bSMarc Zyngier #include <kvm/arm_vgic.h>
229ed24f4bSMarc Zyngier #include <kvm/arm_arch_timer.h>
239ed24f4bSMarc Zyngier
249ed24f4bSMarc Zyngier #include "trace.h"
259ed24f4bSMarc Zyngier
269ed24f4bSMarc Zyngier static struct timecounter *timecounter;
279ed24f4bSMarc Zyngier static unsigned int host_vtimer_irq;
289ed24f4bSMarc Zyngier static unsigned int host_ptimer_irq;
299ed24f4bSMarc Zyngier static u32 host_vtimer_irq_flags;
309ed24f4bSMarc Zyngier static u32 host_ptimer_irq_flags;
319ed24f4bSMarc Zyngier
329ed24f4bSMarc Zyngier static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
339ed24f4bSMarc Zyngier
/* Default PPI number used for each guest timer, indexed by timer kind */
static const u8 default_ppi[] = {
	[TIMER_PTIMER]  = 30,
	[TIMER_VTIMER]  = 27,
	[TIMER_HPTIMER] = 26,
	[TIMER_HVTIMER] = 28,
};
409ed24f4bSMarc Zyngier
419ed24f4bSMarc Zyngier static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
429ed24f4bSMarc Zyngier static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
439ed24f4bSMarc Zyngier struct arch_timer_context *timer_ctx);
449ed24f4bSMarc Zyngier static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
459ed24f4bSMarc Zyngier static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
469ed24f4bSMarc Zyngier struct arch_timer_context *timer,
479ed24f4bSMarc Zyngier enum kvm_arch_timer_regs treg,
489ed24f4bSMarc Zyngier u64 val);
499ed24f4bSMarc Zyngier static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
509ed24f4bSMarc Zyngier struct arch_timer_context *timer,
519ed24f4bSMarc Zyngier enum kvm_arch_timer_regs treg);
5281dc9504SMarc Zyngier static bool kvm_arch_timer_get_input_level(int vintid);
5381dc9504SMarc Zyngier
/*
 * IRQ ops exposing the timer output line level via
 * kvm_arch_timer_get_input_level() — presumably registered with the
 * vgic when the timer interrupt is mapped (registration not visible in
 * this chunk).
 */
static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};
579ed24f4bSMarc Zyngier
nr_timers(struct kvm_vcpu * vcpu)58476fcd4bSMarc Zyngier static int nr_timers(struct kvm_vcpu *vcpu)
59476fcd4bSMarc Zyngier {
6081dc9504SMarc Zyngier if (!vcpu_has_nv(vcpu))
6181dc9504SMarc Zyngier return NR_KVM_EL0_TIMERS;
6281dc9504SMarc Zyngier
63476fcd4bSMarc Zyngier return NR_KVM_TIMERS;
64476fcd4bSMarc Zyngier }
659ed24f4bSMarc Zyngier
timer_get_ctl(struct arch_timer_context * ctxt)6641ce82f6SMarc Zyngier u32 timer_get_ctl(struct arch_timer_context *ctxt)
6741ce82f6SMarc Zyngier {
6841ce82f6SMarc Zyngier struct kvm_vcpu *vcpu = ctxt->vcpu;
6941ce82f6SMarc Zyngier
7041ce82f6SMarc Zyngier switch(arch_timer_ctx_index(ctxt)) {
7141ce82f6SMarc Zyngier case TIMER_VTIMER:
7241ce82f6SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
7341ce82f6SMarc Zyngier case TIMER_PTIMER:
7441ce82f6SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
7581dc9504SMarc Zyngier case TIMER_HVTIMER:
7681dc9504SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
7781dc9504SMarc Zyngier case TIMER_HPTIMER:
7881dc9504SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
7941ce82f6SMarc Zyngier default:
8041ce82f6SMarc Zyngier WARN_ON(1);
8141ce82f6SMarc Zyngier return 0;
8241ce82f6SMarc Zyngier }
8341ce82f6SMarc Zyngier }
8441ce82f6SMarc Zyngier
timer_get_cval(struct arch_timer_context * ctxt)8541ce82f6SMarc Zyngier u64 timer_get_cval(struct arch_timer_context *ctxt)
8641ce82f6SMarc Zyngier {
8741ce82f6SMarc Zyngier struct kvm_vcpu *vcpu = ctxt->vcpu;
8841ce82f6SMarc Zyngier
8941ce82f6SMarc Zyngier switch(arch_timer_ctx_index(ctxt)) {
9041ce82f6SMarc Zyngier case TIMER_VTIMER:
9141ce82f6SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
9241ce82f6SMarc Zyngier case TIMER_PTIMER:
9341ce82f6SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
9481dc9504SMarc Zyngier case TIMER_HVTIMER:
9581dc9504SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
9681dc9504SMarc Zyngier case TIMER_HPTIMER:
9781dc9504SMarc Zyngier return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
9841ce82f6SMarc Zyngier default:
9941ce82f6SMarc Zyngier WARN_ON(1);
10041ce82f6SMarc Zyngier return 0;
10141ce82f6SMarc Zyngier }
10241ce82f6SMarc Zyngier }
10341ce82f6SMarc Zyngier
timer_get_offset(struct arch_timer_context * ctxt)10441ce82f6SMarc Zyngier static u64 timer_get_offset(struct arch_timer_context *ctxt)
10541ce82f6SMarc Zyngier {
1061e0eec09SMarc Zyngier u64 offset = 0;
10741ce82f6SMarc Zyngier
1081e0eec09SMarc Zyngier if (!ctxt)
10941ce82f6SMarc Zyngier return 0;
1101e0eec09SMarc Zyngier
1111e0eec09SMarc Zyngier if (ctxt->offset.vm_offset)
1121e0eec09SMarc Zyngier offset += *ctxt->offset.vm_offset;
1131e0eec09SMarc Zyngier if (ctxt->offset.vcpu_offset)
1141e0eec09SMarc Zyngier offset += *ctxt->offset.vcpu_offset;
1151e0eec09SMarc Zyngier
1161e0eec09SMarc Zyngier return offset;
11741ce82f6SMarc Zyngier }
11841ce82f6SMarc Zyngier
/*
 * Write the guest's shadow copy of the CTL register for the given
 * timer context into the vcpu's sysreg file.
 */
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	int reg;

	/* Map the timer context onto its backing sysreg */
	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		reg = CNTV_CTL_EL0;
		break;
	case TIMER_PTIMER:
		reg = CNTP_CTL_EL0;
		break;
	case TIMER_HVTIMER:
		reg = CNTHV_CTL_EL2;
		break;
	case TIMER_HPTIMER:
		reg = CNTHP_CTL_EL2;
		break;
	default:
		WARN_ON(1);
		return;
	}

	__vcpu_sys_reg(vcpu, reg) = ctl;
}
14041ce82f6SMarc Zyngier
/*
 * Write the guest's shadow copy of the CVAL register for the given
 * timer context into the vcpu's sysreg file.
 */
static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	int reg;

	/* Map the timer context onto its backing sysreg */
	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		reg = CNTV_CVAL_EL0;
		break;
	case TIMER_PTIMER:
		reg = CNTP_CVAL_EL0;
		break;
	case TIMER_HVTIMER:
		reg = CNTHV_CVAL_EL2;
		break;
	case TIMER_HPTIMER:
		reg = CNTHP_CVAL_EL2;
		break;
	default:
		WARN_ON(1);
		return;
	}

	__vcpu_sys_reg(vcpu, reg) = cval;
}
16241ce82f6SMarc Zyngier
/*
 * Set the per-VM counter offset for this context.  Contexts without a
 * VM offset can only be "set" to zero; anything else trips a warning.
 */
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	u64 *vm_offset = ctxt->offset.vm_offset;

	if (vm_offset)
		WRITE_ONCE(*vm_offset, offset);
	else
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
}
17241ce82f6SMarc Zyngier
kvm_phys_timer_read(void)1739ed24f4bSMarc Zyngier u64 kvm_phys_timer_read(void)
1749ed24f4bSMarc Zyngier {
1759ed24f4bSMarc Zyngier return timecounter->cc->read(timecounter->cc);
1769ed24f4bSMarc Zyngier }
1779ed24f4bSMarc Zyngier
get_timer_map(struct kvm_vcpu * vcpu,struct timer_map * map)17894046732SMarc Zyngier void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
1799ed24f4bSMarc Zyngier {
18081dc9504SMarc Zyngier if (vcpu_has_nv(vcpu)) {
18181dc9504SMarc Zyngier if (is_hyp_ctxt(vcpu)) {
18281dc9504SMarc Zyngier map->direct_vtimer = vcpu_hvtimer(vcpu);
18381dc9504SMarc Zyngier map->direct_ptimer = vcpu_hptimer(vcpu);
18481dc9504SMarc Zyngier map->emul_vtimer = vcpu_vtimer(vcpu);
18581dc9504SMarc Zyngier map->emul_ptimer = vcpu_ptimer(vcpu);
18681dc9504SMarc Zyngier } else {
1879ed24f4bSMarc Zyngier map->direct_vtimer = vcpu_vtimer(vcpu);
1889ed24f4bSMarc Zyngier map->direct_ptimer = vcpu_ptimer(vcpu);
18981dc9504SMarc Zyngier map->emul_vtimer = vcpu_hvtimer(vcpu);
19081dc9504SMarc Zyngier map->emul_ptimer = vcpu_hptimer(vcpu);
19181dc9504SMarc Zyngier }
19281dc9504SMarc Zyngier } else if (has_vhe()) {
19381dc9504SMarc Zyngier map->direct_vtimer = vcpu_vtimer(vcpu);
19481dc9504SMarc Zyngier map->direct_ptimer = vcpu_ptimer(vcpu);
19581dc9504SMarc Zyngier map->emul_vtimer = NULL;
1969ed24f4bSMarc Zyngier map->emul_ptimer = NULL;
1979ed24f4bSMarc Zyngier } else {
1989ed24f4bSMarc Zyngier map->direct_vtimer = vcpu_vtimer(vcpu);
1999ed24f4bSMarc Zyngier map->direct_ptimer = NULL;
20081dc9504SMarc Zyngier map->emul_vtimer = NULL;
2019ed24f4bSMarc Zyngier map->emul_ptimer = vcpu_ptimer(vcpu);
2029ed24f4bSMarc Zyngier }
2039ed24f4bSMarc Zyngier
2049ed24f4bSMarc Zyngier trace_kvm_get_timer_map(vcpu->vcpu_id, map);
2059ed24f4bSMarc Zyngier }
2069ed24f4bSMarc Zyngier
/* True when the VM has no in-kernel irqchip (GIC emulated in userspace) */
static inline bool userspace_irqchip(struct kvm *kvm)
{
	return unlikely(!irqchip_in_kernel(kvm));
}
2119ed24f4bSMarc Zyngier
/* Arm a software hrtimer to fire @ns nanoseconds from now */
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	ktime_t expiry = ktime_add_ns(ktime_get(), ns);

	hrtimer_start(hrt, expiry, HRTIMER_MODE_ABS_HARD);
}
2179ed24f4bSMarc Zyngier
/* Cancel a software hrtimer, waiting for a running callback to finish */
static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}
2229ed24f4bSMarc Zyngier
/*
 * Host interrupt handler for the (per-cpu) timer IRQs.  Resolves the
 * firing IRQ to the currently-loaded direct timer context and, if the
 * timer really expired, raises the corresponding guest interrupt line.
 */
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	/* Anything that isn't the vtimer IRQ is treated as the ptimer's */
	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	/*
	 * Without GIC active-state support and with a userspace irqchip,
	 * mask the host vtimer IRQ so it doesn't keep re-firing until the
	 * guest state is updated.
	 */
	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}
2549ed24f4bSMarc Zyngier
/*
 * Nanoseconds from now until the guest counter (physical counter minus
 * the context's offset) reaches @val, or 0 if that point has already
 * passed.  Fractional cycles are accumulated in the context's ns_frac.
 */
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (val <= now)
		return 0;

	return cyclecounter_cyc2ns(timecounter->cc, val - now,
				   timecounter->mask,
				   &timer_ctx->ns_frac);
}
2729ed24f4bSMarc Zyngier
/* ns until this timer's CVAL expires, or 0 if it already did */
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}
277daf85a5fSMarc Zyngier
kvm_timer_irq_can_fire(struct arch_timer_context * timer_ctx)2789ed24f4bSMarc Zyngier static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
2799ed24f4bSMarc Zyngier {
2809ed24f4bSMarc Zyngier WARN_ON(timer_ctx && timer_ctx->loaded);
2819ed24f4bSMarc Zyngier return timer_ctx &&
28241ce82f6SMarc Zyngier ((timer_get_ctl(timer_ctx) &
28341ce82f6SMarc Zyngier (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
2849ed24f4bSMarc Zyngier }
2859ed24f4bSMarc Zyngier
/*
 * True when the CPU implements FEAT_WFxT and the vcpu is currently
 * blocked in a WFIT instruction (IN_WFIT flag set).
 */
static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}
29189f5074cSMarc Zyngier
wfit_delay_ns(struct kvm_vcpu * vcpu)29289f5074cSMarc Zyngier static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
29389f5074cSMarc Zyngier {
29489f5074cSMarc Zyngier u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
29581dc9504SMarc Zyngier struct arch_timer_context *ctx;
29681dc9504SMarc Zyngier
29781dc9504SMarc Zyngier ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
29881dc9504SMarc Zyngier : vcpu_vtimer(vcpu);
29989f5074cSMarc Zyngier
30089f5074cSMarc Zyngier return kvm_counter_compute_delta(ctx, val);
30189f5074cSMarc Zyngier }
30289f5074cSMarc Zyngier
3039ed24f4bSMarc Zyngier /*
3049ed24f4bSMarc Zyngier * Returns the earliest expiration time in ns among guest timers.
3059ed24f4bSMarc Zyngier * Note that it will return 0 if none of timers can fire.
3069ed24f4bSMarc Zyngier */
kvm_timer_earliest_exp(struct kvm_vcpu * vcpu)3079ed24f4bSMarc Zyngier static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
3089ed24f4bSMarc Zyngier {
3099ed24f4bSMarc Zyngier u64 min_delta = ULLONG_MAX;
3109ed24f4bSMarc Zyngier int i;
3119ed24f4bSMarc Zyngier
312476fcd4bSMarc Zyngier for (i = 0; i < nr_timers(vcpu); i++) {
3139ed24f4bSMarc Zyngier struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
3149ed24f4bSMarc Zyngier
3159ed24f4bSMarc Zyngier WARN(ctx->loaded, "timer %d loaded\n", i);
3169ed24f4bSMarc Zyngier if (kvm_timer_irq_can_fire(ctx))
3179ed24f4bSMarc Zyngier min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
3189ed24f4bSMarc Zyngier }
3199ed24f4bSMarc Zyngier
32089f5074cSMarc Zyngier if (vcpu_has_wfit_active(vcpu))
32189f5074cSMarc Zyngier min_delta = min(min_delta, wfit_delay_ns(vcpu));
32289f5074cSMarc Zyngier
3239ed24f4bSMarc Zyngier /* If none of timers can fire, then return 0 */
3249ed24f4bSMarc Zyngier if (min_delta == ULLONG_MAX)
3259ed24f4bSMarc Zyngier return 0;
3269ed24f4bSMarc Zyngier
3279ed24f4bSMarc Zyngier return min_delta;
3289ed24f4bSMarc Zyngier }
3299ed24f4bSMarc Zyngier
/*
 * Callback for the background hrtimer armed in kvm_timer_blocking():
 * wakes the blocked vcpu, unless the guest timer hasn't actually
 * expired yet, in which case the hrtimer is re-armed.
 */
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}
3539ed24f4bSMarc Zyngier
/*
 * Callback for an emulated timer's hrtimer (armed in timer_emulate()):
 * raises the guest interrupt line, unless the timer hasn't actually
 * expired yet, in which case the hrtimer is re-armed.
 */
static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}
3799ed24f4bSMarc Zyngier
/*
 * Compute the timer's output line level: enabled, condition met, and
 * not masked.  A loaded context is sampled straight from the hardware
 * registers; a saved context is evaluated against the shadow CVAL and
 * the offset counter.
 */
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		/*
		 * Both virtual timers are backed by the CNTV accessors
		 * when loaded, both physical ones by CNTP (mirrors the
		 * grouping in timer_save_state()/timer_restore_state()).
		 */
		switch (index) {
		case TIMER_VTIMER:
		case TIMER_HVTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
		case TIMER_HPTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		       (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	/* Compare the shadow CVAL against the guest's view of the counter */
	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}
4219ed24f4bSMarc Zyngier
/* A WFIT whose deadline has already passed counts as a pending timer */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_wfit_active(vcpu))
		return 0;

	return wfit_delay_ns(vcpu) == 0;
}
4269ed24f4bSMarc Zyngier
4279ed24f4bSMarc Zyngier /*
4289ed24f4bSMarc Zyngier * Reflect the timer output level into the kvm_run structure
4299ed24f4bSMarc Zyngier */
kvm_timer_update_run(struct kvm_vcpu * vcpu)4309ed24f4bSMarc Zyngier void kvm_timer_update_run(struct kvm_vcpu *vcpu)
4319ed24f4bSMarc Zyngier {
4329ed24f4bSMarc Zyngier struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
4339ed24f4bSMarc Zyngier struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
4349ed24f4bSMarc Zyngier struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4359ed24f4bSMarc Zyngier
4369ed24f4bSMarc Zyngier /* Populate the device bitmap with the timer states */
4379ed24f4bSMarc Zyngier regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
4389ed24f4bSMarc Zyngier KVM_ARM_DEV_EL1_PTIMER);
4399ed24f4bSMarc Zyngier if (kvm_timer_should_fire(vtimer))
4409ed24f4bSMarc Zyngier regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
4419ed24f4bSMarc Zyngier if (kvm_timer_should_fire(ptimer))
4429ed24f4bSMarc Zyngier regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
4439ed24f4bSMarc Zyngier }
4449ed24f4bSMarc Zyngier
/*
 * Latch a new output level for the timer and, when the VM has an
 * in-kernel irqchip, propagate it to the vgic.  With a userspace
 * irqchip the level is only recorded in the context.
 */
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_irq(timer_ctx),
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}
4629ed24f4bSMarc Zyngier
4639ed24f4bSMarc Zyngier /* Only called for a fully emulated timer */
/*
 * Sync an emulated timer with the guest: update the interrupt line if
 * the output level changed, and (re)arm a soft timer for the next
 * expiry when one is still to come.
 */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level)
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}
4839ed24f4bSMarc Zyngier
/* Program CNTVOFF_EL2 via the hyp call (works for both VHE and nVHE) */
static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}
488fc6ee952SMarc Zyngier
/* Program CNTPOFF_EL2, but only on CPUs that implement it */
static void set_cntpoff(u64 cntpoff)
{
	if (has_cntpoff())
		write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
}
4942b4825a8SMarc Zyngier
/*
 * Save a loaded timer context from the CPU into the vcpu's shadow
 * sysregs and disable the hardware timer.  Runs with interrupts off so
 * the hardware state cannot change while we copy it.  No-op if the
 * timer subsystem is disabled or the context isn't loaded.
 */
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	u64 cval;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter of non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so no need to zero CNTVOFF_EL2
		 * register, but this is actually useful when switching
		 * between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		cval = read_sysreg_el0(SYS_CNTP_CVAL);

		/* The HW CVAL includes the physical offset; strip it */
		cval -= timer_get_offset(ctx);

		timer_set_cval(ctx, cval);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		set_cntpoff(0);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}
5639ed24f4bSMarc Zyngier
5649ed24f4bSMarc Zyngier /*
56591b99ea7SSean Christopherson * Schedule the background timer before calling kvm_vcpu_halt, so that this
5669ed24f4bSMarc Zyngier * thread is removed from its waitqueue and made runnable when there's a timer
5679ed24f4bSMarc Zyngier * interrupt to handle.
5689ed24f4bSMarc Zyngier */
/*
 * Arm the per-vcpu background hrtimer for the earliest pending guest
 * timer (or WFIT deadline) so the vcpu gets woken from its waitqueue
 * when a timer interrupt is due.  Nothing is armed when no timer can
 * fire.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest time will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
5939ed24f4bSMarc Zyngier
kvm_timer_unblocking(struct kvm_vcpu * vcpu)5949ed24f4bSMarc Zyngier static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
5959ed24f4bSMarc Zyngier {
5969ed24f4bSMarc Zyngier struct arch_timer_cpu *timer = vcpu_timer(vcpu);
5979ed24f4bSMarc Zyngier
5989ed24f4bSMarc Zyngier soft_timer_cancel(&timer->bg_timer);
5999ed24f4bSMarc Zyngier }
6009ed24f4bSMarc Zyngier
/*
 * Load a saved timer context onto the CPU: program the counter offset,
 * then CVAL, then (after an isb) CTL.  Runs with interrupts off; no-op
 * if the timer subsystem is disabled or the context is already loaded.
 */
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	u64 cval, offset;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		cval = timer_get_cval(ctx);
		offset = timer_get_offset(ctx);
		set_cntpoff(offset);
		/* The HW CVAL carries the physical offset; add it back */
		cval += offset;
		write_sysreg_el0(cval, SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}
6459ed24f4bSMarc Zyngier
/*
 * Propagate the desired active state of the timer's physical interrupt
 * to the host irqchip, warning if the irqchip rejects it.
 */
static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	WARN_ON(irq_set_irqchip_state(ctx->host_timer_irq,
				      IRQCHIP_STATE_ACTIVE, active));
}
6529ed24f4bSMarc Zyngier
/*
 * Sync the physical active state of a directly-mapped timer interrupt
 * with the vcpu's view of it, just before the HW timer state is
 * restored on this CPU.
 */
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));

	/* Keep the line active as long as the virtual level is high */
	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}
6739ed24f4bSMarc Zyngier
/*
 * vcpu_load path for the userspace-irqchip case: only the virtual timer
 * is dealt with, and the host vtimer interrupt is masked/unmasked to
 * mirror the virtual line level.
 */
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
7019ed24f4bSMarc Zyngier
/*
 * If _pred is true, set bit in _set, otherwise set it in _clr.
 * Used by timer_set_traps() to accumulate the CNTHCTL_EL2 update as a
 * single clear/set pair.
 */
#define assign_clear_set_bit(_pred, _bit, _clr, _set)			\
	do {								\
		if (_pred)						\
			(_set) |= (_bit);				\
		else							\
			(_clr) |= (_bit);				\
	} while (0)
710c605ee24SMarc Zyngier
/*
 * On a nested-capable system, switch which timers are backed by the HW
 * interrupts when the vcpu has moved between vEL2 and vEL1/0: the
 * previous direct timers become emulated and vice versa.
 */
static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
					      struct timer_map *map)
{
	int hw, ret;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	/*
	 * We only ever unmap the vtimer irq on a VHE system that runs nested
	 * virtualization, in which case we have both a valid emul_vtimer,
	 * emul_ptimer, direct_vtimer, and direct_ptimer.
	 *
	 * Since this is called from kvm_timer_vcpu_load(), a change between
	 * vEL2 and vEL1/0 will have just happened, and the timer_map will
	 * represent this, and therefore we switch the emul/direct mappings
	 * below.
	 */
	hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
	if (hw < 0) {
		/* Not mapped yet: move the phys irqs over to the direct timers */
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));

		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_vtimer->host_timer_irq,
					    timer_irq(map->direct_vtimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_ptimer->host_timer_irq,
					    timer_irq(map->direct_ptimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);

		/*
		 * The virtual offset behaviour is "interesting", as it
		 * always applies when HCR_EL2.E2H==0, but only when
		 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
		 * track E2H when putting the HV timer in "direct" mode.
		 */
		if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
			struct arch_timer_offset *offs = &map->direct_vtimer->offset;

			if (vcpu_el2_e2h_is_set(vcpu))
				offs->vcpu_offset = NULL;
			else
				offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		}
	}
}
76181dc9504SMarc Zyngier
/*
 * Compute and program the EL1 physical timer/counter trap bits in
 * CNTHCTL_EL2 on a VHE host, merging what KVM needs for emulation with
 * what the guest hypervisor has configured for its own guest (nested).
 */
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	/* tpt: trap the EL1 physical timer; tpc: trap the EL1 physical counter */
	bool tpt, tpc;
	u64 clr, set;

	/*
	 * No trapping gets configured here with nVHE. See
	 * __timer_enable_traps(), which is where the stuff happens.
	 */
	if (!has_vhe())
		return;

	/*
	 * Our default policy is not to trap anything. As we progress
	 * within this function, reality kicks in and we start adding
	 * traps based on emulation requirements.
	 */
	tpt = tpc = false;

	/*
	 * We have two possibilities to deal with a physical offset:
	 *
	 * - Either we have CNTPOFF (yay!) or the offset is 0:
	 *   we let the guest freely access the HW
	 *
	 * - or neither of these conditions applies:
	 *   we trap accesses to the HW, but still use it
	 *   after correcting the physical offset
	 */
	if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
		tpt = tpc = true;

	/*
	 * Apply the enable bits that the guest hypervisor has requested for
	 * its own guest. We can only add traps that wouldn't have been set
	 * above.
	 */
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

		/* Use the VHE format for mental sanity */
		if (!vcpu_el2_e2h_is_set(vcpu))
			val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;

		tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
		tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
	}

	/*
	 * Now that we have collected our requirements, compute the
	 * trap and enable bits.
	 */
	set = 0;
	clr = 0;

	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
	sysreg_clear_set(cnthctl_el2, clr, set);
}
823c605ee24SMarc Zyngier
/*
 * Load the vcpu's timer state onto this CPU: remap HW interrupts for
 * nested configurations, sync the GIC (or userspace-irqchip) view,
 * restore the directly-backed timers into HW, kick software emulation
 * for the rest, and finally program the physical timer traps.
 */
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		/* A vEL2<->vEL1/0 change may require swapping HW irq mappings */
		if (vcpu_has_nv(vcpu))
			kvm_timer_vcpu_load_nested_switch(vcpu, &map);

		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	/* The vcpu is running again; drop the background wake-up timer */
	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);
	if (map.emul_vtimer)
		timer_emulate(map.emul_vtimer);
	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);

	timer_set_traps(vcpu, &map);
}
8579ed24f4bSMarc Zyngier
kvm_timer_should_notify_user(struct kvm_vcpu * vcpu)8589ed24f4bSMarc Zyngier bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
8599ed24f4bSMarc Zyngier {
8609ed24f4bSMarc Zyngier struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
8619ed24f4bSMarc Zyngier struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
8629ed24f4bSMarc Zyngier struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
8639ed24f4bSMarc Zyngier bool vlevel, plevel;
8649ed24f4bSMarc Zyngier
8659ed24f4bSMarc Zyngier if (likely(irqchip_in_kernel(vcpu->kvm)))
8669ed24f4bSMarc Zyngier return false;
8679ed24f4bSMarc Zyngier
8689ed24f4bSMarc Zyngier vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
8699ed24f4bSMarc Zyngier plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
8709ed24f4bSMarc Zyngier
8719ed24f4bSMarc Zyngier return kvm_timer_should_fire(vtimer) != vlevel ||
8729ed24f4bSMarc Zyngier kvm_timer_should_fire(ptimer) != plevel;
8739ed24f4bSMarc Zyngier }
8749ed24f4bSMarc Zyngier
/*
 * Save the HW-backed timer contexts back into memory when the vcpu
 * stops running on this CPU, and stop the per-timer emulation hrtimers.
 */
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	/* A blocking vcpu still needs a timer-driven wake-up (bg_timer) */
	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}
9069ed24f4bSMarc Zyngier
9079ed24f4bSMarc Zyngier /*
9089ed24f4bSMarc Zyngier * With a userspace irqchip we have to check if the guest de-asserted the
9099ed24f4bSMarc Zyngier * timer and if so, unmask the timer irq signal on the host interrupt
9109ed24f4bSMarc Zyngier * controller to ensure that we see future timer signals.
9119ed24f4bSMarc Zyngier */
unmask_vtimer_irq_user(struct kvm_vcpu * vcpu)9129ed24f4bSMarc Zyngier static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
9139ed24f4bSMarc Zyngier {
9149ed24f4bSMarc Zyngier struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
9159ed24f4bSMarc Zyngier
9169ed24f4bSMarc Zyngier if (!kvm_timer_should_fire(vtimer)) {
9179ed24f4bSMarc Zyngier kvm_timer_update_irq(vcpu, false, vtimer);
9189ed24f4bSMarc Zyngier if (static_branch_likely(&has_gic_active_state))
9199ed24f4bSMarc Zyngier set_timer_irq_phys_active(vtimer, false);
9209ed24f4bSMarc Zyngier else
9219ed24f4bSMarc Zyngier enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
9229ed24f4bSMarc Zyngier }
9239ed24f4bSMarc Zyngier }
9249ed24f4bSMarc Zyngier
kvm_timer_sync_user(struct kvm_vcpu * vcpu)9253c5ff0c6SMarc Zyngier void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
9269ed24f4bSMarc Zyngier {
9279ed24f4bSMarc Zyngier struct arch_timer_cpu *timer = vcpu_timer(vcpu);
9289ed24f4bSMarc Zyngier
9299ed24f4bSMarc Zyngier if (unlikely(!timer->enabled))
9309ed24f4bSMarc Zyngier return;
9319ed24f4bSMarc Zyngier
9329ed24f4bSMarc Zyngier if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
9339ed24f4bSMarc Zyngier unmask_vtimer_irq_user(vcpu);
9349ed24f4bSMarc Zyngier }
9359ed24f4bSMarc Zyngier
/*
 * Reset all the vcpu's timers: control registers cleared (timer
 * disabled, unmasked), interrupt lines lowered, mapped irqs reset in
 * the vgic, and any in-flight emulation hrtimers cancelled.
 */
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	for (int i = 0; i < nr_timers(vcpu); i++)
		timer_set_ctl(vcpu_get_timer(vcpu, i), 0);

	/*
	 * A vcpu running at EL2 is in charge of the offset applied to
	 * the virtual timer, so use the physical VM offset, and point
	 * the vcpu offset to CNTVOFF_EL2.
	 */
	if (vcpu_has_nv(vcpu)) {
		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
	}

	if (timer->enabled) {
		/* Lower every timer's interrupt line */
		for (int i = 0; i < nr_timers(vcpu); i++)
			kvm_timer_update_irq(vcpu, false,
					     vcpu_get_timer(vcpu, i));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
		}
	}

	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}
9839ed24f4bSMarc Zyngier
/*
 * One-time setup of a single timer context for a vcpu: link it to the
 * vcpu, pick the VM-wide counter offset it uses, initialize its
 * emulation hrtimer, and record which host PPI backs it.
 */
static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
{
	struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
	struct kvm *kvm = vcpu->kvm;

	ctxt->vcpu = vcpu;

	/* Only the EL1 virtual timer uses the virtual offset by default */
	if (timerid == TIMER_VTIMER)
		ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
	else
		ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;

	hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ctxt->hrtimer.function = kvm_hrtimer_expire;

	/* Physical timers share the host ptimer PPI, virtual ones the vtimer PPI */
	switch (timerid) {
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		ctxt->host_timer_irq = host_ptimer_irq;
		break;
	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		ctxt->host_timer_irq = host_vtimer_irq;
		break;
	}
}
10105591805dSMarc Zyngier
/*
 * Per-vcpu timer initialization: set up every timer context, default
 * the VM-wide counter offsets (unless userspace has already provided
 * one), and prepare the background wake-up hrtimer.
 */
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	for (int i = 0; i < NR_KVM_TIMERS; i++)
		timer_context_init(vcpu, i);

	/* Synchronize offsets across timers of a VM if not already provided */
	if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
		timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
		timer_set_offset(vcpu_ptimer(vcpu), 0);
	}

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;
}
10279ed24f4bSMarc Zyngier
kvm_timer_init_vm(struct kvm * kvm)10288a5eb2d2SMarc Zyngier void kvm_timer_init_vm(struct kvm *kvm)
10298a5eb2d2SMarc Zyngier {
10308a5eb2d2SMarc Zyngier for (int i = 0; i < NR_KVM_TIMERS; i++)
10318a5eb2d2SMarc Zyngier kvm->arch.timer_data.ppi[i] = default_ppi[i];
10329ed24f4bSMarc Zyngier }
10339ed24f4bSMarc Zyngier
/* Enable the host timer PPIs on this CPU (CPU bring-up path). */
void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	/* The ptimer irq is optional; only touch it if one was discovered */
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}
10409ed24f4bSMarc Zyngier
/* Disable the host timer PPIs on this CPU (CPU teardown path). */
void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	/* The ptimer irq is optional; only touch it if one was discovered */
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}
1047466d27e4SMarc Zyngier
/*
 * Userspace (ONE_REG) write of a timer register. Counter writes are
 * converted into offsets against the physical counter, and are ignored
 * once a VM-wide counter offset has been set through the dedicated ABI
 * (KVM_ARCH_FLAG_VM_COUNTER_OFFSET). Returns 0 on success, -1 for an
 * unknown register id.
 */
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_vtimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_ptimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}
10909ed24f4bSMarc Zyngier
read_timer_ctl(struct arch_timer_context * timer)10919ed24f4bSMarc Zyngier static u64 read_timer_ctl(struct arch_timer_context *timer)
10929ed24f4bSMarc Zyngier {
10939ed24f4bSMarc Zyngier /*
10949ed24f4bSMarc Zyngier * Set ISTATUS bit if it's expired.
10959ed24f4bSMarc Zyngier * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
10969ed24f4bSMarc Zyngier * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
10979ed24f4bSMarc Zyngier * regardless of ENABLE bit for our implementation convenience.
10989ed24f4bSMarc Zyngier */
109941ce82f6SMarc Zyngier u32 ctl = timer_get_ctl(timer);
110041ce82f6SMarc Zyngier
11019ed24f4bSMarc Zyngier if (!kvm_timer_compute_delta(timer))
110241ce82f6SMarc Zyngier ctl |= ARCH_TIMER_CTRL_IT_STAT;
110341ce82f6SMarc Zyngier
110441ce82f6SMarc Zyngier return ctl;
11059ed24f4bSMarc Zyngier }
11069ed24f4bSMarc Zyngier
/*
 * Userspace (ONE_REG) read of a timer register: translate the register
 * id into a (context, register) pair and read it. Returns (u64)-1 for
 * an unknown register id.
 */
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_context *ctx;
	enum kvm_arch_timer_regs treg;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CTL;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CNT;
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		ctx = vcpu_vtimer(vcpu);
		treg = TIMER_REG_CVAL;
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CTL;
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CNT;
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		ctx = vcpu_ptimer(vcpu);
		treg = TIMER_REG_CVAL;
		break;
	default:
		return (u64)-1;
	}

	return kvm_arm_timer_read(vcpu, ctx, treg);
}
11319ed24f4bSMarc Zyngier
/*
 * Read one register of a timer context from its in-memory (shadow)
 * state. For HW-backed timers the caller is expected to have saved the
 * HW state first (see kvm_arm_timer_read_sysreg()).
 */
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		/* TVAL is the 32-bit distance from the guest counter to CVAL */
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		/* Guest-visible counter: physical counter minus the offset */
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	case TIMER_REG_VOFF:
		/* Per-vcpu virtual offset (backed by CNTVOFF_EL2 with NV) */
		val = *timer->offset.vcpu_offset;
		break;

	default:
		BUG();
	}

	return val;
}
11669ed24f4bSMarc Zyngier
/*
 * Read a timer register on behalf of a trapped guest access. Emulated
 * timers are read straight from their shadow state; HW-backed timers
 * are saved first so the shadow state is current, then restored.
 */
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_vtimer || timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	/* Keep the save/read/restore sequence on a single CPU */
	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}
11919ed24f4bSMarc Zyngier
/*
 * Write one register of a timer context into its in-memory (shadow)
 * state. For HW-backed timers the caller is expected to have saved the
 * HW state first and to restore it afterwards (see
 * kvm_arm_timer_write_sysreg()).
 */
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		/* TVAL is relative: sign-extend and turn it into an absolute CVAL */
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		/* ISTATUS is read-only status; never latch it into the shadow CTL */
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	case TIMER_REG_VOFF:
		/* Per-vcpu virtual offset (backed by CNTVOFF_EL2 with NV) */
		*timer->offset.vcpu_offset = val;
		break;

	default:
		BUG();
	}
}
12189ed24f4bSMarc Zyngier
/*
 * Write a timer register on behalf of a trapped guest access. Emulated
 * timers are updated in memory and their hrtimer reprogrammed;
 * HW-backed timers are saved, updated and restored so the HW picks up
 * the new value.
 */
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		/* Keep the save/write/restore sequence on a single CPU */
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}
12419ed24f4bSMarc Zyngier
/*
 * irq_set_vcpu_affinity callback for the timer PPIs: record whether the
 * interrupt is currently forwarded to a vcpu (a NULL vcpu clears it).
 */
static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}
12515f592296SMarc Zyngier
/*
 * irq_set_irqchip_state callback for the interposed timer irqchip.
 *
 * Anything other than the active state of a vcpu-forwarded interrupt
 * is passed straight through to the parent chip. For a forwarded
 * interrupt, "active" is emulated by masking/unmasking the parent line
 * instead of driving its real active state (NOTE(review): presumably
 * because HW deactivation is unavailable here -- this chip is only
 * interposed when kvm_vgic_global_state.no_hw_deactivation is set, see
 * kvm_irq_init(); confirm).
 */
static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	/* "Activate" by masking, "deactivate" by unmasking the parent. */
	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}
12655f592296SMarc Zyngier
/*
 * EOI callback: only signal end-of-interrupt to the parent chip when
 * the line is not forwarded to a vcpu.
 */
static void timer_irq_eoi(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	irq_chip_eoi_parent(d);
}
12715f592296SMarc Zyngier
timer_irq_ack(struct irq_data * d)12725f592296SMarc Zyngier static void timer_irq_ack(struct irq_data *d)
12735f592296SMarc Zyngier {
12745f592296SMarc Zyngier d = d->parent_data;
12755f592296SMarc Zyngier if (d->chip->irq_ack)
12765f592296SMarc Zyngier d->chip->irq_ack(d);
12775f592296SMarc Zyngier }
12785f592296SMarc Zyngier
/*
 * irqchip interposed between the host timer interrupt and its parent
 * chip. mask/unmask/set_type go straight to the parent; ack, eoi,
 * active state and vcpu affinity get the special handling implemented
 * above so the vgic can resample the line in SW.
 */
static struct irq_chip timer_chip = {
	.name = "KVM",
	.irq_ack = timer_irq_ack,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_eoi = timer_irq_eoi,
	.irq_set_type = irq_chip_set_type_parent,
	.irq_set_vcpu_affinity = timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state = timer_irq_set_irqchip_state,
};
12895f592296SMarc Zyngier
timer_irq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * arg)12905f592296SMarc Zyngier static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
12915f592296SMarc Zyngier unsigned int nr_irqs, void *arg)
12925f592296SMarc Zyngier {
12935f592296SMarc Zyngier irq_hw_number_t hwirq = (uintptr_t)arg;
12945f592296SMarc Zyngier
12955f592296SMarc Zyngier return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
12965f592296SMarc Zyngier &timer_chip, NULL);
12975f592296SMarc Zyngier }
12985f592296SMarc Zyngier
/*
 * Domain ->free callback. Intentionally empty: the alloc path only
 * overrides the chip/hwirq of an existing descriptor in place, so
 * there is nothing to tear down here.
 */
static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}
13035f592296SMarc Zyngier
/* Ops for the hierarchical domain hosting the interposed timer chip. */
static const struct irq_domain_ops timer_domain_ops = {
	.alloc = timer_irq_domain_alloc,
	.free = timer_irq_domain_free,
};
13085f592296SMarc Zyngier
/*
 * Fetch the trigger type of @virq into @flags, falling back to level
 * low (with a warning) for anything other than level high/low.
 */
static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	u32 type = irq_get_trigger_type(virq);

	switch (type) {
	case IRQF_TRIGGER_HIGH:
	case IRQF_TRIGGER_LOW:
		break;
	default:
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		type = IRQF_TRIGGER_LOW;
		break;
	}

	*flags = type;
}
13182f2f7e39SMarc Zyngier
/*
 * Discover and validate the host timer interrupts described by @info.
 *
 * When the vgic cannot deactivate interrupts in HW, a KVM-owned
 * hierarchical irqdomain is interposed on top of the timer IRQs so the
 * lines can be resampled in SW (see timer_chip above).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		/* Tell the vgic that the line needs SW resampling. */
		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	/* The physical timer IRQ is optional at this point. */
	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}
13662f2f7e39SMarc Zyngier
/*
 * One-time timer initialization for KVM: validate the timecounter and
 * host timer interrupts, request the per-CPU IRQ handlers and, when a
 * GIC is present, point their vcpu affinity at the running-vcpu array.
 *
 * On failure, any IRQ requested so far is released via the goto
 * ladder below. Returns 0 on success, a negative error code otherwise.
 */
int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_vtimer_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			goto out_free_vtimer_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_ptimer_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		/* A usable physical timer IRQ is mandatory with VHE. */
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_vtimer_irq;
	}

	return 0;

out_free_ptimer_irq:
	if (info->physical_irq > 0)
		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
out_free_vtimer_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}
14449ed24f4bSMarc Zyngier
kvm_timer_vcpu_terminate(struct kvm_vcpu * vcpu)14459ed24f4bSMarc Zyngier void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
14469ed24f4bSMarc Zyngier {
14479ed24f4bSMarc Zyngier struct arch_timer_cpu *timer = vcpu_timer(vcpu);
14489ed24f4bSMarc Zyngier
14499ed24f4bSMarc Zyngier soft_timer_cancel(&timer->bg_timer);
14509ed24f4bSMarc Zyngier }
14519ed24f4bSMarc Zyngier
/*
 * Check that each of the vcpu's timers has a distinct PPI assigned and
 * claim ownership of those lines in the vgic. On success, the PPI
 * assignment is latched as immutable for the whole VM.
 *
 * Called with the vgic initialized; serialized by the config_lock.
 */
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	u32 ppis = 0;	/* bitmap of PPIs successfully claimed */
	bool valid;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;
		int irq;

		ctx = vcpu_get_timer(vcpu, i);
		irq = timer_irq(ctx);
		/* Ownership fails if the PPI already has another owner. */
		if (kvm_vgic_set_owner(vcpu, irq, ctx))
			break;

		/*
		 * We know by construction that we only have PPIs, so
		 * all values are less than 32.
		 */
		ppis |= BIT(irq);
	}

	/* Valid iff every timer claimed its own, distinct PPI. */
	valid = hweight32(ppis) == nr_timers(vcpu);

	if (valid)
		set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return valid;
}
14849ed24f4bSMarc Zyngier
kvm_arch_timer_get_input_level(int vintid)148581dc9504SMarc Zyngier static bool kvm_arch_timer_get_input_level(int vintid)
14869ed24f4bSMarc Zyngier {
14879ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
14889ed24f4bSMarc Zyngier
1489efedd01dSMarc Zyngier if (WARN(!vcpu, "No vcpu context!\n"))
1490efedd01dSMarc Zyngier return false;
1491efedd01dSMarc Zyngier
1492476fcd4bSMarc Zyngier for (int i = 0; i < nr_timers(vcpu); i++) {
14938a5eb2d2SMarc Zyngier struct arch_timer_context *ctx;
14949ed24f4bSMarc Zyngier
14958a5eb2d2SMarc Zyngier ctx = vcpu_get_timer(vcpu, i);
14968a5eb2d2SMarc Zyngier if (timer_irq(ctx) == vintid)
14978a5eb2d2SMarc Zyngier return kvm_timer_should_fire(ctx);
14988a5eb2d2SMarc Zyngier }
14998a5eb2d2SMarc Zyngier
15008a5eb2d2SMarc Zyngier /* A timer IRQ has fired, but no matching timer was found? */
15018a5eb2d2SMarc Zyngier WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);
15028a5eb2d2SMarc Zyngier
15038a5eb2d2SMarc Zyngier return false;
15049ed24f4bSMarc Zyngier }
15059ed24f4bSMarc Zyngier
/*
 * Finalize the vcpu's timer setup on first run: validate the timer
 * PPI assignment and map the direct timers' host interrupts into the
 * vgic. Idempotent once timer->enabled is set.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    timer_irq(map.direct_vtimer),
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	/* The direct ptimer only exists in some configurations. */
	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    timer_irq(map.direct_ptimer),
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}
15519ed24f4bSMarc Zyngier
1552c605ee24SMarc Zyngier /* If we have CNTPOFF, permanently set ECV to enable it */
kvm_timer_init_vhe(void)15539ed24f4bSMarc Zyngier void kvm_timer_init_vhe(void)
15549ed24f4bSMarc Zyngier {
15552b4825a8SMarc Zyngier if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
1556fe769e6cSMarc Zyngier sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
15579ed24f4bSMarc Zyngier }
15589ed24f4bSMarc Zyngier
/*
 * KVM_SET_DEVICE_ATTR handler: assign a PPI to one of the vcpu's
 * timers. Fails with -EBUSY once the PPI layout has been latched
 * immutable (see timer_irqs_are_valid()).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	int irq, idx, ret = 0;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	/* Timers can only be wired to PPIs. */
	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
		     &vcpu->kvm->arch.flags)) {
		ret = -EBUSY;
		goto out;
	}

	/* Translate the userspace attribute into a timer index. */
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		idx = TIMER_VTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		idx = TIMER_PTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		idx = TIMER_HVTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		idx = TIMER_HPTIMER;
		break;
	default:
		ret = -ENXIO;
		goto out;
	}

	/*
	 * We cannot validate the IRQ unicity before we run, so take it at
	 * face value. The verdict will be given on first vcpu run, for each
	 * vcpu. Yes this is late. Blame it on the stupid API.
	 */
	vcpu->kvm->arch.timer_data.ppi[idx] = irq;

out:
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	return ret;
}
16109ed24f4bSMarc Zyngier
/*
 * KVM_GET_DEVICE_ATTR handler: report the PPI currently assigned to
 * the requested timer back to userspace.
 *
 * Returns 0 on success, -ENXIO for an unknown attribute, or the
 * put_user() error.
 */
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		timer = vcpu_hvtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		timer = vcpu_hptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer_irq(timer);
	return put_user(irq, uaddr);
}
16379ed24f4bSMarc Zyngier
kvm_arm_timer_has_attr(struct kvm_vcpu * vcpu,struct kvm_device_attr * attr)16389ed24f4bSMarc Zyngier int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
16399ed24f4bSMarc Zyngier {
16409ed24f4bSMarc Zyngier switch (attr->attr) {
16419ed24f4bSMarc Zyngier case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
16429ed24f4bSMarc Zyngier case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
164381dc9504SMarc Zyngier case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
164481dc9504SMarc Zyngier case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
16459ed24f4bSMarc Zyngier return 0;
16469ed24f4bSMarc Zyngier }
16479ed24f4bSMarc Zyngier
16489ed24f4bSMarc Zyngier return -ENXIO;
16499ed24f4bSMarc Zyngier }
165030ec7997SMarc Zyngier
/*
 * KVM_ARM_SET_COUNTER_OFFSET ioctl handler: set a VM-wide counter
 * offset. Requires locking every vcpu (i.e. none can be running);
 * fails with -EBUSY otherwise.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset)
{
	int ret = 0;

	/* Reserved fields must be zero. */
	if (offset->reserved)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (lock_all_vcpus(kvm)) {
		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

		/*
		 * If userspace decides to set the offset using this
		 * API rather than merely restoring the counter
		 * values, the offset applies to both the virtual and
		 * physical views.
		 */
		kvm->arch.timer_data.voffset = offset->counter_offset;
		kvm->arch.timer_data.poffset = offset->counter_offset;

		unlock_all_vcpus(kvm);
	} else {
		ret = -EBUSY;
	}

	mutex_unlock(&kvm->lock);

	return ret;
}
1682