xref: /openbmc/linux/arch/riscv/kvm/vcpu_timer.c (revision 432a8b35)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

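/*
 * Guest view of the time counter: host cycles plus the per-VM delta.
 * This mirrors what the hardware does for guest time reads once
 * HTIMEDELTA is programmed (see kvm_riscv_vcpu_update_timedelta()).
 */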
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

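/*
 * Convert the distance between now and a target cycle value into
 * nanoseconds, using the mult/shift factors captured from the RISC-V
 * clocksource in kvm_riscv_guest_timer_init(). A target already in the
 * past yields 0. As an illustration (numbers assumed, not from this
 * file): with a 10 MHz timebase one cycle is 100 ns, so mult/shift are
 * chosen such that (cycles_delta * mult) >> shift ~= cycles_delta * 100.
 */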
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

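/*
 * hrtimer callback used when the Sstc extension is unavailable and the
 * guest timer is fully emulated. If the timer fired early, re-arm it
 * for the remaining nanoseconds; otherwise inject the VS-level timer
 * interrupt into the guest.
 */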
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

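/*
 * Stop a previously armed hrtimer. Fails with -EINVAL if the timer was
 * never initialized or is not currently armed.
 */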
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

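/*
 * Sstc path for programming the next timer event: write the target
 * cycle value straight into the vstimecmp CSR so the hardware raises
 * the VS timer interrupt itself. On 32-bit the 64-bit value is split
 * across VSTIMECMP/VSTIMECMPH.
 */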
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

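/*
 * Emulated path for programming the next timer event: clear any stale
 * VS timer interrupt, remember the target cycle value, and arm an
 * hrtimer for the computed delta.
 */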
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

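/*
 * Program the guest's next timer event (e.g. on an SBI set_timer
 * call), dispatching to the Sstc or hrtimer-based backend selected in
 * kvm_riscv_vcpu_timer_init().
 */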
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

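/*
 * hrtimer callback used in Sstc mode, where the hrtimer is only a
 * wakeup backstop for a blocked vCPU (see
 * kvm_riscv_vcpu_timer_blocking()): instead of injecting an interrupt
 * it kicks the vCPU, and the hardware-managed vstimecmp delivers the
 * event once the vCPU resumes.
 */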
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

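/*
 * Check whether a timer event is due: either the programmed compare
 * value has been reached or a VS timer interrupt is already pending
 * (used, e.g., when deciding whether a blocked vCPU is runnable).
 */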
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

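/*
 * Called when a vCPU blocks (e.g. in WFI): arm a backstop hrtimer for
 * the next programmed event so the host wakes the vCPU in time, since
 * a descheduled vCPU has no other host-side wakeup source for its
 * timer.
 */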
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}

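/* Drop the wakeup backstop once the vCPU starts running again. */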
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

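/*
 * KVM_GET_ONE_REG handler for the RISC-V timer register group
 * (frequency, time, compare, state). A hypothetical userspace read of
 * the time register might look roughly like this (illustrative sketch,
 * not taken from this file; "val" and "vcpu_fd" are assumed):
 *
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */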
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

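/*
 * KVM_SET_ONE_REG handler for the timer register group: frequency is
 * read-only and must match the host timebase, writing time adjusts the
 * VM-wide time_delta, compare updates the next event, and state arms
 * or cancels the timer.
 */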
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}

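/*
 * Per-vCPU timer setup. Selects the Sstc backend when the extension is
 * available in hardware, otherwise falls back to full hrtimer
 * emulation.
 */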
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

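/* Tear down the per-vCPU timer, cancelling any armed hrtimer. */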
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

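/*
 * Reset the timer to its quiescent state: no next event programmed
 * (-1ULL is effectively "never") and no hrtimer armed.
 */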
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

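/*
 * Program HTIMEDELTA(H) so that guest reads of the time CSR observe
 * the VM-wide offset maintained in gt->time_delta.
 */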
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

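/*
 * vCPU load path: restore the guest's time offset and, in Sstc mode,
 * the vstimecmp CSR from next_cycles, then drop the wakeup backstop
 * hrtimer since the hardware timer takes over.
 */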
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

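/*
 * Capture the guest's current vstimecmp on VM exit; with Sstc the
 * guest writes this CSR directly, so KVM must read it back to keep
 * next_cycles up to date.
 */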
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}

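/*
 * vCPU put path. The compare value was already captured by
 * kvm_riscv_vcpu_timer_sync(); all that remains is arming the wakeup
 * backstop if the vCPU is blocking.
 */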
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit so no need to save here.
	 */

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

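/*
 * Per-VM timer setup: capture the clocksource's mult/shift factors for
 * the cycles-to-ns conversion and zero the guest time base, so guest
 * time starts counting from (roughly) zero at VM creation.
 */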
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}