xref: /openbmc/linux/arch/riscv/kvm/vcpu_timer.c (revision cca986fa)
13a9f66cbSAtish Patra // SPDX-License-Identifier: GPL-2.0
23a9f66cbSAtish Patra /*
33a9f66cbSAtish Patra  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
43a9f66cbSAtish Patra  *
53a9f66cbSAtish Patra  * Authors:
63a9f66cbSAtish Patra  *     Atish Patra <atish.patra@wdc.com>
73a9f66cbSAtish Patra  */
83a9f66cbSAtish Patra 
93a9f66cbSAtish Patra #include <linux/errno.h>
103a9f66cbSAtish Patra #include <linux/err.h>
113a9f66cbSAtish Patra #include <linux/kvm_host.h>
123a9f66cbSAtish Patra #include <linux/uaccess.h>
133a9f66cbSAtish Patra #include <clocksource/timer-riscv.h>
143a9f66cbSAtish Patra #include <asm/csr.h>
153a9f66cbSAtish Patra #include <asm/delay.h>
163a9f66cbSAtish Patra #include <asm/kvm_vcpu_timer.h>
173a9f66cbSAtish Patra 
183a9f66cbSAtish Patra static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
193a9f66cbSAtish Patra {
203a9f66cbSAtish Patra 	return get_cycles64() + gt->time_delta;
213a9f66cbSAtish Patra }
223a9f66cbSAtish Patra 
233a9f66cbSAtish Patra static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
243a9f66cbSAtish Patra 				     struct kvm_guest_timer *gt,
253a9f66cbSAtish Patra 				     struct kvm_vcpu_timer *t)
263a9f66cbSAtish Patra {
273a9f66cbSAtish Patra 	unsigned long flags;
283a9f66cbSAtish Patra 	u64 cycles_now, cycles_delta, delta_ns;
293a9f66cbSAtish Patra 
303a9f66cbSAtish Patra 	local_irq_save(flags);
313a9f66cbSAtish Patra 	cycles_now = kvm_riscv_current_cycles(gt);
323a9f66cbSAtish Patra 	if (cycles_now < cycles)
333a9f66cbSAtish Patra 		cycles_delta = cycles - cycles_now;
343a9f66cbSAtish Patra 	else
353a9f66cbSAtish Patra 		cycles_delta = 0;
363a9f66cbSAtish Patra 	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
373a9f66cbSAtish Patra 	local_irq_restore(flags);
383a9f66cbSAtish Patra 
393a9f66cbSAtish Patra 	return delta_ns;
403a9f66cbSAtish Patra }
413a9f66cbSAtish Patra 
423a9f66cbSAtish Patra static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
433a9f66cbSAtish Patra {
443a9f66cbSAtish Patra 	u64 delta_ns;
453a9f66cbSAtish Patra 	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
463a9f66cbSAtish Patra 	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
473a9f66cbSAtish Patra 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
483a9f66cbSAtish Patra 
493a9f66cbSAtish Patra 	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
503a9f66cbSAtish Patra 		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
513a9f66cbSAtish Patra 		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
523a9f66cbSAtish Patra 		return HRTIMER_RESTART;
533a9f66cbSAtish Patra 	}
543a9f66cbSAtish Patra 
553a9f66cbSAtish Patra 	t->next_set = false;
563a9f66cbSAtish Patra 	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);
573a9f66cbSAtish Patra 
583a9f66cbSAtish Patra 	return HRTIMER_NORESTART;
593a9f66cbSAtish Patra }
603a9f66cbSAtish Patra 
613a9f66cbSAtish Patra static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
623a9f66cbSAtish Patra {
633a9f66cbSAtish Patra 	if (!t->init_done || !t->next_set)
643a9f66cbSAtish Patra 		return -EINVAL;
653a9f66cbSAtish Patra 
663a9f66cbSAtish Patra 	hrtimer_cancel(&t->hrt);
673a9f66cbSAtish Patra 	t->next_set = false;
683a9f66cbSAtish Patra 
693a9f66cbSAtish Patra 	return 0;
703a9f66cbSAtish Patra }
713a9f66cbSAtish Patra 
723a9f66cbSAtish Patra int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
733a9f66cbSAtish Patra {
743a9f66cbSAtish Patra 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
753a9f66cbSAtish Patra 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
763a9f66cbSAtish Patra 	u64 delta_ns;
773a9f66cbSAtish Patra 
783a9f66cbSAtish Patra 	if (!t->init_done)
793a9f66cbSAtish Patra 		return -EINVAL;
803a9f66cbSAtish Patra 
813a9f66cbSAtish Patra 	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);
823a9f66cbSAtish Patra 
833a9f66cbSAtish Patra 	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
843a9f66cbSAtish Patra 	t->next_cycles = ncycles;
853a9f66cbSAtish Patra 	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
863a9f66cbSAtish Patra 	t->next_set = true;
873a9f66cbSAtish Patra 
883a9f66cbSAtish Patra 	return 0;
893a9f66cbSAtish Patra }
903a9f66cbSAtish Patra 
913a9f66cbSAtish Patra int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
923a9f66cbSAtish Patra 				 const struct kvm_one_reg *reg)
933a9f66cbSAtish Patra {
943a9f66cbSAtish Patra 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
953a9f66cbSAtish Patra 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
963a9f66cbSAtish Patra 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
973a9f66cbSAtish Patra 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
983a9f66cbSAtish Patra 					    KVM_REG_SIZE_MASK |
993a9f66cbSAtish Patra 					    KVM_REG_RISCV_TIMER);
1003a9f66cbSAtish Patra 	u64 reg_val;
1013a9f66cbSAtish Patra 
1023a9f66cbSAtish Patra 	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
1033a9f66cbSAtish Patra 		return -EINVAL;
1043a9f66cbSAtish Patra 	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
1053a9f66cbSAtish Patra 		return -EINVAL;
1063a9f66cbSAtish Patra 
1073a9f66cbSAtish Patra 	switch (reg_num) {
1083a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(frequency):
1093a9f66cbSAtish Patra 		reg_val = riscv_timebase;
1103a9f66cbSAtish Patra 		break;
1113a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(time):
1123a9f66cbSAtish Patra 		reg_val = kvm_riscv_current_cycles(gt);
1133a9f66cbSAtish Patra 		break;
1143a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(compare):
1153a9f66cbSAtish Patra 		reg_val = t->next_cycles;
1163a9f66cbSAtish Patra 		break;
1173a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(state):
1183a9f66cbSAtish Patra 		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
1193a9f66cbSAtish Patra 					  KVM_RISCV_TIMER_STATE_OFF;
1203a9f66cbSAtish Patra 		break;
1213a9f66cbSAtish Patra 	default:
1223a9f66cbSAtish Patra 		return -EINVAL;
1237b161d9cSran jianping 	}
1243a9f66cbSAtish Patra 
1253a9f66cbSAtish Patra 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
1263a9f66cbSAtish Patra 		return -EFAULT;
1273a9f66cbSAtish Patra 
1283a9f66cbSAtish Patra 	return 0;
1293a9f66cbSAtish Patra }
1303a9f66cbSAtish Patra 
1313a9f66cbSAtish Patra int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
1323a9f66cbSAtish Patra 				 const struct kvm_one_reg *reg)
1333a9f66cbSAtish Patra {
1343a9f66cbSAtish Patra 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
1353a9f66cbSAtish Patra 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
1363a9f66cbSAtish Patra 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
1373a9f66cbSAtish Patra 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
1383a9f66cbSAtish Patra 					    KVM_REG_SIZE_MASK |
1393a9f66cbSAtish Patra 					    KVM_REG_RISCV_TIMER);
1403a9f66cbSAtish Patra 	u64 reg_val;
1413a9f66cbSAtish Patra 	int ret = 0;
1423a9f66cbSAtish Patra 
1433a9f66cbSAtish Patra 	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
1443a9f66cbSAtish Patra 		return -EINVAL;
1453a9f66cbSAtish Patra 	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
1463a9f66cbSAtish Patra 		return -EINVAL;
1473a9f66cbSAtish Patra 
1483a9f66cbSAtish Patra 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
1493a9f66cbSAtish Patra 		return -EFAULT;
1503a9f66cbSAtish Patra 
1513a9f66cbSAtish Patra 	switch (reg_num) {
1523a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(frequency):
1533a9f66cbSAtish Patra 		ret = -EOPNOTSUPP;
1543a9f66cbSAtish Patra 		break;
1553a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(time):
1563a9f66cbSAtish Patra 		gt->time_delta = reg_val - get_cycles64();
1573a9f66cbSAtish Patra 		break;
1583a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(compare):
1593a9f66cbSAtish Patra 		t->next_cycles = reg_val;
1603a9f66cbSAtish Patra 		break;
1613a9f66cbSAtish Patra 	case KVM_REG_RISCV_TIMER_REG(state):
1623a9f66cbSAtish Patra 		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
1633a9f66cbSAtish Patra 			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
1643a9f66cbSAtish Patra 		else
1653a9f66cbSAtish Patra 			ret = kvm_riscv_vcpu_timer_cancel(t);
1663a9f66cbSAtish Patra 		break;
1673a9f66cbSAtish Patra 	default:
1683a9f66cbSAtish Patra 		ret = -EINVAL;
1693a9f66cbSAtish Patra 		break;
1707b161d9cSran jianping 	}
1713a9f66cbSAtish Patra 
1723a9f66cbSAtish Patra 	return ret;
1733a9f66cbSAtish Patra }
1743a9f66cbSAtish Patra 
1753a9f66cbSAtish Patra int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
1763a9f66cbSAtish Patra {
1773a9f66cbSAtish Patra 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
1783a9f66cbSAtish Patra 
1793a9f66cbSAtish Patra 	if (t->init_done)
1803a9f66cbSAtish Patra 		return -EINVAL;
1813a9f66cbSAtish Patra 
1823a9f66cbSAtish Patra 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1833a9f66cbSAtish Patra 	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
1843a9f66cbSAtish Patra 	t->init_done = true;
1853a9f66cbSAtish Patra 	t->next_set = false;
1863a9f66cbSAtish Patra 
1873a9f66cbSAtish Patra 	return 0;
1883a9f66cbSAtish Patra }
1893a9f66cbSAtish Patra 
1903a9f66cbSAtish Patra int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
1913a9f66cbSAtish Patra {
1923a9f66cbSAtish Patra 	int ret;
1933a9f66cbSAtish Patra 
1943a9f66cbSAtish Patra 	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
1953a9f66cbSAtish Patra 	vcpu->arch.timer.init_done = false;
1963a9f66cbSAtish Patra 
1973a9f66cbSAtish Patra 	return ret;
1983a9f66cbSAtish Patra }
1993a9f66cbSAtish Patra 
2003a9f66cbSAtish Patra int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
2013a9f66cbSAtish Patra {
2023a9f66cbSAtish Patra 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
2033a9f66cbSAtish Patra }
2043a9f66cbSAtish Patra 
/*
 * Restore the VM-wide guest/host time offset into the htimedelta CSR on
 * vcpu load, so the guest's view of time is consistent on this hart.
 * On 32-bit the 64-bit delta is split across HTIMEDELTA/HTIMEDELTAH.
 */
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#ifdef CONFIG_64BIT
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#else
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#endif
}
2163a9f66cbSAtish Patra 
217*cca986faSNikolay Borisov void kvm_riscv_guest_timer_init(struct kvm *kvm)
2183a9f66cbSAtish Patra {
2193a9f66cbSAtish Patra 	struct kvm_guest_timer *gt = &kvm->arch.timer;
2203a9f66cbSAtish Patra 
2213a9f66cbSAtish Patra 	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
2223a9f66cbSAtish Patra 	gt->time_delta = -get_cycles64();
2233a9f66cbSAtish Patra }
224