// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		ret = -EOPNOTSUPP;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit so no need to save here.
	 */

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}