// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
	default:
		return 0;
	}
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
		break;
	default:
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
	}
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

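/*
 * Work out which timers this vcpu can use directly in hardware and
 * which must be emulated with a hrtimer: with VHE both EL1 timers are
 * handled directly, without VHE only the virtual timer is, and the
 * physical timer is emulated.
 */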
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

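/*
 * A timer can only fire if it is enabled and its interrupt is not
 * masked in the CTL register. This is evaluated on the saved state,
 * hence the WARN if the context is still loaded on the CPU.
 */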
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
	       ((timer_get_ctl(timer_ctx) &
		 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

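/*
 * Compute whether the timer output line is (or should be) asserted:
 * read the hardware ISTATUS when the context is loaded on the CPU,
 * otherwise derive it from the saved CVAL and the current counter.
 */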
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
			(cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

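/*
 * Move a directly used timer from the CPU registers back into the
 * vcpu's sysreg file and disable it, so that it no longer fires on the
 * host while the vcpu is not running.
 */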
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

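/*
 * Load the timer state onto the CPU on vcpu_load: resync the interrupt
 * state with the GIC (or mask the host IRQ when there is none), set
 * CNTVOFF, restore the direct timers and update the emulated one.
 */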
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(timer_get_offset(map.direct_vtimer));

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (rcuwait_active(wait))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so no need to zero the CNTVOFF_EL2
	 * register.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

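/*
 * Reset both EL1 timers to a disabled, unmasked state and clear any
 * interrupt state left over from a previous run of the vcpu.
 */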
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
}

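/*
 * Per-vcpu setup: wire the contexts back to the vcpu, initialise the
 * emulation hrtimers and the default PPI numbers, and give the VM a
 * common CNTVOFF based on the current physical count.
 */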
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

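/*
 * Userspace access to the guest timer registers: a write to the
 * virtual counter is implemented by adjusting CNTVOFF for the whole VM.
 */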
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

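/*
 * One-time host initialisation: pick up the host timer PPIs from the
 * arch timer driver, request them as per-cpu interrupts and register
 * the CPU hotplug callbacks that keep them enabled.
 */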
int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	/* First, do the virtual EL1 timer irq */

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
		if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
		    host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
			kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
				host_ptimer_irq);
			host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
		}

		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq;
	int i, ret;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

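/*
 * Finalise the timer setup for this vcpu: once the vgic is initialised,
 * map the guest timer interrupts onto the host timer PPIs so the vgic
 * can drive the physical interrupt's active state.
 */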
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    kvm_arch_timer_get_input_level);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    kvm_arch_timer_get_input_level);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
 * bits have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}