// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

static void set_default_spectre(struct kvm *kvm)
{
	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
		kvm->arch.pfr0_csv2 = 1;
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
		kvm->arch.pfr0_csv3 = 1;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
		return ret;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	set_default_spectre(kvm);

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm: pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	bitmap_free(kvm->arch.pmu_filter);

	kvm_vgic_destroy(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}

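/*
 * kvm_vm_ioctl_check_extension - report whether an optional KVM capability
 * is supported. @kvm may be NULL when the capability is queried on the
 * /dev/kvm fd rather than on a VM fd.
 */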
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
		r = 1;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->arch.max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	default:
		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

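/*
 * kvm_arch_vcpu_load - prepare a vCPU to run on this physical CPU.
 *
 * Flush stale stage-2 TLB entries if a different vCPU of this VM ran here
 * last, then load the GIC, timer, (on VHE) sysreg, FP and PMU state, and
 * choose WFx trapping based on whether this is the only runnable task.
 */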
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v: The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}

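/*
 * kvm_vcpu_first_run_init - one-time setup performed the first time a vCPU
 * enters the guest: map the VGIC resources (or account for a userspace
 * irqchip), then enable the timer and the PMU for this vCPU.
 */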
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
	}
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awakened to handle a signal; request that we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

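/*
 * check_vcpu_requests - handle any pending vCPU requests (sleep, reset,
 * pending IRQ, steal-time update, GICv4 reload) before entering the guest.
 */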
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
	}
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 *
 * This function is invoked from user space through the KVM_RUN ioctl. It
 * executes VM code in a loop until the time slice for the process is used up
 * or some emulation is needed from user space, in which case it returns with
 * return value 0 and with the kvm_run structure filled in with the required
 * data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes, and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest. We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted, ticks after that point are not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such a
		 * state and decide it wasn't supposed to do so (as with the
		 * asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu->arch.target = -1;
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}

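/*
 * vcpu_interrupt_line - assert or retract the virtual IRQ or FIQ line of a
 * vCPU by toggling HCR_VI/HCR_VF, kicking the vCPU if the line changed.
 */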
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

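/*
 * kvm_vm_ioctl_irq_line - handle KVM_IRQ_LINE. The irq value packs the
 * interrupt type (core IRQ/FIQ for a userspace irqchip, PPI or SPI for the
 * in-kernel GIC), the target vcpu index (split across the VCPU and VCPU2
 * fields) and the interrupt number; see the KVM_ARM_IRQ_* masks.
 */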
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i, ret;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu->arch.has_run_once) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

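/*
 * kvm_vm_ioctl_set_device_addr - decode a KVM_ARM_SET_DEVICE_ADDR request
 * and hand the address to the matching in-kernel device (currently only
 * the GICv2).
 */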
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}

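/*
 * kvm_arch_vm_ioctl - arm64-specific VM ioctls: KVM_CREATE_IRQCHIP,
 * KVM_ARM_SET_DEVICE_ADDR and KVM_ARM_PREFERRED_TARGET.
 */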
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static unsigned long nvhe_percpu_size(void)
{
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
}

static unsigned long nvhe_percpu_order(void)
{
	unsigned long size = nvhe_percpu_size();

	return size ? get_order(size) : 0;
}

static int kvm_map_vectors(void)
{
	/*
	 * SV2  = ARM64_SPECTRE_V2
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !SV2 + !HEL2 -> use direct vectors
	 *  SV2 + !HEL2 -> use hardened vectors in place
	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}

static void cpu_init_hyp_mode(void)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long vector_ptr;
	unsigned long tpidr_el2;
	struct arm_smccc_res res;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
		    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	pgd_ptr = kvm_mmu_get_httbr();
	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
	hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
			  pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

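/*
 * cpu_hyp_reinit - (re)initialise this CPU's EL2 state: refresh the host
 * context, reset to the stub vectors, install the hyp vector, then either
 * apply the VHE timer setup or run the full nVHE EL2 init, and finally set
 * up debug and the VGIC for this CPU.
 */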
static void cpu_hyp_reinit(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	cpu_hyp_reset();

	*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();
	else
		cpu_init_hyp_mode();

	kvm_arm_init_debug();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static int init_common_resources(void)
{
	return kvm_set_ipa_limit();
}

static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}

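/*
 * teardown_hyp_mode - undo init_hyp_mode(): free the hyp page tables and
 * the per-CPU stack and per-CPU region pages.
 */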
static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
	}
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
	 */
	for_each_possible_cpu(cpu) {
		struct page *page;
		void *page_addr;

		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
		if (!page) {
			err = -ENOMEM;
			goto out_err;
		}

		page_addr = page_address(page);
		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	/*
	 * Map Hyp percpu pages
	 */
	for_each_possible_cpu(cpu) {
		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
		char *percpu_end = percpu_begin + nvhe_percpu_size();

		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp percpu region\n");
			goto out_err;
		}
	}

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (in_hyp_mode)
		kvm_info("VHE mode initialized successfully\n");
	else
		kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);