// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif

DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret, cpu;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
	if (!kvm->arch.last_vcpu_ran)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid.vmid_gen = 0;

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm: pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_vgic_destroy(kvm);

	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
		r = 1;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->arch.max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	default:
		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int *last_ran;

	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	kvm_vcpu_load_sysregs(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	kvm_vcpu_put_sysregs(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v: The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
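 * (The generation counter is bumped when kvm_next_vmid wraps back to zero,
 * i.e. once all 2^vmid_bits - 1 usable values of the current generation
 * have been handed out; see update_vmid() below.)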
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @kvm: The guest that struct vmid belongs to
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
	}
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}
	}
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, run);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->kvm->arch.vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_hwstate(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest. We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that is not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, run, ret);

		preempt_enable();

		ret = handle_exit(vcpu, run, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i, ret;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu->arch.has_run_once) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			__flush_icache_all();
	}

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}

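/*
 * VM-scoped arch ioctls: in-kernel GIC creation, the legacy GICv2
 * base-address setter handled above, and the preferred-target query
 * that VMMs typically issue before KVM_ARM_VCPU_INIT.
 */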
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static void cpu_init_hyp_mode(void)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long vector_ptr;
	unsigned long tpidr_el2;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
		     (unsigned long)kvm_ksym_ref(&kvm_host_data));

	pgd_ptr = kvm_mmu_get_httbr();
	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
	vector_ptr = (unsigned long)kvm_get_hyp_vector();

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}

static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

static void cpu_hyp_reinit(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);

	cpu_hyp_reset();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();
	else
		cpu_init_hyp_mode();

	kvm_arm_init_debug();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}

static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif

static int init_common_resources(void)
{
	return kvm_set_ipa_limit();
}

static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU lower-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}

static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	for_each_possible_cpu(cpu) {
		kvm_host_data_t *cpu_data;

		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);

		if (err) {
			kvm_err("Cannot map host CPU state: %d\n", err);
			goto out_err;
		}
	}

	err = hyp_map_aux_data();
	if (err)
		kvm_err("Cannot map host auxiliary data: %d\n", err);

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

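/*
 * kvm_mpidr_to_vcpu - find the vcpu whose MPIDR affinity matches @mpidr
 *
 * Affinity fields outside MPIDR_HWID_BITMASK are masked off before the
 * comparison. Returns NULL if no vcpu of @kvm matches; callers (e.g. the
 * PSCI CPU_ON handler) treat that as an invalid target CPU.
 */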
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (in_hyp_mode)
		kvm_info("VHE mode initialized successfully\n");
	else
		kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);