// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces. The basic
 * idea is that even though the VGIC is not functional or not requested from
 * user space, the critical path of the run loop can still call VGIC functions
 * that just won't do anything, without them having to check additional
 * initialization flags to ensure they don't look at uninitialized data
 * structures.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu specific data
 *   structures. Can be executed lazily for GICv2.
 *
 * CPU Interface:
 *
 * - kvm_vgic_vcpu_init(): initialization of static data that
 *   doesn't depend on any sizing information or emulation type. No
 *   allocation is allowed there.
 */

/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC VCPU data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init() which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
	INIT_LIST_HEAD(&dist->lpi_translation_cache);
	raw_spin_lock_init(&dist->lpi_list_lock);
}

/* CREATION */

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		!kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Hold all the vCPU locks while setting up the VGIC so that no vCPU
	 * can (re)enter the guest with a half-initialized irqchip.
	 */
	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	/* A VGIC cannot be wired up after any vCPU has already run. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_has_run_once(vcpu))
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}

/* INIT/DESTROY */

/**
 * kvm_vgic_dist_init: initialize the dist data structures
 * @kvm: kvm struct pointer
 * @nr_spis: number of spis, frozen by caller
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * In the following code we do not take the irq struct lock since
	 * no other action on irq structs can happen while the VGIC is
	 * not initialized yet:
	 * If someone wants to inject an interrupt or does a MMIO access, we
	 * require prior initialization in case of a virtual GICv3 or trigger
	 * initialization when using a virtual GICv2.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 * structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}
	}

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}

/* Enable the vcpu interface for the host's GIC flavour (v2 or v3). */
static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_enable(vcpu);
	else
		vgic_v3_enable(vcpu);
}

/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by the guest through the KVM DEVICE API. If not, nr_spis is set to 256.
 * vgic_initialized() returns true when this function has succeeded.
 * Must be called with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i;
	unsigned long idx;

	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/* Initialize groups on CPUs created before the VGIC type was known */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			switch (dist->vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				irq->group = 1;
				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
				break;
			case KVM_DEV_TYPE_ARM_VGIC_V2:
				irq->group = 0;
				irq->targets = 1U << idx;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_init(kvm);

	/*
	 * If we have GICv4.1 enabled, unconditionally enable the
	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
	 * enable it if we present a virtual ITS to the guest.
	 */
	if (vgic_supports_direct_msis(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(idx, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	dist->implementation_rev = 2;
	dist->initialized = true;

out:
	return ret;
}

/* Tear down distributor state; the reverse of vgic_init()/kvm_vgic_create(). */
static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;
	dist->vgic_dist_base = VGIC_ADDR_UNDEF;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
			vgic_v3_free_redist_region(rdreg);
		INIT_LIST_HEAD(&dist->rd_regions);
	} else {
		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_destroy(kvm);

	if (vgic_supports_direct_msis(kvm))
		vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/*
	 * Retire all pending LPIs on this vcpu anyway as we're
	 * going to destroy it.
	 */
	vgic_flush_pending_lpis(vcpu);

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}

/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	vgic_debug_destroy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	kvm_vgic_dist_destroy(kvm);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */
int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}

/* RESOURCE MAPPING */

/**
 * kvm_vgic_map_resources: map the MMIO regions depending on the VGIC
 * model exposed to the guest, called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2 calls vgic_init() if not already done.
 * v3 and derivatives return an error if the VGIC is not initialized.
 * vgic_ready() returns true if this function has succeeded.
 * @kvm: kvm struct pointer
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	/* Fast path: a previous call already completed the mapping. */
	if (likely(vgic_ready(kvm)))
		return 0;

	mutex_lock(&kvm->lock);
	/* Re-check under the lock in case we raced with another caller. */
	if (vgic_ready(kvm))
		goto out;

	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	if (ret)
		__kvm_vgic_destroy(kvm);
	else
		dist->ready = true;

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

/* GENERIC PROBE */

static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}


static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 */
	return IRQ_HANDLED;
}

/* Host GIC description handed over by the GIC driver at boot. */
static struct gic_kvm_info *gic_kvm_info;

void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
{
	BUG_ON(gic_kvm_info != NULL);
	/*
	 * If the allocation fails, gic_kvm_info stays NULL and
	 * kvm_vgic_hyp_init() will bail out with -ENODEV later.
	 */
	gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (gic_kvm_info)
		*gic_kvm_info = *info;
}

/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC VE hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}

/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by a guest later on.
 */
int kvm_vgic_hyp_init(void)
{
	bool has_mask;
	int ret;

	if (!gic_kvm_info)
		return -ENODEV;

	has_mask = !gic_kvm_info->no_maint_irq_mask;

	if (has_mask && !gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	/*
	 * If we get one of these oddball non-GICs, taint the kernel,
	 * as we have no idea of how they *really* behave.
	 */
	if (gic_kvm_info->no_hw_deactivation) {
		kvm_info("Non-architectural vgic, tainting kernel\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		kvm_vgic_global_state.no_hw_deactivation = true;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	default:
		ret = -ENODEV;
	}

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

	/* The boot-time info is only needed during probing; release it. */
	kfree(gic_kvm_info);
	gic_kvm_info = NULL;

	if (ret)
		return ret;

	if (!has_mask)
		return 0;

	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"kvm/arm/vgic:starting",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}