// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces. The basic
 * idea is that even though the VGIC is not functional or not requested from
 * user space, the critical path of the run loop can still call VGIC functions
 * that just won't do anything, without them having to check additional
 * initialization flags to ensure they don't look at uninitialized data
 * structures.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu specific data
 *   structures. Can be executed lazily for GICv2.
 *
 * CPU Interface:
 *
 * - kvm_vgic_vcpu_init(): initialization of static data that
 *   doesn't depend on any sizing information or emulation type. No
 *   allocation is allowed there.
 */

/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC distributor data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init(), which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
	INIT_LIST_HEAD(&dist->lpi_translation_cache);
	raw_spin_lock_init(&dist->lpi_list_lock);
}

/* CREATION */

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, ret;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
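	 *
	 * A minimal userspace sketch of the two creation paths, assuming
	 * vm_fd is an open VM file descriptor (vm_fd itself is not defined
	 * in this file):
	 *
	 *	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);	legacy path, GICv2 only
	 *
	 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
	 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	device fd returned in cd.fd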
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
	    !kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}

/* INIT/DESTROY */

/**
 * kvm_vgic_dist_init: initialize the dist data structures
 * @kvm: kvm struct pointer
 * @nr_spis: number of spis, frozen by caller
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * In the following code we do not take the irq struct lock since
	 * no other action on irq structs can happen while the VGIC is
	 * not initialized yet:
	 * If someone wants to inject an interrupt or does an MMIO access, we
	 * require prior initialization in the case of a virtual GICv3, or
	 * trigger initialization when using a virtual GICv2.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 * structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
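	 * (Per the GIC architecture, INTIDs 0-15 are SGIs and 16-31 are PPIs,
	 * which is what the vgic_irq_is_sgi() check below distinguishes.)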
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}
	}

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}

static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_enable(vcpu);
	else
		vgic_v3_enable(vcpu);
}

/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by userspace through the KVM device API. If not, nr_spis defaults to the
 * legacy total of 256 interrupts minus the 32 private ones.
 * vgic_initialized() returns true when this function has succeeded.
 * Must be called with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i, idx;

	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/* Initialize groups on CPUs created before the VGIC type was known */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			switch (dist->vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				irq->group = 1;
				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
				break;
			case KVM_DEV_TYPE_ARM_VGIC_V2:
				irq->group = 0;
				irq->targets = 1U << idx;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_init(kvm);

	/*
	 * If we have GICv4.1 enabled, unconditionally request v4 support so
	 * that we get HW-accelerated vSGIs. Otherwise, only enable it if we
	 * present a virtual ITS to the guest.
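	 * (GICv4.1 adds direct injection of virtual SGIs in hardware, which is
	 * why v4 is worth enabling even when no virtual ITS is exposed to the
	 * guest.)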
	 */
	if (vgic_supports_direct_msis(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	dist->implementation_rev = 2;
	dist->initialized = true;

out:
	return ret;
}

static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;
	dist->vgic_dist_base = VGIC_ADDR_UNDEF;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
			vgic_v3_free_redist_region(rdreg);
		INIT_LIST_HEAD(&dist->rd_regions);
	} else {
		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
	}

	if (vgic_has_its(kvm))
		vgic_lpi_translation_cache_destroy(kvm);

	if (vgic_supports_direct_msis(kvm))
		vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/*
	 * Retire all pending LPIs on this vcpu anyway as we're
	 * going to destroy it.
	 */
	vgic_flush_pending_lpis(vcpu);

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}

/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	vgic_debug_destroy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	kvm_vgic_dist_destroy(kvm);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */
int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must be
		 * explicitly initialized once it has been set up, using the
		 * respective KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}

/* RESOURCE MAPPING */

/**
 * kvm_vgic_map_resources: map the MMIO regions depending on the VGIC model
 * exposed to the guest, called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2 calls vgic_init() if not already done.
 * v3 and derivatives return an error if the VGIC is not initialized.
 * vgic_ready() returns true if this function has succeeded.
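 * This is typically invoked from the VCPU first-run path (outside this
 * file), so a failure here surfaces as an error on the first KVM_RUN.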
 * @kvm: kvm struct pointer
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (likely(vgic_ready(kvm)))
		return 0;

	mutex_lock(&kvm->lock);
	if (vgic_ready(kvm))
		goto out;

	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	if (ret)
		__kvm_vgic_destroy(kvm);
	else
		dist->ready = true;

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

/* GENERIC PROBE */

static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}

static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 */
	return IRQ_HANDLED;
}

static struct gic_kvm_info *gic_kvm_info;

void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
{
	BUG_ON(gic_kvm_info != NULL);
	gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (gic_kvm_info)
		*gic_kvm_info = *info;
}

/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC virtualization extensions (VE)
 * hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}

/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2_probe() or vgic_v3_probe(), which registers the KVM_DEVICE
 * that can be instantiated by userspace later on.
 */
int kvm_vgic_hyp_init(void)
{
	bool has_mask;
	int ret;

	if (!gic_kvm_info)
		return -ENODEV;

	has_mask = !gic_kvm_info->no_maint_irq_mask;

	if (has_mask && !gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	/*
	 * If we get one of these oddball non-GICs, taint the kernel,
	 * as we have no idea of how they *really* behave.
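	 * The no_hw_deactivation flag is recorded in kvm_vgic_global_state
	 * below so that the rest of the vgic code can avoid relying on
	 * hardware deactivation of physical interrupts.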
	 */
	if (gic_kvm_info->no_hw_deactivation) {
		kvm_info("Non-architectural vgic, tainting kernel\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		kvm_vgic_global_state.no_hw_deactivation = true;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	default:
		ret = -ENODEV;
	}

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

	kfree(gic_kvm_info);
	gic_kvm_info = NULL;

	if (ret)
		return ret;

	if (!has_mask)
		return 0;

	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"kvm/arm/vgic:starting",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}