// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
		return 0;
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}

static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	struct kvm_vcpu *vcpu;
	unsigned long flags, c;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

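/*
 * GICD_ITARGETSR holds one byte per interrupt, each byte being a bitmap
 * of the CPUs the interrupt is routed to. Writes below only take effect
 * for SPIs; the banked bytes covering SGIs and PPIs (ITARGETSR0-7) are
 * read-only.
 */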
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

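/*
 * GICD_CPENDSGIR / GICD_SPENDSGIR hold one byte per SGI, each bit in a
 * byte identifying a source CPU with a pending request for that SGI.
 * Clearing the last source bit also clears the pending latch; setting
 * any source bit latches the SGI pending and queues it.
 */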
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

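/*
 * GICC_APRn is backed by the single GICH_APR register when running on
 * GICv2 hardware, and by ICH_AP1Rn when running on GICv3 (which only
 * uses AP1Rn for memory-mapped, i.e. GICv2, guests). Indices outside
 * the supported range read as zero and are ignored on write.
 */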
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

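/*
 * The tables below describe the distributor and CPU interface register
 * frames: each entry maps an MMIO offset to its guest-facing handlers,
 * its userspace-access handlers where present, and the access widths
 * the region accepts.
 */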
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		NULL, vgic_uaccess_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		NULL, vgic_uaccess_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}