1 /* 2 * ARM Generic Interrupt Controller using KVM in-kernel support 3 * 4 * Copyright (c) 2012 Linaro Limited 5 * Written by Peter Maydell 6 * Save/Restore logic added by Christoffer Dall. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation, either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, see <http://www.gnu.org/licenses/>. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qapi/error.h" 24 #include "qemu-common.h" 25 #include "cpu.h" 26 #include "hw/sysbus.h" 27 #include "migration/migration.h" 28 #include "sysemu/kvm.h" 29 #include "kvm_arm.h" 30 #include "gic_internal.h" 31 #include "vgic_common.h" 32 33 //#define DEBUG_GIC_KVM 34 35 #ifdef DEBUG_GIC_KVM 36 static const int debug_gic_kvm = 1; 37 #else 38 static const int debug_gic_kvm = 0; 39 #endif 40 41 #define DPRINTF(fmt, ...) do { \ 42 if (debug_gic_kvm) { \ 43 printf("arm_gic: " fmt , ## __VA_ARGS__); \ 44 } \ 45 } while (0) 46 47 #define TYPE_KVM_ARM_GIC "kvm-arm-gic" 48 #define KVM_ARM_GIC(obj) \ 49 OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC) 50 #define KVM_ARM_GIC_CLASS(klass) \ 51 OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC) 52 #define KVM_ARM_GIC_GET_CLASS(obj) \ 53 OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC) 54 55 typedef struct KVMARMGICClass { 56 ARMGICCommonClass parent_class; 57 DeviceRealize parent_realize; 58 void (*parent_reset)(DeviceState *dev); 59 } KVMARMGICClass; 60 61 void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level) 62 { 63 /* Meaning of the 'irq' parameter: 64 * [0..N-1] : external interrupts 65 * [N..N+31] : PPI (internal) interrupts for CPU 0 66 * [N+32..N+63] : PPI (internal interrupts for CPU 1 67 * ... 68 * Convert this to the kernel's desired encoding, which 69 * has separate fields in the irq number for type, 70 * CPU number and interrupt number. 71 */ 72 int kvm_irq, irqtype, cpu; 73 74 if (irq < (num_irq - GIC_INTERNAL)) { 75 /* External interrupt. The kernel numbers these like the GIC 76 * hardware, with external interrupt IDs starting after the 77 * internal ones. 78 */ 79 irqtype = KVM_ARM_IRQ_TYPE_SPI; 80 cpu = 0; 81 irq += GIC_INTERNAL; 82 } else { 83 /* Internal interrupt: decode into (cpu, interrupt id) */ 84 irqtype = KVM_ARM_IRQ_TYPE_PPI; 85 irq -= (num_irq - GIC_INTERNAL); 86 cpu = irq / GIC_INTERNAL; 87 irq %= GIC_INTERNAL; 88 } 89 kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) 90 | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq; 91 92 kvm_set_irq(kvm_state, kvm_irq, !!level); 93 } 94 95 static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level) 96 { 97 GICState *s = (GICState *)opaque; 98 99 kvm_arm_gic_set_irq(s->num_irq, irq, level); 100 } 101 102 static bool kvm_arm_gic_can_save_restore(GICState *s) 103 { 104 return s->dev_fd >= 0; 105 } 106 107 #define KVM_VGIC_ATTR(offset, cpu) \ 108 ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \ 109 KVM_DEV_ARM_VGIC_CPUID_MASK) | \ 110 (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \ 111 KVM_DEV_ARM_VGIC_OFFSET_MASK)) 112 113 static void kvm_gicd_access(GICState *s, int offset, int cpu, 114 uint32_t *val, bool write) 115 { 116 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, 117 KVM_VGIC_ATTR(offset, cpu), val, write); 118 } 119 120 static void kvm_gicc_access(GICState *s, int offset, int cpu, 121 uint32_t *val, bool write) 122 { 123 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS, 124 KVM_VGIC_ATTR(offset, cpu), val, write); 125 } 126 127 #define for_each_irq_reg(_ctr, _max_irq, _field_width) \ 128 for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++) 129 130 /* 131 * Translate from the in-kernel field for an IRQ value to/from the qemu 132 * representation. 133 */ 134 typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu, 135 uint32_t *field, bool to_kernel); 136 137 /* synthetic translate function used for clear/set registers to completely 138 * clear a setting using a clear-register before setting the remaining bits 139 * using a set-register */ 140 static void translate_clear(GICState *s, int irq, int cpu, 141 uint32_t *field, bool to_kernel) 142 { 143 if (to_kernel) { 144 *field = ~0; 145 } else { 146 /* does not make sense: qemu model doesn't use set/clear regs */ 147 abort(); 148 } 149 } 150 151 static void translate_group(GICState *s, int irq, int cpu, 152 uint32_t *field, bool to_kernel) 153 { 154 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 155 156 if (to_kernel) { 157 *field = GIC_TEST_GROUP(irq, cm); 158 } else { 159 if (*field & 1) { 160 GIC_SET_GROUP(irq, cm); 161 } 162 } 163 } 164 165 static void translate_enabled(GICState *s, int irq, int cpu, 166 uint32_t *field, bool to_kernel) 167 { 168 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 169 170 if (to_kernel) { 171 *field = GIC_TEST_ENABLED(irq, cm); 172 } else { 173 if (*field & 1) { 174 GIC_SET_ENABLED(irq, cm); 175 } 176 } 177 } 178 179 static void translate_pending(GICState *s, int irq, int cpu, 180 uint32_t *field, bool to_kernel) 181 { 182 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 183 184 if (to_kernel) { 185 *field = gic_test_pending(s, irq, cm); 186 } else { 187 if (*field & 1) { 188 GIC_SET_PENDING(irq, cm); 189 /* TODO: Capture is level-line is held high in the kernel */ 190 } 191 } 192 } 193 194 static void translate_active(GICState *s, int irq, int cpu, 195 uint32_t *field, bool to_kernel) 196 { 197 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 198 199 if (to_kernel) { 200 *field = GIC_TEST_ACTIVE(irq, cm); 201 } else { 202 if (*field & 1) { 203 GIC_SET_ACTIVE(irq, cm); 204 } 205 } 206 } 207 208 static void translate_trigger(GICState *s, int irq, int cpu, 209 uint32_t *field, bool to_kernel) 210 { 211 if (to_kernel) { 212 *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0; 213 } else { 214 if (*field & 0x2) { 215 GIC_SET_EDGE_TRIGGER(irq); 216 } 217 } 218 } 219 220 static void translate_priority(GICState *s, int irq, int cpu, 221 uint32_t *field, bool to_kernel) 222 { 223 if (to_kernel) { 224 *field = GIC_GET_PRIORITY(irq, cpu) & 0xff; 225 } else { 226 gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED); 227 } 228 } 229 230 static void translate_targets(GICState *s, int irq, int cpu, 231 uint32_t *field, bool to_kernel) 232 { 233 if (to_kernel) { 234 *field = s->irq_target[irq] & 0xff; 235 } else { 236 s->irq_target[irq] = *field & 0xff; 237 } 238 } 239 240 static void translate_sgisource(GICState *s, int irq, int cpu, 241 uint32_t *field, bool to_kernel) 242 { 243 if (to_kernel) { 244 *field = s->sgi_pending[irq][cpu] & 0xff; 245 } else { 246 s->sgi_pending[irq][cpu] = *field & 0xff; 247 } 248 } 249 250 /* Read a register group from the kernel VGIC */ 251 static void kvm_dist_get(GICState *s, uint32_t offset, int width, 252 int maxirq, vgic_translate_fn translate_fn) 253 { 254 uint32_t reg; 255 int i; 256 int j; 257 int irq; 258 int cpu; 259 int regsz = 32 / width; /* irqs per kernel register */ 260 uint32_t field; 261 262 for_each_irq_reg(i, maxirq, width) { 263 irq = i * regsz; 264 cpu = 0; 265 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { 266 kvm_gicd_access(s, offset, cpu, ®, false); 267 for (j = 0; j < regsz; j++) { 268 field = extract32(reg, j * width, width); 269 translate_fn(s, irq + j, cpu, &field, false); 270 } 271 272 cpu++; 273 } 274 offset += 4; 275 } 276 } 277 278 /* Write a register group to the kernel VGIC */ 279 static void kvm_dist_put(GICState *s, uint32_t offset, int width, 280 int maxirq, vgic_translate_fn translate_fn) 281 { 282 uint32_t reg; 283 int i; 284 int j; 285 int irq; 286 int cpu; 287 int regsz = 32 / width; /* irqs per kernel register */ 288 uint32_t field; 289 290 for_each_irq_reg(i, maxirq, width) { 291 irq = i * regsz; 292 cpu = 0; 293 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { 294 reg = 0; 295 for (j = 0; j < regsz; j++) { 296 translate_fn(s, irq + j, cpu, &field, true); 297 reg = deposit32(reg, j * width, width, field); 298 } 299 kvm_gicd_access(s, offset, cpu, ®, true); 300 301 cpu++; 302 } 303 offset += 4; 304 } 305 } 306 307 static void kvm_arm_gic_put(GICState *s) 308 { 309 uint32_t reg; 310 int i; 311 int cpu; 312 int num_cpu; 313 int num_irq; 314 315 /* Note: We do the restore in a slightly different order than the save 316 * (where the order doesn't matter and is simply ordered according to the 317 * register offset values */ 318 319 /***************************************************************** 320 * Distributor State 321 */ 322 323 /* s->ctlr -> GICD_CTLR */ 324 reg = s->ctlr; 325 kvm_gicd_access(s, 0x0, 0, ®, true); 326 327 /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */ 328 kvm_gicd_access(s, 0x4, 0, ®, false); 329 num_irq = ((reg & 0x1f) + 1) * 32; 330 num_cpu = ((reg & 0xe0) >> 5) + 1; 331 332 if (num_irq < s->num_irq) { 333 fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n", 334 s->num_irq, num_irq); 335 abort(); 336 } else if (num_cpu != s->num_cpu) { 337 fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n", 338 s->num_cpu, num_cpu); 339 /* Did we not create the VCPUs in the kernel yet? */ 340 abort(); 341 } 342 343 /* TODO: Consider checking compatibility with the IIDR ? */ 344 345 /* irq_state[n].enabled -> GICD_ISENABLERn */ 346 kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear); 347 kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled); 348 349 /* irq_state[n].group -> GICD_IGROUPRn */ 350 kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group); 351 352 /* s->irq_target[irq] -> GICD_ITARGETSRn 353 * (restore targets before pending to ensure the pending state is set on 354 * the appropriate CPU interfaces in the kernel) */ 355 kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets); 356 357 /* irq_state[n].trigger -> GICD_ICFGRn 358 * (restore configuration registers before pending IRQs so we treat 359 * level/edge correctly) */ 360 kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger); 361 362 /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */ 363 kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear); 364 kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending); 365 366 /* irq_state[n].active -> GICD_ISACTIVERn */ 367 kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear); 368 kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active); 369 370 371 /* s->priorityX[irq] -> ICD_IPRIORITYRn */ 372 kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority); 373 374 /* s->sgi_pending -> ICD_CPENDSGIRn */ 375 kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear); 376 kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource); 377 378 379 /***************************************************************** 380 * CPU Interface(s) State 381 */ 382 383 for (cpu = 0; cpu < s->num_cpu; cpu++) { 384 /* s->cpu_ctlr[cpu] -> GICC_CTLR */ 385 reg = s->cpu_ctlr[cpu]; 386 kvm_gicc_access(s, 0x00, cpu, ®, true); 387 388 /* s->priority_mask[cpu] -> GICC_PMR */ 389 reg = (s->priority_mask[cpu] & 0xff); 390 kvm_gicc_access(s, 0x04, cpu, ®, true); 391 392 /* s->bpr[cpu] -> GICC_BPR */ 393 reg = (s->bpr[cpu] & 0x7); 394 kvm_gicc_access(s, 0x08, cpu, ®, true); 395 396 /* s->abpr[cpu] -> GICC_ABPR */ 397 reg = (s->abpr[cpu] & 0x7); 398 kvm_gicc_access(s, 0x1c, cpu, ®, true); 399 400 /* s->apr[n][cpu] -> GICC_APRn */ 401 for (i = 0; i < 4; i++) { 402 reg = s->apr[i][cpu]; 403 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, true); 404 } 405 } 406 } 407 408 static void kvm_arm_gic_get(GICState *s) 409 { 410 uint32_t reg; 411 int i; 412 int cpu; 413 414 /***************************************************************** 415 * Distributor State 416 */ 417 418 /* GICD_CTLR -> s->ctlr */ 419 kvm_gicd_access(s, 0x0, 0, ®, false); 420 s->ctlr = reg; 421 422 /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */ 423 kvm_gicd_access(s, 0x4, 0, ®, false); 424 s->num_irq = ((reg & 0x1f) + 1) * 32; 425 s->num_cpu = ((reg & 0xe0) >> 5) + 1; 426 427 if (s->num_irq > GIC_MAXIRQ) { 428 fprintf(stderr, "Too many IRQs reported from the kernel: %d\n", 429 s->num_irq); 430 abort(); 431 } 432 433 /* GICD_IIDR -> ? */ 434 kvm_gicd_access(s, 0x8, 0, ®, false); 435 436 /* Clear all the IRQ settings */ 437 for (i = 0; i < s->num_irq; i++) { 438 memset(&s->irq_state[i], 0, sizeof(s->irq_state[0])); 439 } 440 441 /* GICD_IGROUPRn -> irq_state[n].group */ 442 kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group); 443 444 /* GICD_ISENABLERn -> irq_state[n].enabled */ 445 kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled); 446 447 /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */ 448 kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending); 449 450 /* GICD_ISACTIVERn -> irq_state[n].active */ 451 kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active); 452 453 /* GICD_ICFRn -> irq_state[n].trigger */ 454 kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger); 455 456 /* GICD_IPRIORITYRn -> s->priorityX[irq] */ 457 kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority); 458 459 /* GICD_ITARGETSRn -> s->irq_target[irq] */ 460 kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets); 461 462 /* GICD_CPENDSGIRn -> s->sgi_pending */ 463 kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource); 464 465 466 /***************************************************************** 467 * CPU Interface(s) State 468 */ 469 470 for (cpu = 0; cpu < s->num_cpu; cpu++) { 471 /* GICC_CTLR -> s->cpu_ctlr[cpu] */ 472 kvm_gicc_access(s, 0x00, cpu, ®, false); 473 s->cpu_ctlr[cpu] = reg; 474 475 /* GICC_PMR -> s->priority_mask[cpu] */ 476 kvm_gicc_access(s, 0x04, cpu, ®, false); 477 s->priority_mask[cpu] = (reg & 0xff); 478 479 /* GICC_BPR -> s->bpr[cpu] */ 480 kvm_gicc_access(s, 0x08, cpu, ®, false); 481 s->bpr[cpu] = (reg & 0x7); 482 483 /* GICC_ABPR -> s->abpr[cpu] */ 484 kvm_gicc_access(s, 0x1c, cpu, ®, false); 485 s->abpr[cpu] = (reg & 0x7); 486 487 /* GICC_APRn -> s->apr[n][cpu] */ 488 for (i = 0; i < 4; i++) { 489 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, false); 490 s->apr[i][cpu] = reg; 491 } 492 } 493 } 494 495 static void kvm_arm_gic_reset(DeviceState *dev) 496 { 497 GICState *s = ARM_GIC_COMMON(dev); 498 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); 499 500 kgc->parent_reset(dev); 501 502 if (kvm_arm_gic_can_save_restore(s)) { 503 kvm_arm_gic_put(s); 504 } 505 } 506 507 static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) 508 { 509 int i; 510 GICState *s = KVM_ARM_GIC(dev); 511 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); 512 Error *local_err = NULL; 513 int ret; 514 515 kgc->parent_realize(dev, &local_err); 516 if (local_err) { 517 error_propagate(errp, local_err); 518 return; 519 } 520 521 if (s->security_extn) { 522 error_setg(errp, "the in-kernel VGIC does not implement the " 523 "security extensions"); 524 return; 525 } 526 527 gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL); 528 529 for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { 530 qemu_irq irq = qdev_get_gpio_in(dev, i); 531 kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i); 532 } 533 534 /* Try to create the device via the device control API */ 535 s->dev_fd = -1; 536 ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); 537 if (ret >= 0) { 538 s->dev_fd = ret; 539 540 /* Newstyle API is used, we may have attributes */ 541 if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) { 542 uint32_t numirqs = s->num_irq; 543 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, 544 &numirqs, true); 545 } 546 /* Tell the kernel to complete VGIC initialization now */ 547 if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, 548 KVM_DEV_ARM_VGIC_CTRL_INIT)) { 549 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, 550 KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); 551 } 552 } else if (ret != -ENODEV && ret != -ENOTSUP) { 553 error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); 554 return; 555 } 556 557 /* Distributor */ 558 kvm_arm_register_device(&s->iomem, 559 (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) 560 | KVM_VGIC_V2_ADDR_TYPE_DIST, 561 KVM_DEV_ARM_VGIC_GRP_ADDR, 562 KVM_VGIC_V2_ADDR_TYPE_DIST, 563 s->dev_fd); 564 /* CPU interface for current core. Unlike arm_gic, we don't 565 * provide the "interface for core #N" memory regions, because 566 * cores with a VGIC don't have those. 567 */ 568 kvm_arm_register_device(&s->cpuiomem[0], 569 (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) 570 | KVM_VGIC_V2_ADDR_TYPE_CPU, 571 KVM_DEV_ARM_VGIC_GRP_ADDR, 572 KVM_VGIC_V2_ADDR_TYPE_CPU, 573 s->dev_fd); 574 575 if (!kvm_arm_gic_can_save_restore(s)) { 576 error_setg(&s->migration_blocker, "This operating system kernel does " 577 "not support vGICv2 migration"); 578 migrate_add_blocker(s->migration_blocker); 579 } 580 } 581 582 static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) 583 { 584 DeviceClass *dc = DEVICE_CLASS(klass); 585 ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); 586 KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); 587 588 agcc->pre_save = kvm_arm_gic_get; 589 agcc->post_load = kvm_arm_gic_put; 590 kgc->parent_realize = dc->realize; 591 kgc->parent_reset = dc->reset; 592 dc->realize = kvm_arm_gic_realize; 593 dc->reset = kvm_arm_gic_reset; 594 } 595 596 static const TypeInfo kvm_arm_gic_info = { 597 .name = TYPE_KVM_ARM_GIC, 598 .parent = TYPE_ARM_GIC_COMMON, 599 .instance_size = sizeof(GICState), 600 .class_init = kvm_arm_gic_class_init, 601 .class_size = sizeof(KVMARMGICClass), 602 }; 603 604 static void kvm_arm_gic_register_types(void) 605 { 606 type_register_static(&kvm_arm_gic_info); 607 } 608 609 type_init(kvm_arm_gic_register_types) 610