1 /* 2 * ARM Generic Interrupt Controller using KVM in-kernel support 3 * 4 * Copyright (c) 2012 Linaro Limited 5 * Written by Peter Maydell 6 * Save/Restore logic added by Christoffer Dall. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation, either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, see <http://www.gnu.org/licenses/>. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qapi/error.h" 24 #include "qemu-common.h" 25 #include "cpu.h" 26 #include "hw/sysbus.h" 27 #include "migration/blocker.h" 28 #include "sysemu/kvm.h" 29 #include "kvm_arm.h" 30 #include "gic_internal.h" 31 #include "vgic_common.h" 32 33 #define TYPE_KVM_ARM_GIC "kvm-arm-gic" 34 #define KVM_ARM_GIC(obj) \ 35 OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC) 36 #define KVM_ARM_GIC_CLASS(klass) \ 37 OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC) 38 #define KVM_ARM_GIC_GET_CLASS(obj) \ 39 OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC) 40 41 typedef struct KVMARMGICClass { 42 ARMGICCommonClass parent_class; 43 DeviceRealize parent_realize; 44 void (*parent_reset)(DeviceState *dev); 45 } KVMARMGICClass; 46 47 void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level) 48 { 49 /* Meaning of the 'irq' parameter: 50 * [0..N-1] : external interrupts 51 * [N..N+31] : PPI (internal) interrupts for CPU 0 52 * [N+32..N+63] : PPI (internal interrupts for CPU 1 53 * ... 54 * Convert this to the kernel's desired encoding, which 55 * has separate fields in the irq number for type, 56 * CPU number and interrupt number. 57 */ 58 int kvm_irq, irqtype, cpu; 59 60 if (irq < (num_irq - GIC_INTERNAL)) { 61 /* External interrupt. The kernel numbers these like the GIC 62 * hardware, with external interrupt IDs starting after the 63 * internal ones. 64 */ 65 irqtype = KVM_ARM_IRQ_TYPE_SPI; 66 cpu = 0; 67 irq += GIC_INTERNAL; 68 } else { 69 /* Internal interrupt: decode into (cpu, interrupt id) */ 70 irqtype = KVM_ARM_IRQ_TYPE_PPI; 71 irq -= (num_irq - GIC_INTERNAL); 72 cpu = irq / GIC_INTERNAL; 73 irq %= GIC_INTERNAL; 74 } 75 kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) 76 | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq; 77 78 kvm_set_irq(kvm_state, kvm_irq, !!level); 79 } 80 81 static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level) 82 { 83 GICState *s = (GICState *)opaque; 84 85 kvm_arm_gic_set_irq(s->num_irq, irq, level); 86 } 87 88 static bool kvm_arm_gic_can_save_restore(GICState *s) 89 { 90 return s->dev_fd >= 0; 91 } 92 93 #define KVM_VGIC_ATTR(offset, cpu) \ 94 ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \ 95 KVM_DEV_ARM_VGIC_CPUID_MASK) | \ 96 (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \ 97 KVM_DEV_ARM_VGIC_OFFSET_MASK)) 98 99 static void kvm_gicd_access(GICState *s, int offset, int cpu, 100 uint32_t *val, bool write) 101 { 102 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, 103 KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort); 104 } 105 106 static void kvm_gicc_access(GICState *s, int offset, int cpu, 107 uint32_t *val, bool write) 108 { 109 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS, 110 KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort); 111 } 112 113 #define for_each_irq_reg(_ctr, _max_irq, _field_width) \ 114 for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++) 115 116 /* 117 * Translate from the in-kernel field for an IRQ value to/from the qemu 118 * representation. 119 */ 120 typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu, 121 uint32_t *field, bool to_kernel); 122 123 /* synthetic translate function used for clear/set registers to completely 124 * clear a setting using a clear-register before setting the remaining bits 125 * using a set-register */ 126 static void translate_clear(GICState *s, int irq, int cpu, 127 uint32_t *field, bool to_kernel) 128 { 129 if (to_kernel) { 130 *field = ~0; 131 } else { 132 /* does not make sense: qemu model doesn't use set/clear regs */ 133 abort(); 134 } 135 } 136 137 static void translate_group(GICState *s, int irq, int cpu, 138 uint32_t *field, bool to_kernel) 139 { 140 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 141 142 if (to_kernel) { 143 *field = GIC_TEST_GROUP(irq, cm); 144 } else { 145 if (*field & 1) { 146 GIC_SET_GROUP(irq, cm); 147 } 148 } 149 } 150 151 static void translate_enabled(GICState *s, int irq, int cpu, 152 uint32_t *field, bool to_kernel) 153 { 154 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 155 156 if (to_kernel) { 157 *field = GIC_TEST_ENABLED(irq, cm); 158 } else { 159 if (*field & 1) { 160 GIC_SET_ENABLED(irq, cm); 161 } 162 } 163 } 164 165 static void translate_pending(GICState *s, int irq, int cpu, 166 uint32_t *field, bool to_kernel) 167 { 168 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 169 170 if (to_kernel) { 171 *field = gic_test_pending(s, irq, cm); 172 } else { 173 if (*field & 1) { 174 GIC_SET_PENDING(irq, cm); 175 /* TODO: Capture is level-line is held high in the kernel */ 176 } 177 } 178 } 179 180 static void translate_active(GICState *s, int irq, int cpu, 181 uint32_t *field, bool to_kernel) 182 { 183 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; 184 185 if (to_kernel) { 186 *field = GIC_TEST_ACTIVE(irq, cm); 187 } else { 188 if (*field & 1) { 189 GIC_SET_ACTIVE(irq, cm); 190 } 191 } 192 } 193 194 static void translate_trigger(GICState *s, int irq, int cpu, 195 uint32_t *field, bool to_kernel) 196 { 197 if (to_kernel) { 198 *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0; 199 } else { 200 if (*field & 0x2) { 201 GIC_SET_EDGE_TRIGGER(irq); 202 } 203 } 204 } 205 206 static void translate_priority(GICState *s, int irq, int cpu, 207 uint32_t *field, bool to_kernel) 208 { 209 if (to_kernel) { 210 *field = GIC_GET_PRIORITY(irq, cpu) & 0xff; 211 } else { 212 gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED); 213 } 214 } 215 216 static void translate_targets(GICState *s, int irq, int cpu, 217 uint32_t *field, bool to_kernel) 218 { 219 if (to_kernel) { 220 *field = s->irq_target[irq] & 0xff; 221 } else { 222 s->irq_target[irq] = *field & 0xff; 223 } 224 } 225 226 static void translate_sgisource(GICState *s, int irq, int cpu, 227 uint32_t *field, bool to_kernel) 228 { 229 if (to_kernel) { 230 *field = s->sgi_pending[irq][cpu] & 0xff; 231 } else { 232 s->sgi_pending[irq][cpu] = *field & 0xff; 233 } 234 } 235 236 /* Read a register group from the kernel VGIC */ 237 static void kvm_dist_get(GICState *s, uint32_t offset, int width, 238 int maxirq, vgic_translate_fn translate_fn) 239 { 240 uint32_t reg; 241 int i; 242 int j; 243 int irq; 244 int cpu; 245 int regsz = 32 / width; /* irqs per kernel register */ 246 uint32_t field; 247 248 for_each_irq_reg(i, maxirq, width) { 249 irq = i * regsz; 250 cpu = 0; 251 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { 252 kvm_gicd_access(s, offset, cpu, ®, false); 253 for (j = 0; j < regsz; j++) { 254 field = extract32(reg, j * width, width); 255 translate_fn(s, irq + j, cpu, &field, false); 256 } 257 258 cpu++; 259 } 260 offset += 4; 261 } 262 } 263 264 /* Write a register group to the kernel VGIC */ 265 static void kvm_dist_put(GICState *s, uint32_t offset, int width, 266 int maxirq, vgic_translate_fn translate_fn) 267 { 268 uint32_t reg; 269 int i; 270 int j; 271 int irq; 272 int cpu; 273 int regsz = 32 / width; /* irqs per kernel register */ 274 uint32_t field; 275 276 for_each_irq_reg(i, maxirq, width) { 277 irq = i * regsz; 278 cpu = 0; 279 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { 280 reg = 0; 281 for (j = 0; j < regsz; j++) { 282 translate_fn(s, irq + j, cpu, &field, true); 283 reg = deposit32(reg, j * width, width, field); 284 } 285 kvm_gicd_access(s, offset, cpu, ®, true); 286 287 cpu++; 288 } 289 offset += 4; 290 } 291 } 292 293 static void kvm_arm_gic_put(GICState *s) 294 { 295 uint32_t reg; 296 int i; 297 int cpu; 298 int num_cpu; 299 int num_irq; 300 301 /* Note: We do the restore in a slightly different order than the save 302 * (where the order doesn't matter and is simply ordered according to the 303 * register offset values */ 304 305 /***************************************************************** 306 * Distributor State 307 */ 308 309 /* s->ctlr -> GICD_CTLR */ 310 reg = s->ctlr; 311 kvm_gicd_access(s, 0x0, 0, ®, true); 312 313 /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */ 314 kvm_gicd_access(s, 0x4, 0, ®, false); 315 num_irq = ((reg & 0x1f) + 1) * 32; 316 num_cpu = ((reg & 0xe0) >> 5) + 1; 317 318 if (num_irq < s->num_irq) { 319 fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n", 320 s->num_irq, num_irq); 321 abort(); 322 } else if (num_cpu != s->num_cpu) { 323 fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n", 324 s->num_cpu, num_cpu); 325 /* Did we not create the VCPUs in the kernel yet? */ 326 abort(); 327 } 328 329 /* TODO: Consider checking compatibility with the IIDR ? */ 330 331 /* irq_state[n].enabled -> GICD_ISENABLERn */ 332 kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear); 333 kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled); 334 335 /* irq_state[n].group -> GICD_IGROUPRn */ 336 kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group); 337 338 /* s->irq_target[irq] -> GICD_ITARGETSRn 339 * (restore targets before pending to ensure the pending state is set on 340 * the appropriate CPU interfaces in the kernel) */ 341 kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets); 342 343 /* irq_state[n].trigger -> GICD_ICFGRn 344 * (restore configuration registers before pending IRQs so we treat 345 * level/edge correctly) */ 346 kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger); 347 348 /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */ 349 kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear); 350 kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending); 351 352 /* irq_state[n].active -> GICD_ISACTIVERn */ 353 kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear); 354 kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active); 355 356 357 /* s->priorityX[irq] -> ICD_IPRIORITYRn */ 358 kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority); 359 360 /* s->sgi_pending -> ICD_CPENDSGIRn */ 361 kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear); 362 kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource); 363 364 365 /***************************************************************** 366 * CPU Interface(s) State 367 */ 368 369 for (cpu = 0; cpu < s->num_cpu; cpu++) { 370 /* s->cpu_ctlr[cpu] -> GICC_CTLR */ 371 reg = s->cpu_ctlr[cpu]; 372 kvm_gicc_access(s, 0x00, cpu, ®, true); 373 374 /* s->priority_mask[cpu] -> GICC_PMR */ 375 reg = (s->priority_mask[cpu] & 0xff); 376 kvm_gicc_access(s, 0x04, cpu, ®, true); 377 378 /* s->bpr[cpu] -> GICC_BPR */ 379 reg = (s->bpr[cpu] & 0x7); 380 kvm_gicc_access(s, 0x08, cpu, ®, true); 381 382 /* s->abpr[cpu] -> GICC_ABPR */ 383 reg = (s->abpr[cpu] & 0x7); 384 kvm_gicc_access(s, 0x1c, cpu, ®, true); 385 386 /* s->apr[n][cpu] -> GICC_APRn */ 387 for (i = 0; i < 4; i++) { 388 reg = s->apr[i][cpu]; 389 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, true); 390 } 391 } 392 } 393 394 static void kvm_arm_gic_get(GICState *s) 395 { 396 uint32_t reg; 397 int i; 398 int cpu; 399 400 /***************************************************************** 401 * Distributor State 402 */ 403 404 /* GICD_CTLR -> s->ctlr */ 405 kvm_gicd_access(s, 0x0, 0, ®, false); 406 s->ctlr = reg; 407 408 /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */ 409 kvm_gicd_access(s, 0x4, 0, ®, false); 410 s->num_irq = ((reg & 0x1f) + 1) * 32; 411 s->num_cpu = ((reg & 0xe0) >> 5) + 1; 412 413 if (s->num_irq > GIC_MAXIRQ) { 414 fprintf(stderr, "Too many IRQs reported from the kernel: %d\n", 415 s->num_irq); 416 abort(); 417 } 418 419 /* GICD_IIDR -> ? */ 420 kvm_gicd_access(s, 0x8, 0, ®, false); 421 422 /* Clear all the IRQ settings */ 423 for (i = 0; i < s->num_irq; i++) { 424 memset(&s->irq_state[i], 0, sizeof(s->irq_state[0])); 425 } 426 427 /* GICD_IGROUPRn -> irq_state[n].group */ 428 kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group); 429 430 /* GICD_ISENABLERn -> irq_state[n].enabled */ 431 kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled); 432 433 /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */ 434 kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending); 435 436 /* GICD_ISACTIVERn -> irq_state[n].active */ 437 kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active); 438 439 /* GICD_ICFRn -> irq_state[n].trigger */ 440 kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger); 441 442 /* GICD_IPRIORITYRn -> s->priorityX[irq] */ 443 kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority); 444 445 /* GICD_ITARGETSRn -> s->irq_target[irq] */ 446 kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets); 447 448 /* GICD_CPENDSGIRn -> s->sgi_pending */ 449 kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource); 450 451 452 /***************************************************************** 453 * CPU Interface(s) State 454 */ 455 456 for (cpu = 0; cpu < s->num_cpu; cpu++) { 457 /* GICC_CTLR -> s->cpu_ctlr[cpu] */ 458 kvm_gicc_access(s, 0x00, cpu, ®, false); 459 s->cpu_ctlr[cpu] = reg; 460 461 /* GICC_PMR -> s->priority_mask[cpu] */ 462 kvm_gicc_access(s, 0x04, cpu, ®, false); 463 s->priority_mask[cpu] = (reg & 0xff); 464 465 /* GICC_BPR -> s->bpr[cpu] */ 466 kvm_gicc_access(s, 0x08, cpu, ®, false); 467 s->bpr[cpu] = (reg & 0x7); 468 469 /* GICC_ABPR -> s->abpr[cpu] */ 470 kvm_gicc_access(s, 0x1c, cpu, ®, false); 471 s->abpr[cpu] = (reg & 0x7); 472 473 /* GICC_APRn -> s->apr[n][cpu] */ 474 for (i = 0; i < 4; i++) { 475 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, false); 476 s->apr[i][cpu] = reg; 477 } 478 } 479 } 480 481 static void kvm_arm_gic_reset(DeviceState *dev) 482 { 483 GICState *s = ARM_GIC_COMMON(dev); 484 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); 485 486 kgc->parent_reset(dev); 487 488 if (kvm_arm_gic_can_save_restore(s)) { 489 kvm_arm_gic_put(s); 490 } 491 } 492 493 static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) 494 { 495 int i; 496 GICState *s = KVM_ARM_GIC(dev); 497 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); 498 Error *local_err = NULL; 499 int ret; 500 501 kgc->parent_realize(dev, &local_err); 502 if (local_err) { 503 error_propagate(errp, local_err); 504 return; 505 } 506 507 if (s->security_extn) { 508 error_setg(errp, "the in-kernel VGIC does not implement the " 509 "security extensions"); 510 return; 511 } 512 513 if (!kvm_arm_gic_can_save_restore(s)) { 514 error_setg(&s->migration_blocker, "This operating system kernel does " 515 "not support vGICv2 migration"); 516 migrate_add_blocker(s->migration_blocker, &local_err); 517 if (local_err) { 518 error_propagate(errp, local_err); 519 error_free(s->migration_blocker); 520 return; 521 } 522 } 523 524 gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL); 525 526 for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { 527 qemu_irq irq = qdev_get_gpio_in(dev, i); 528 kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i); 529 } 530 531 /* Try to create the device via the device control API */ 532 s->dev_fd = -1; 533 ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); 534 if (ret >= 0) { 535 s->dev_fd = ret; 536 537 /* Newstyle API is used, we may have attributes */ 538 if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) { 539 uint32_t numirqs = s->num_irq; 540 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, 541 &numirqs, true, &error_abort); 542 } 543 /* Tell the kernel to complete VGIC initialization now */ 544 if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, 545 KVM_DEV_ARM_VGIC_CTRL_INIT)) { 546 kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, 547 KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, 548 &error_abort); 549 } 550 } else if (ret != -ENODEV && ret != -ENOTSUP) { 551 error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); 552 return; 553 } 554 555 /* Distributor */ 556 kvm_arm_register_device(&s->iomem, 557 (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) 558 | KVM_VGIC_V2_ADDR_TYPE_DIST, 559 KVM_DEV_ARM_VGIC_GRP_ADDR, 560 KVM_VGIC_V2_ADDR_TYPE_DIST, 561 s->dev_fd); 562 /* CPU interface for current core. Unlike arm_gic, we don't 563 * provide the "interface for core #N" memory regions, because 564 * cores with a VGIC don't have those. 565 */ 566 kvm_arm_register_device(&s->cpuiomem[0], 567 (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) 568 | KVM_VGIC_V2_ADDR_TYPE_CPU, 569 KVM_DEV_ARM_VGIC_GRP_ADDR, 570 KVM_VGIC_V2_ADDR_TYPE_CPU, 571 s->dev_fd); 572 573 if (kvm_has_gsi_routing()) { 574 /* set up irq routing */ 575 kvm_init_irq_routing(kvm_state); 576 for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) { 577 kvm_irqchip_add_irq_route(kvm_state, i, 0, i); 578 } 579 580 kvm_gsi_routing_allowed = true; 581 582 kvm_irqchip_commit_routes(kvm_state); 583 } 584 } 585 586 static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) 587 { 588 DeviceClass *dc = DEVICE_CLASS(klass); 589 ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); 590 KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); 591 592 agcc->pre_save = kvm_arm_gic_get; 593 agcc->post_load = kvm_arm_gic_put; 594 device_class_set_parent_realize(dc, kvm_arm_gic_realize, 595 &kgc->parent_realize); 596 device_class_set_parent_reset(dc, kvm_arm_gic_reset, &kgc->parent_reset); 597 } 598 599 static const TypeInfo kvm_arm_gic_info = { 600 .name = TYPE_KVM_ARM_GIC, 601 .parent = TYPE_ARM_GIC_COMMON, 602 .instance_size = sizeof(GICState), 603 .class_init = kvm_arm_gic_class_init, 604 .class_size = sizeof(KVMARMGICClass), 605 }; 606 607 static void kvm_arm_gic_register_types(void) 608 { 609 type_register_static(&kvm_arm_gic_info); 610 } 611 612 type_init(kvm_arm_gic_register_types) 613