/*
 * ARM Generic Interrupt Controller using KVM in-kernel support
 *
 * Copyright (c) 2012 Linaro Limited
 * Written by Peter Maydell
 * Save/Restore logic added by Christoffer Dall.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "migration/blocker.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "gic_internal.h"
#include "vgic_common.h"
#include "qom/object.h"

#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
typedef struct KVMARMGICClass KVMARMGICClass;
/* This is reusing the GICState typedef from ARM_GIC_COMMON */
DECLARE_OBJ_CHECKERS(GICState, KVMARMGICClass,
                     KVM_ARM_GIC, TYPE_KVM_ARM_GIC)

struct KVMARMGICClass {
    ARMGICCommonClass parent_class;
    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
};

void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     * Convert this to the kernel's desired encoding, which
     * has separate fields in the irq number for type,
     * CPU number and interrupt number.
     */
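    /*
     * For instance, with a hypothetical num_irq == 64 there are 32 external
     * lines: 'irq' 0..31 are SPIs (kernel interrupt IDs 32..63 after the
     * "irq += GIC_INTERNAL" below), 'irq' 32..63 are the internal interrupts
     * of CPU 0 and, with a second CPU, 'irq' 64..95 those of CPU 1.
     */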
    int irqtype, cpu;

    if (irq < (num_irq - GIC_INTERNAL)) {
        /* External interrupt. The kernel numbers these like the GIC
         * hardware, with external interrupt IDs starting after the
         * internal ones.
         */
        irqtype = KVM_ARM_IRQ_TYPE_SPI;
        cpu = 0;
        irq += GIC_INTERNAL;
    } else {
        /* Internal interrupt: decode into (cpu, interrupt id) */
        irqtype = KVM_ARM_IRQ_TYPE_PPI;
        irq -= (num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
    }
    kvm_arm_set_irq(cpu, irqtype, irq, !!level);
}

static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level)
{
    GICState *s = (GICState *)opaque;

    kvm_arm_gic_set_irq(s->num_irq, irq, level);
}

static bool kvm_arm_gic_can_save_restore(GICState *s)
{
    return s->dev_fd >= 0;
}

#define KVM_VGIC_ATTR(offset, cpu) \
    ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
      KVM_DEV_ARM_VGIC_CPUID_MASK) | \
     (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \
      KVM_DEV_ARM_VGIC_OFFSET_MASK))
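/*
 * For example, KVM_VGIC_ATTR(0x100, 1) names the distributor word at offset
 * 0x100 (GICD_ISENABLER0, banked for the private interrupts) as seen by
 * vCPU 1 when used with the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group below.
 */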

static void kvm_gicd_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
}

static void kvm_gicc_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort);
}

#define for_each_irq_reg(_ctr, _max_irq, _field_width) \
    for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++)
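/*
 * E.g. with 1-bit fields each 32-bit register covers 32 interrupts, so a
 * 96-interrupt GIC takes three register accesses per group; with 8-bit
 * fields (priority, targets) it takes 24.
 */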

/*
 * Translate an IRQ's in-kernel register field to/from the QEMU
 * representation.
 */
typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
                                  uint32_t *field, bool to_kernel);

/* Synthetic translate function used for the clear/set register pairs:
 * completely clear a setting using the clear-register before setting the
 * remaining bits using the set-register. */
static void translate_clear(GICState *s, int irq, int cpu,
                            uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = ~0;
    } else {
        /* does not make sense: qemu model doesn't use set/clear regs */
        abort();
    }
}

static void translate_group(GICState *s, int irq, int cpu,
                            uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_DIST_TEST_GROUP(irq, cm);
    } else {
        if (*field & 1) {
            GIC_DIST_SET_GROUP(irq, cm);
        }
    }
}

static void translate_enabled(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_DIST_TEST_ENABLED(irq, cm);
    } else {
        if (*field & 1) {
            GIC_DIST_SET_ENABLED(irq, cm);
        }
    }
}

static void translate_pending(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = gic_test_pending(s, irq, cm);
    } else {
        if (*field & 1) {
            GIC_DIST_SET_PENDING(irq, cm);
            /* TODO: Capture if the level line is held high in the kernel */
        }
    }
}

static void translate_active(GICState *s, int irq, int cpu,
                             uint32_t *field, bool to_kernel)
{
    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

    if (to_kernel) {
        *field = GIC_DIST_TEST_ACTIVE(irq, cm);
    } else {
        if (*field & 1) {
            GIC_DIST_SET_ACTIVE(irq, cm);
        }
    }
}

static void translate_trigger(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
    } else {
        if (*field & 0x2) {
            GIC_DIST_SET_EDGE_TRIGGER(irq);
        }
    }
}

static void translate_priority(GICState *s, int irq, int cpu,
                               uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff;
    } else {
        gic_dist_set_priority(s, cpu, irq,
                              *field & 0xff, MEMTXATTRS_UNSPECIFIED);
    }
}

static void translate_targets(GICState *s, int irq, int cpu,
                              uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = s->irq_target[irq] & 0xff;
    } else {
        s->irq_target[irq] = *field & 0xff;
    }
}

static void translate_sgisource(GICState *s, int irq, int cpu,
                                uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = s->sgi_pending[irq][cpu] & 0xff;
    } else {
        s->sgi_pending[irq][cpu] = *field & 0xff;
    }
}

/* Read a register group from the kernel VGIC */
static void kvm_dist_get(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per kernel register */
    uint32_t field;

    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
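        /*
         * Registers covering the internal (SGI/PPI) interrupts are banked
         * per CPU, so walk every CPU interface for them; SPI registers are
         * shared and are only read once (cpu == 0).
         */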
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            kvm_gicd_access(s, offset, cpu, &reg, false);
            for (j = 0; j < regsz; j++) {
                field = extract32(reg, j * width, width);
                translate_fn(s, irq + j, cpu, &field, false);
            }

            cpu++;
        }
        offset += 4;
    }
}

/* Write a register group to the kernel VGIC */
static void kvm_dist_put(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per kernel register */
    uint32_t field;

    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
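        /* Same banked-register walk as in kvm_dist_get() above */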
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            reg = 0;
            for (j = 0; j < regsz; j++) {
                translate_fn(s, irq + j, cpu, &field, true);
                reg = deposit32(reg, j * width, width, field);
            }
            kvm_gicd_access(s, offset, cpu, &reg, true);

            cpu++;
        }
        offset += 4;
    }
}

static void kvm_arm_gic_put(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;
    int num_cpu;
    int num_irq;

    /* Note: We do the restore in a slightly different order than the save
     * (where the order doesn't matter and simply follows the register
     * offset values). */

    /*****************************************************************
     * Distributor State
     */

    /* s->ctlr -> GICD_CTLR */
    reg = s->ctlr;
    kvm_gicd_access(s, 0x0, 0, &reg, true);

    /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */
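    /*
     * GICD_TYPER: ITLinesNumber (bits [4:0]) encodes 32 * (N + 1) interrupt
     * IDs; CPUNumber (bits [7:5]) encodes the number of implemented CPU
     * interfaces minus one.
     */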
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;
    num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (num_irq < s->num_irq) {
        fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
                s->num_irq, num_irq);
        abort();
    } else if (num_cpu != s->num_cpu) {
        fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
                s->num_cpu, num_cpu);
        /* Did we not create the VCPUs in the kernel yet? */
        abort();
    }

    /* TODO: Consider checking compatibility with the IIDR ? */

    /* irq_state[n].enabled -> GICD_ISENABLERn */
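    /* clear all bits via GICD_ICENABLERn, then set via GICD_ISENABLERn */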
    kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);

    /* irq_state[n].group -> GICD_IGROUPRn */
    kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);

    /* s->irq_target[irq] -> GICD_ITARGETSRn
     * (restore targets before pending to ensure the pending state is set on
     * the appropriate CPU interfaces in the kernel) */
    kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);

    /* irq_state[n].trigger -> GICD_ICFGRn
     * (restore configuration registers before pending IRQs so we treat
     * level/edge correctly) */
    kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
    kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);

    /* irq_state[n].active -> GICD_ISACTIVERn */
    kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);


    /* s->priorityX[irq] -> ICD_IPRIORITYRn */
    kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);

    /* s->sgi_pending -> ICD_CPENDSGIRn */
    kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
    kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);


    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* s->cpu_ctlr[cpu] -> GICC_CTLR */
        reg = s->cpu_ctlr[cpu];
        kvm_gicc_access(s, 0x00, cpu, &reg, true);

        /* s->priority_mask[cpu] -> GICC_PMR */
        reg = (s->priority_mask[cpu] & 0xff);
        kvm_gicc_access(s, 0x04, cpu, &reg, true);

        /* s->bpr[cpu] -> GICC_BPR */
        reg = (s->bpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x08, cpu, &reg, true);

        /* s->abpr[cpu] -> GICC_ABPR */
        reg = (s->abpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x1c, cpu, &reg, true);

        /* s->apr[n][cpu] -> GICC_APRn */
        for (i = 0; i < 4; i++) {
            reg = s->apr[i][cpu];
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true);
        }
    }
}

static void kvm_arm_gic_get(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;

    /*****************************************************************
     * Distributor State
     */

    /* GICD_CTLR -> s->ctlr */
    kvm_gicd_access(s, 0x0, 0, &reg, false);
    s->ctlr = reg;

    /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    s->num_irq = ((reg & 0x1f) + 1) * 32;
    s->num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (s->num_irq > GIC_MAXIRQ) {
        fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
                s->num_irq);
        abort();
    }

    /* GICD_IIDR -> ? */
    kvm_gicd_access(s, 0x8, 0, &reg, false);

    /* Clear all the IRQ settings */
    for (i = 0; i < s->num_irq; i++) {
        memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
    }

    /* GICD_IGROUPRn -> irq_state[n].group */
    kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);

    /* GICD_ISENABLERn -> irq_state[n].enabled */
    kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);

    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
    kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);

    /* GICD_ISACTIVERn -> irq_state[n].active */
    kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);

    /* GICD_ICFGRn -> irq_state[n].trigger */
    kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
    kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);

    /* GICD_ITARGETSRn -> s->irq_target[irq] */
    kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);

    /* GICD_CPENDSGIRn -> s->sgi_pending */
    kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);


    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* GICC_CTLR -> s->cpu_ctlr[cpu] */
        kvm_gicc_access(s, 0x00, cpu, &reg, false);
        s->cpu_ctlr[cpu] = reg;

        /* GICC_PMR -> s->priority_mask[cpu] */
        kvm_gicc_access(s, 0x04, cpu, &reg, false);
        s->priority_mask[cpu] = (reg & 0xff);

        /* GICC_BPR -> s->bpr[cpu] */
        kvm_gicc_access(s, 0x08, cpu, &reg, false);
        s->bpr[cpu] = (reg & 0x7);

        /* GICC_ABPR -> s->abpr[cpu] */
        kvm_gicc_access(s, 0x1c, cpu, &reg, false);
        s->abpr[cpu] = (reg & 0x7);

        /* GICC_APRn -> s->apr[n][cpu] */
        for (i = 0; i < 4; i++) {
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false);
            s->apr[i][cpu] = reg;
        }
    }
}

static void kvm_arm_gic_reset_hold(Object *obj, ResetType type)
{
    GICState *s = ARM_GIC_COMMON(obj);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);

    if (kgc->parent_phases.hold) {
        kgc->parent_phases.hold(obj, type);
    }

    if (kvm_arm_gic_can_save_restore(s)) {
        kvm_arm_gic_put(s);
    }
}

static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
{
    int i;
    GICState *s = KVM_ARM_GIC(dev);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;
    int ret;

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGIC does not implement the "
                   "security extensions");
        return;
    }

    if (s->virt_extn) {
        error_setg(errp, "the in-kernel VGIC does not implement the "
                   "virtualization extensions");
        return;
    }

    if (!kvm_arm_gic_can_save_restore(s)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv2 migration");
        if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
            return;
        }
    }

    gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL);

    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_irq irq = qdev_get_gpio_in(dev, i);
        kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
    }

    /* Try to create the device via the device control API */
    s->dev_fd = -1;
    ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false);
    if (ret >= 0) {
        s->dev_fd = ret;

        /* New-style API is in use, so we may have attributes */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
            uint32_t numirqs = s->num_irq;
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
                              &numirqs, true, &error_abort);
        }
        /* Tell the kernel to complete VGIC initialization now */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                                  KVM_DEV_ARM_VGIC_CTRL_INIT)) {
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true,
                              &error_abort);
        }
    } else if (kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
        error_append_hint(errp,
                          "Perhaps the host CPU does not support GICv2?\n");
    } else if (ret != -ENODEV && ret != -ENOTSUP) {
        /*
         * Very ancient kernel without KVM_CAP_DEVICE_CTRL: assume that
         * ENODEV or ENOTSUP mean "can't create GICv2 with KVM_CREATE_DEVICE",
         * and that we will get a GICv2 via KVM_CREATE_IRQCHIP.
         */
        error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
        return;
    }

    /* Distributor */
    kvm_arm_register_device(&s->iomem,
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_DIST,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_DIST,
                            s->dev_fd, 0);
    /* CPU interface for current core. Unlike arm_gic, we don't
     * provide the "interface for core #N" memory regions, because
     * cores with a VGIC don't have those.
     */
    kvm_arm_register_device(&s->cpuiomem[0],
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_CPU,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_CPU,
                            s->dev_fd, 0);

    if (kvm_has_gsi_routing()) {
        /* set up irq routing */
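        /*
         * 1:1 mapping: GSI n is routed to pin n of the in-kernel irqchip,
         * which corresponds to SPI n of the VGIC.
         */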
        for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
            kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
        }

        kvm_gsi_routing_allowed = true;

        kvm_irqchip_commit_routes(kvm_state);
    }
}

static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass);
    KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass);

    agcc->pre_save = kvm_arm_gic_get;
    agcc->post_load = kvm_arm_gic_put;
    device_class_set_parent_realize(dc, kvm_arm_gic_realize,
                                    &kgc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, kvm_arm_gic_reset_hold, NULL,
                                       &kgc->parent_phases);
}

static const TypeInfo kvm_arm_gic_info = {
    .name = TYPE_KVM_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = kvm_arm_gic_class_init,
    .class_size = sizeof(KVMARMGICClass),
};

static void kvm_arm_gic_register_types(void)
{
    type_register_static(&kvm_arm_gic_info);
}

type_init(kvm_arm_gic_register_types)