xref: /openbmc/qemu/hw/intc/arm_gicv3_common.c (revision db725815985654007ade0fd53590d613fd657208)
/*
 * ARM GICv3 support - common bits of emulated and KVM kernel model
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/cpu.h"
#include "hw/intc/arm_gicv3_common.h"
#include "migration/vmstate.h"
#include "gicv3_internal.h"
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"


static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
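    /*
     * Concretely (a worked example, not extra logic): GIC_INTERNAL is the
     * 32 SGIs+PPIs, so GIC_INTERNAL / 8 is 4 bytes, i.e. one 32-bit bitmap
     * word. Each memmove below therefore shifts the distributor bitmaps down
     * by one word, moving the state for interrupt 32 from bit position 64
     * (where the buggy QEMU placed it) back to bit position 32.
     */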
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * This version of QEMU does not have the bug, so set the flag to true
     * to record that fact; this is necessary for a subsequent migration
     * from this (fixed) QEMU to be handled correctly.
     */
    cs->gicd_no_migration_shift_bug = true;
}

static int gicv3_pre_save(void *opaque)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    if (c->pre_save) {
        c->pre_save(s);
    }

    return 0;
}

static int gicv3_post_load(void *opaque, int version_id)
{
    GICv3State *s = (GICv3State *)opaque;
    ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

    gicv3_gicd_no_migration_shift_bug_post_load(s);

    if (c->post_load) {
        c->post_load(s);
    }
    return 0;
}

static bool virt_state_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->num_list_regs != 0;
}

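/*
 * The "virt" subsection carries the ich_* virtualization control registers.
 * It is only needed, and only transmitted, when the CPU interface actually
 * has list registers, i.e. when the GIC's virtualization interface is
 * present.
 */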
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static int vmstate_gicv3_cpu_pre_load(void *opaque)
{
    GICv3CPUState *cs = opaque;

    /*
     * If the sre_el1 subsection is not transferred this
     * means SRE_EL1 is 0x7 (which might not be the same as
     * our reset value).
     */
    cs->icc_sre_el1 = 0x7;
    return 0;
}

static bool icc_sre_el1_reg_needed(void *opaque)
{
    GICv3CPUState *cs = opaque;

    return cs->icc_sre_el1 != 7;
}

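/*
 * Taken together with vmstate_gicv3_cpu_pre_load() above: this subsection
 * is only transmitted when icc_sre_el1 differs from 0x7, and a destination
 * that does not receive it defaults the register to 0x7, so an absent
 * subsection and an explicit 0x7 are equivalent. (0x7 is the value with the
 * SRE, DFB and DIB bits of ICC_SRE_EL1 all set.)
 */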
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};

static int gicv3_pre_load(void *opaque)
{
    GICv3State *cs = opaque;

    /*
     * The gicd_no_migration_shift_bug flag provides migration compatibility
     * with older QEMU versions, which may have the GICD bitmap shift bug
     * when running under KVM. Strictly, what we want to know is whether the
     * migration source was using KVM. Since we have no way to determine
     * that, we look at whether the destination is using KVM; this is close
     * enough because, for the older QEMU versions with this bug, KVM -> TCG
     * migration didn't work anyway. If the source is a newer QEMU without
     * this bug it will transmit the migration subsection which sets the flag
     * to true; otherwise it will remain set to the value we select here.
     */
    if (kvm_enabled()) {
        cs->gicd_no_migration_shift_bug = false;
    }

    return 0;
}

static bool needed_always(void *opaque)
{
    return true;
}

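/*
 * This subsection is unconditionally transmitted by QEMU versions that have
 * the shift-bug fix; receiving it overrides the pessimistic default chosen
 * in gicv3_pre_load() with the flag value recorded by the source.
 */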
const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};

void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
                              const MemoryRegionOps *ops, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(s);
    int rdist_capacity = 0;
    int i;

    for (i = 0; i < s->nb_redist_regions; i++) {
        rdist_capacity += s->redist_region_count[i];
    }
    if (rdist_capacity < s->num_cpu) {
        error_setg(errp, "Capacity of the redist regions(%d) "
                   "is less than number of vcpus(%d)",
                   rdist_capacity, s->num_cpu);
        return;
    }

    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] spi
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
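    /*
     * For example (purely illustrative numbers, not defaults): with
     * num_irq == 256 and num_cpu == 2 this computes 224 SPI lines plus
     * 2 * 32 PPI lines, i.e. 288 GPIO inputs in total.
     */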
    i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
    qdev_init_gpio_in(DEVICE(s), handler, i);

    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
    }
    for (i = 0; i < s->num_cpu; i++) {
        sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
    }

    memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                          "gicv3_dist", 0x10000);
    sysbus_init_mmio(sbd, &s->iomem_dist);

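    /*
     * Each further sysbus MMIO region covers one redist_region_count[]
     * entry; every redistributor within it occupies GICV3_REDIST_SIZE bytes
     * (at this revision that is 0x20000, i.e. the RD_base and SGI_base 64KB
     * frames, since LPIs and VLPIs are not supported).
     */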
    s->iomem_redist = g_new0(MemoryRegion, s->nb_redist_regions);
    for (i = 0; i < s->nb_redist_regions; i++) {
        char *name = g_strdup_printf("gicv3_redist_region[%d]", i);

        memory_region_init_io(&s->iomem_redist[i], OBJECT(s),
                              ops ? &ops[1] : NULL, s, name,
                              s->redist_region_count[i] * GICV3_REDIST_SIZE);
        sysbus_init_mmio(sbd, &s->iomem_redist[i]);
        g_free(name);
    }
}

static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    /* The revision property is actually reserved and is currently used only
     * to keep the interface compatible with the GICv2 code, avoiding extra
     * conditions. However, in the future it could be used, for example, if
     * we implement GICv4.
     */
    if (s->revision != 3) {
        error_setg(errp, "unsupported GIC revision %d", s->revision);
        return;
    }

    if (s->num_irq > GICV3_MAXIRQ) {
        error_setg(errp,
                   "requested %u interrupt lines exceeds GIC maximum %d",
                   s->num_irq, GICV3_MAXIRQ);
        return;
    }
    if (s->num_irq < GIC_INTERNAL) {
        error_setg(errp,
                   "requested %u interrupt lines is below GIC minimum %d",
                   s->num_irq, GIC_INTERNAL);
        return;
    }

    /* ITLinesNumber is represented as (N / 32) - 1 (for example, a num_irq
     * of 96 corresponds to ITLinesNumber == 2), so this is an
     * implementation-imposed restriction, not an architectural one, which
     * means we don't have to deal with bitfields where only some of the
     * bits in a 32-bit word are valid.
     */
    if (s->num_irq % 32) {
        error_setg(errp,
                   "%d interrupt lines unsupported: not divisible by 32",
                   s->num_irq);
        return;
    }

    s->cpu = g_new0(GICv3CPUState, s->num_cpu);

    for (i = 0; i < s->num_cpu; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        uint64_t cpu_affid;
        int last;

        s->cpu[i].cpu = cpu;
        s->cpu[i].gic = s;
        /* Store GICv3CPUState in CPUARMState gicv3state pointer */
        gicv3_set_gicv3state(cpu, &s->cpu[i]);

        /* Pre-construct the GICR_TYPER:
         * For our implementation:
         *  Top 32 bits are the affinity value of the associated CPU
         *  CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
         *  Processor_Number == CPU index starting from 0
         *  DPGS == 0 (GICR_CTLR.DPG* not supported)
         *  Last == 1 if this is the last redistributor in a series of
         *            contiguous redistributor pages
         *  DirectLPI == 0 (direct injection of LPIs not supported)
         *  VLPIS == 0 (virtual LPIs not supported)
         *  PLPIS == 0 (physical LPIs not supported)
         */
        cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
        last = (i == s->num_cpu - 1);

        /* The CPU mp-affinity property is in MPIDR register format; squash
         * the affinity bytes into 32 bits as the GICR_TYPER has them.
         */
        cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
                     (cpu_affid & 0xFFFFFF);
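        /*
         * As a worked example with illustrative affinity values: an
         * mp-affinity of 0x0100020304 (Aff3 == 0x01 in bits [39:32],
         * Aff2.Aff1.Aff0 == 0x02.0x03.0x04 in bits [23:0]) squashes to
         * 0x01020304, which lands in GICR_TYPER bits [63:32] below.
         * In the assignment that follows, (1 << 24) encodes
         * CommonLPIAff (bits [25:24]) == 01, (i << 8) is the
         * Processor_Number field (bits [23:8]) and (last << 4) is the
         * Last bit (bit 4).
         */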
        s->cpu[i].gicr_typer = (cpu_affid << 32) |
            (1 << 24) |
            (i << 8) |
            (last << 4);
    }
}

static void arm_gicv3_finalize(Object *obj)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    g_free(s->redist_region_count);
}

static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        cs->hppi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset, so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however, having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    s->gicd_no_migration_shift_bug = true;
}

static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
                                      bool secure_boot)
{
    GICv3State *s = ARM_GICV3_COMMON(obj);

    if (s->security_extn && !secure_boot) {
        /* We're directly booting a kernel into NonSecure. If this GIC
         * implements the security extensions then we must configure it
         * to have all the interrupts be NonSecure (this is a job that
         * is done by the Secure boot firmware in real hardware, and in
         * this mode QEMU is acting as a minimalist firmware-and-bootloader
         * equivalent).
         */
        s->irq_reset_nonsecure = true;
    }
}

static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_END_OF_LIST(),
};

static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);

    dc->reset = arm_gicv3_common_reset;
    dc->realize = arm_gicv3_common_realize;
    dc->props = arm_gicv3_common_properties;
    dc->vmsd = &vmstate_gicv3;
    albifc->arm_linux_init = arm_gic_common_linux_init;
}

static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};

static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)