xref: /openbmc/qemu/hw/ppc/spapr_irq.c (revision 31cf4b97)
/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "sysemu/kvm.h"

#include "trace.h"

void spapr_irq_msi_init(sPAPRMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

int spapr_irq_msi_alloc(sPAPRMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(sPAPRMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

void spapr_irq_msi_reset(sPAPRMachineState *spapr)
{
    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}
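
/*
 * Usage sketch (illustrative only, not lifted from an actual caller):
 * a device model needing 'nvec' MSIs reserves a block, claims each
 * backing IRQ, and releases the whole block again on error or unplug.
 * The value returned by spapr_irq_msi_alloc() is already offset by
 * SPAPR_IRQ_MSI, and the same absolute number is handed back to
 * spapr_irq_msi_free(). 'nvec', 'align', 'i' and 'err' are
 * hypothetical locals of the caller.
 *
 *     int first = spapr_irq_msi_alloc(spapr, nvec, align, &err);
 *
 *     if (first >= 0) {
 *         for (i = first; i < first + nvec; i++) {
 *             spapr_irq_claim(spapr, i, false, &err);
 *         }
 *     }
 *     ...
 *     spapr_irq_msi_free(spapr, first, nvec);
 */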


/*
 * XICS IRQ backend.
 */

static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_BASE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}

static void spapr_irq_init_xics(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    int nr_irqs = spapr->irq->nr_irqs;
    Error *local_err = NULL;

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, &local_err)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs,
                                          &local_err);
        }
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            goto error;
        }
        error_free(local_err);
        local_err = NULL;
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs,
                                      &local_err);
    }

error:
    error_propagate(errp, local_err);
}

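/*
 * A source is considered free when no IRQ type flag (LSI or MSI) has
 * been set for it yet; claiming an IRQ records the type below via
 * ics_set_irq_type().
 */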
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int spapr_irq_claim_xics(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}

static void spapr_irq_free_xics(sPAPRMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}

static qemu_irq spapr_qirq_xics(sPAPRMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;

    if (ics_valid_irq(ics, irq)) {
        return spapr->qirqs[srcno];
    }

    return NULL;
}

static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(cpu->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

static void spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = icp_create(OBJECT(cpu), spapr->icp_type, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    cpu->icp = ICP(obj);
}

static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id)
{
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(cpu->icp);
        }
    }
    return 0;
}

static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;
    MachineState *machine = MACHINE(opaque);

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        ics_kvm_set_irq(spapr->ics, srcno, val);
    } else {
        ics_simple_set_irq(spapr->ics, srcno, val);
    }
}

static void spapr_irq_reset_xics(sPAPRMachineState *spapr, Error **errp)
{
    /* TODO: create the KVM XICS device */
}

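/*
 * The XICS number space runs from XICS_IRQ_BASE up to XICS_IRQ_BASE +
 * SPAPR_IRQ_XICS_NR_IRQS; the MSI range is the part of that space
 * starting at SPAPR_IRQ_MSI, which gives the NR_MSIS value below.
 */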
#define SPAPR_IRQ_XICS_NR_IRQS     0x1000
#define SPAPR_IRQ_XICS_NR_MSIS     \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xics = {
    .nr_irqs     = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
};

/*
 * XIVE IRQ backend.
 */
static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    /* KVM XIVE device not yet available */
    if (kvm_enabled()) {
        if (machine_kernel_irqchip_required(machine)) {
            error_setg(errp,
                       "kernel_irqchip requested but no KVM XIVE support");
            return;
        }
    }

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", spapr->irq->nr_irqs);
    /*
     * 8 XIVE END structures per CPU, one for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}

static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }
    return 0;
}

static void spapr_irq_free_xive(sPAPRMachineState *spapr, int irq, int num)
{
    int i;

    for (i = irq; i < irq + num; ++i) {
        spapr_xive_irq_free(spapr->xive, i);
    }
}

static qemu_irq spapr_qirq_xive(sPAPRMachineState *spapr, int irq)
{
    sPAPRXive *xive = spapr->xive;

    if (irq >= xive->nr_irqs) {
        return NULL;
    }

    /* The sPAPR machine/device should have claimed the IRQ beforehand */
    assert(xive_eas_is_valid(&xive->eat[irq]));

    return spapr->qirqs[irq];
}

static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(cpu->tctx, mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}

static void spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Set the OS CAM line early for hotplugged CPUs, as they do
     * not benefit from the reset of the XIVE IRQ backend.
     */
    spapr_xive_set_tctx_os_cam(cpu->tctx);
}

static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
{
    return 0;
}

static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(cpu->tctx);
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;

    xive_source_set_irq(&spapr->xive->source, srcno, val);
}

/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS     0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS     (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xive = {
    .nr_irqs     = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT,

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .qirq        = spapr_qirq_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
};

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */
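/*
 * Sketch of the expected sequence (informational, derived from the
 * handlers below):
 *
 *   spapr_irq_init_dual()  - both the XICS and the XIVE objects are created
 *   CAS negotiation        - the guest records its choice in ov5_cas
 *   machine reset          - spapr_irq_reset_dual() activates the mode
 *                            returned by spapr_irq_current()
 */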

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static sPAPRIrq *spapr_irq_current(sPAPRMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static void spapr_irq_init_dual(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        error_setg(errp, "No KVM support for the 'dual' machine");
        return;
    }

    spapr_irq_xics.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Align the XICS and the XIVE IRQ number space under QEMU.
     *
     * However, the XICS KVM device still expects IRQ numbers to start
     * at XICS_IRQ_BASE (0x1000). Either we introduce a KVM device
     * ioctl to set the offset, or we ignore the lower 4K numbers when
     * using the get/set ioctls of the XICS KVM device. The second
     * option seems the least intrusive.
     */
    spapr->ics->offset = 0;

    spapr_irq_xive.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static int spapr_irq_claim_dual(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}

static void spapr_irq_free_dual(sPAPRMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}

static qemu_irq spapr_qirq_dual(sPAPRMachineState *spapr, int irq)
{
    sPAPRXive *xive = spapr->xive;
    ICSState *ics = spapr->ics;

    if (irq >= spapr->irq->nr_irqs) {
        return NULL;
    }

    /*
     * The IRQ number should have been claimed under both interrupt
     * controllers.
     */
    assert(!ICS_IRQ_FREE(ics, irq - ics->offset));
    assert(xive_eas_is_valid(&xive->eat[irq]));

    return spapr->qirqs[irq];
}

static void spapr_irq_print_info_dual(sPAPRMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

static void spapr_irq_dt_populate_dual(sPAPRMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static void spapr_irq_cpu_intc_create_dual(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}

static int spapr_irq_post_load_dual(sPAPRMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(sPAPRMachineState *spapr, Error **errp)
{
    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    spapr_irq_current(spapr)->reset(spapr, errp);
}

static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}

/*
 * Define values in sync with the XIVE and XICS backends
 */
#define SPAPR_IRQ_DUAL_NR_IRQS     0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS     (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_dual = {
    .nr_irqs     = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_BOTH,

    .init        = spapr_irq_init_dual,
    .claim       = spapr_irq_claim_dual,
    .free        = spapr_irq_free_dual,
    .qirq        = spapr_qirq_dual,
    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
};

/*
 * sPAPR IRQ frontend routines for devices
 */
void spapr_irq_init(sPAPRMachineState *spapr, Error **errp)
{
    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}

int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}

void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}

qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}

int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp)
{
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
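
/*
 * Typical (illustrative) use of the frontend above by a device model:
 * claim a number in the machine IRQ space, drive it through its
 * qemu_irq, and release it again on unplug. How 'irq' is chosen is up
 * to the machine (a fixed number or the MSI allocator above); this is
 * only a sketch, not code taken from an actual device.
 *
 *     if (spapr_irq_claim(spapr, irq, true, &err) < 0) {
 *         error_report_err(err);
 *         return;
 *     }
 *     qemu_set_irq(spapr_qirq(spapr, irq), 1);
 *     ...
 *     spapr_irq_free(spapr, irq, 1);
 */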

/*
 * XICS legacy routines - to deprecate one day
 */

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}
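
/*
 * Example (sketch): with num = 4 and alignnum = 4, the loop above only
 * probes source numbers 0, 4, 8, ... and returns the first of them
 * whose 4 consecutive sources are all free, or -1 once the end of the
 * ICS is reached.
 */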

int spapr_irq_find(sPAPRMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing the VIRQ, so it has to be
     * aligned to num to support multiple MSI vectors. MSI-X is not
     * affected by this. The hint is used for the first IRQ; the rest
     * should be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS     0x400

sPAPRIrq spapr_irq_xics_legacy = {
    .nr_irqs     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .set_irq     = spapr_irq_set_irq_xics,
};
709