xref: /openbmc/qemu/hw/ppc/spapr_irq.c (revision 642e92719e2790dfa0e12be1cfd822a0ff2322aa)
1 /*
2  * QEMU PowerPC sPAPR IRQ interface
3  *
4  * Copyright (c) 2018, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/error-report.h"
13 #include "qapi/error.h"
14 #include "hw/irq.h"
15 #include "hw/ppc/spapr.h"
16 #include "hw/ppc/spapr_cpu_core.h"
17 #include "hw/ppc/spapr_xive.h"
18 #include "hw/ppc/xics.h"
19 #include "hw/ppc/xics_spapr.h"
20 #include "hw/qdev-properties.h"
21 #include "cpu-models.h"
22 #include "sysemu/kvm.h"
23 
24 #include "trace.h"
25 
26 void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
27 {
28     spapr->irq_map_nr = nr_msis;
29     spapr->irq_map = bitmap_new(spapr->irq_map_nr);
30 }
31 
32 int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
33                         Error **errp)
34 {
35     int irq;
36 
37     /*
38      * The 'align_mask' parameter of bitmap_find_next_zero_area()
39      * should be one less than a power of 2; 0 means no
40      * alignment. Adapt the 'align' value of the former allocator
41      * to fit the requirements of bitmap_find_next_zero_area()
42      */
43     align -= 1;
44 
45     irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
46                                      align);
47     if (irq == spapr->irq_map_nr) {
48         error_setg(errp, "can't find a free %d-IRQ block", num);
49         return -1;
50     }
51 
52     bitmap_set(spapr->irq_map, irq, num);
53 
54     return irq + SPAPR_IRQ_MSI;
55 }
56 
57 void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
58 {
59     bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
60 }
61 
62 static void spapr_irq_init_kvm(SpaprMachineState *spapr,
63                                   SpaprIrq *irq, Error **errp)
64 {
65     MachineState *machine = MACHINE(spapr);
66     Error *local_err = NULL;
67 
68     if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
69         irq->init_kvm(spapr, &local_err);
70         if (local_err && machine_kernel_irqchip_required(machine)) {
71             error_prepend(&local_err,
72                           "kernel_irqchip requested but unavailable: ");
73             error_propagate(errp, local_err);
74             return;
75         }
76 
77         if (!local_err) {
78             return;
79         }
80 
81         /*
82          * We failed to initialize the KVM device, fallback to
83          * emulated mode
84          */
85         error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
86         error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
87         warn_report_err(local_err);
88     }
89 }
90 
91 /*
92  * XICS IRQ backend.
93  */
94 
95 static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
96                                 Error **errp)
97 {
98     Object *obj;
99     Error *local_err = NULL;
100 
101     obj = object_new(TYPE_ICS);
102     object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
103     object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
104                                    &error_fatal);
105     object_property_set_int(obj, nr_irqs, "nr-irqs",  &error_fatal);
106     object_property_set_bool(obj, true, "realized", &local_err);
107     if (local_err) {
108         error_propagate(errp, local_err);
109         return;
110     }
111 
112     spapr->ics = ICS(obj);
113 
114     xics_spapr_init(spapr);
115 }
116 
117 static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
118                                 Error **errp)
119 {
120     ICSState *ics = spapr->ics;
121 
122     assert(ics);
123 
124     if (!ics_valid_irq(ics, irq)) {
125         error_setg(errp, "IRQ %d is invalid", irq);
126         return -1;
127     }
128 
129     if (!ics_irq_free(ics, irq - ics->offset)) {
130         error_setg(errp, "IRQ %d is not free", irq);
131         return -1;
132     }
133 
134     ics_set_irq_type(ics, irq - ics->offset, lsi);
135     return 0;
136 }
137 
138 static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
139 {
140     ICSState *ics = spapr->ics;
141     uint32_t srcno = irq - ics->offset;
142     int i;
143 
144     if (ics_valid_irq(ics, irq)) {
145         trace_spapr_irq_free(0, irq, num);
146         for (i = srcno; i < srcno + num; ++i) {
147             if (ics_irq_free(ics, i)) {
148                 trace_spapr_irq_free_warn(0, i);
149             }
150             memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
151         }
152     }
153 }
154 
155 static qemu_irq spapr_qirq_xics(SpaprMachineState *spapr, int irq)
156 {
157     ICSState *ics = spapr->ics;
158     uint32_t srcno = irq - ics->offset;
159 
160     if (ics_valid_irq(ics, irq)) {
161         return spapr->qirqs[srcno];
162     }
163 
164     return NULL;
165 }
166 
167 static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
168 {
169     CPUState *cs;
170 
171     CPU_FOREACH(cs) {
172         PowerPCCPU *cpu = POWERPC_CPU(cs);
173 
174         icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
175     }
176 
177     ics_pic_print_info(spapr->ics, mon);
178 }
179 
180 static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
181                                            PowerPCCPU *cpu, Error **errp)
182 {
183     Error *local_err = NULL;
184     Object *obj;
185     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
186 
187     obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
188                      &local_err);
189     if (local_err) {
190         error_propagate(errp, local_err);
191         return;
192     }
193 
194     spapr_cpu->icp = ICP(obj);
195 }
196 
197 static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
198 {
199     if (!kvm_irqchip_in_kernel()) {
200         CPUState *cs;
201         CPU_FOREACH(cs) {
202             PowerPCCPU *cpu = POWERPC_CPU(cs);
203             icp_resend(spapr_cpu_state(cpu)->icp);
204         }
205     }
206     return 0;
207 }
208 
/* qemu_irq handler: forward the line state change to the ICS source */
static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    ics_set_irq(spapr->ics, srcno, val);
}
215 
216 static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
217 {
218     Error *local_err = NULL;
219 
220     spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
221     if (local_err) {
222         error_propagate(errp, local_err);
223         return;
224     }
225 }
226 
/* Device tree node name of the XICS interrupt controller */
static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
{
    return XICS_NODENAME;
}
231 
/* Connect the XICS backend to its KVM in-kernel device */
static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}
238 
#define SPAPR_IRQ_XICS_NR_IRQS     0x1000
#define SPAPR_IRQ_XICS_NR_MSIS     \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

/* XICS-only IRQ backend (legacy interrupt mode) */
SpaprIrq spapr_irq_xics = {
    .nr_irqs     = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};
261 
262 /*
263  * XIVE IRQ backend.
264  */
265 static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs,
266                                 Error **errp)
267 {
268     uint32_t nr_servers = spapr_max_server_number(spapr);
269     DeviceState *dev;
270     int i;
271 
272     dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
273     qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
274     /*
275      * 8 XIVE END structures per CPU. One for each available priority
276      */
277     qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
278     qdev_init_nofail(dev);
279 
280     spapr->xive = SPAPR_XIVE(dev);
281 
282     /* Enable the CPU IPIs */
283     for (i = 0; i < nr_servers; ++i) {
284         spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
285     }
286 
287     spapr_xive_hcall_init(spapr);
288 }
289 
290 static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
291                                 Error **errp)
292 {
293     if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
294         error_setg(errp, "IRQ %d is invalid", irq);
295         return -1;
296     }
297     return 0;
298 }
299 
300 static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
301 {
302     int i;
303 
304     for (i = irq; i < irq + num; ++i) {
305         spapr_xive_irq_free(spapr->xive, i);
306     }
307 }
308 
309 static qemu_irq spapr_qirq_xive(SpaprMachineState *spapr, int irq)
310 {
311     SpaprXive *xive = spapr->xive;
312 
313     if (irq >= xive->nr_irqs) {
314         return NULL;
315     }
316 
317     /* The sPAPR machine/device should have claimed the IRQ before */
318     assert(xive_eas_is_valid(&xive->eat[irq]));
319 
320     return spapr->qirqs[irq];
321 }
322 
323 static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
324                                       Monitor *mon)
325 {
326     CPUState *cs;
327 
328     CPU_FOREACH(cs) {
329         PowerPCCPU *cpu = POWERPC_CPU(cs);
330 
331         xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
332     }
333 
334     spapr_xive_pic_print_info(spapr->xive, mon);
335 }
336 
337 static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
338                                            PowerPCCPU *cpu, Error **errp)
339 {
340     Error *local_err = NULL;
341     Object *obj;
342     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
343 
344     obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
345     if (local_err) {
346         error_propagate(errp, local_err);
347         return;
348     }
349 
350     spapr_cpu->tctx = XIVE_TCTX(obj);
351 
352     /*
353      * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
354      * don't beneficiate from the reset of the XIVE IRQ backend
355      */
356     spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
357 }
358 
/* Delegate the migration post-load fixups to the XIVE device */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}
363 
364 static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
365 {
366     CPUState *cs;
367     Error *local_err = NULL;
368 
369     CPU_FOREACH(cs) {
370         PowerPCCPU *cpu = POWERPC_CPU(cs);
371 
372         /* (TCG) Set the OS CAM line of the thread interrupt context. */
373         spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
374     }
375 
376     spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
377     if (local_err) {
378         error_propagate(errp, local_err);
379         return;
380     }
381 
382     /* Activate the XIVE MMIOs */
383     spapr_xive_mmio_set_enabled(spapr->xive, true);
384 }
385 
/* qemu_irq handler: route the trigger to the KVM or emulated XIVE source */
static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, srcno, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, srcno, val);
    }
}
396 
/* Device tree node name of the XIVE interrupt controller */
static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
{
    return spapr->xive->nodename;
}
401 
/* Connect the XIVE backend to its KVM in-kernel device */
static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}
408 
/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS     0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS     (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

/* XIVE-only IRQ backend (exploitation interrupt mode) */
SpaprIrq spapr_irq_xive = {
    .nr_irqs     = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT,

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .qirq        = spapr_qirq_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
    .get_nodename = spapr_irq_get_nodename_xive,
    .init_kvm    = spapr_irq_init_kvm_xive,
};
435 
436 /*
437  * Dual XIVE and XICS IRQ backend.
438  *
439  * Both interrupt mode, XIVE and XICS, objects are created but the
440  * machine starts in legacy interrupt mode (XICS). It can be changed
441  * by the CAS negotiation process and, in that case, the new mode is
442  * activated after an extra machine reset.
443  */
444 
445 /*
446  * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
447  * default.
448  */
449 static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
450 {
451     return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
452         &spapr_irq_xive : &spapr_irq_xics;
453 }
454 
455 static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs,
456                                 Error **errp)
457 {
458     Error *local_err = NULL;
459 
460     spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
461     if (local_err) {
462         error_propagate(errp, local_err);
463         return;
464     }
465 
466     spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
467     if (local_err) {
468         error_propagate(errp, local_err);
469         return;
470     }
471 }
472 
473 static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
474                                 Error **errp)
475 {
476     Error *local_err = NULL;
477     int ret;
478 
479     ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
480     if (local_err) {
481         error_propagate(errp, local_err);
482         return ret;
483     }
484 
485     ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
486     if (local_err) {
487         error_propagate(errp, local_err);
488         return ret;
489     }
490 
491     return ret;
492 }
493 
/* Release the IRQ range in both backends to keep them in sync */
static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}
499 
/* Delegate to whichever backend CAS selected */
static qemu_irq spapr_qirq_dual(SpaprMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}
504 
/* Delegate to whichever backend CAS selected */
static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}
509 
/* Populate the device tree for whichever backend CAS selected */
static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}
516 
/* Create both per-CPU presenters: XIVE TCTX first, then XICS ICP */
static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}
530 
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        /* Drop the in-kernel XICS device created at startup, if any */
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}
546 
547 static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
548 {
549     Error *local_err = NULL;
550 
551     /*
552      * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
553      * if selected.
554      */
555     spapr_xive_mmio_set_enabled(spapr->xive, false);
556 
557     /* Destroy all KVM devices */
558     if (kvm_irqchip_in_kernel()) {
559         xics_kvm_disconnect(spapr, &local_err);
560         if (local_err) {
561             error_propagate(errp, local_err);
562             error_prepend(errp, "KVM XICS disconnect failed: ");
563             return;
564         }
565         kvmppc_xive_disconnect(spapr->xive, &local_err);
566         if (local_err) {
567             error_propagate(errp, local_err);
568             error_prepend(errp, "KVM XIVE disconnect failed: ");
569             return;
570         }
571     }
572 
573     spapr_irq_current(spapr)->reset(spapr, errp);
574 }
575 
/* qemu_irq handler: forward to whichever backend CAS selected */
static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}
582 
/* Device tree node name of whichever backend CAS selected */
static const char *spapr_irq_get_nodename_dual(SpaprMachineState *spapr)
{
    return spapr_irq_current(spapr)->get_nodename(spapr);
}
587 
/*
 * Define values in sync with the XIVE and XICS backend
 */
#define SPAPR_IRQ_DUAL_NR_IRQS     0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS     (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

/* Dual XICS/XIVE backend: both are created, CAS picks the active one */
SpaprIrq spapr_irq_dual = {
    .nr_irqs     = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_BOTH,

    .init        = spapr_irq_init_dual,
    .claim       = spapr_irq_claim_dual,
    .free        = spapr_irq_free_dual,
    .qirq        = spapr_qirq_dual,
    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
    .get_nodename = spapr_irq_get_nodename_dual,
    .init_kvm    = NULL, /* should not be used */
};
612 
613 
/* Validate the selected interrupt mode against CPU model and KVM support */
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}
667 
668 /*
669  * sPAPR IRQ frontend routines for devices
670  */
/*
 * Create the machine interrupt controller backend(s): validate the
 * machine options, set up the MSI allocator and allocate the qemu_irq
 * array fed by the backend's set_irq handler.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}
703 
/* Claim IRQ 'irq' (LSI or MSI) with the active backend */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}
708 
/* Release 'num' IRQs starting at 'irq' with the active backend */
void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}
713 
/* Map a global IRQ number to its qemu_irq with the active backend */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}
718 
/* Run the active backend's post-migration fixups */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}
723 
/* Machine reset hook for the IRQ subsystem */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    /* All MSIs should have been released by the devices at this point */
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
732 
733 int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
734 {
735     const char *nodename = spapr->irq->get_nodename(spapr);
736     int offset, phandle;
737 
738     offset = fdt_subnode_offset(fdt, 0, nodename);
739     if (offset < 0) {
740         error_setg(errp, "Can't find node \"%s\": %s", nodename,
741                    fdt_strerror(offset));
742         return -1;
743     }
744 
745     phandle = fdt_get_phandle(fdt, offset);
746     if (!phandle) {
747         error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
748         return -1;
749     }
750 
751     return phandle;
752 }
753 
754 /*
755  * XICS legacy routines - to deprecate one day
756  */
757 
758 static int ics_find_free_block(ICSState *ics, int num, int alignnum)
759 {
760     int first, i;
761 
762     for (first = 0; first < ics->nr_irqs; first += alignnum) {
763         if (num > (ics->nr_irqs - first)) {
764             return -1;
765         }
766         for (i = first; i < first + num; ++i) {
767             if (!ics_irq_free(ics, i)) {
768                 break;
769             }
770         }
771         if (i == (first + num)) {
772             return first;
773         }
774     }
775 
776     return -1;
777 }
778 
779 int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
780 {
781     ICSState *ics = spapr->ics;
782     int first = -1;
783 
784     assert(ics);
785 
786     /*
787      * MSIMesage::data is used for storing VIRQ so
788      * it has to be aligned to num to support multiple
789      * MSI vectors. MSI-X is not affected by this.
790      * The hint is used for the first IRQ, the rest should
791      * be allocated continuously.
792      */
793     if (align) {
794         assert((num == 1) || (num == 2) || (num == 4) ||
795                (num == 8) || (num == 16) || (num == 32));
796         first = ics_find_free_block(ics, num, num);
797     } else {
798         first = ics_find_free_block(ics, num, 1);
799     }
800 
801     if (first < 0) {
802         error_setg(errp, "can't find a free %d-IRQ block", num);
803         return -1;
804     }
805 
806     return first + ics->offset;
807 }
808 
#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS     0x400

/* XICS backend for old machine types, using the legacy IRQ allocator */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_irqs     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .qirq        = spapr_qirq_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};
829