xref: /openbmc/qemu/hw/intc/spapr_xive.c (revision ace6fcde9b398113482b0c5955c237d64413f2e6)
/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "trace.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id.
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

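/*
 * Worked example (illustrative, not from PAPR): with
 * SPAPR_XIVE_NVT_BASE = 0x400, a hypothetical vCPU "cpu3" with
 * vcpu_id 3 maps to the NVT identifiers below, and
 * spapr_xive_nvt_to_target() inverts the index part:
 *
 *    uint8_t blk;
 *    uint32_t idx;
 *
 *    spapr_xive_cpu_to_nvt(cpu3, &blk, &idx);
 *    // blk == SPAPR_XIVE_BLOCK_ID, idx == 0x403
 *    g_assert(spapr_xive_nvt_to_target(blk, idx) == 3);
 *
 * The block is ignored on the reverse path since this model only
 * uses a single block.
 */
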
/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, with
 * 8 priorities per CPU
 */
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}

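/*
 * Illustrative sketch of the END indexing scheme above: the END index
 * packs the vCPU id and the priority into a single number,
 * end_idx = (vcpu_id << 3) | prio. For instance, target 2 at priority
 * 5 uses END index 21, and spapr_xive_end_to_target() recovers both
 * values:
 *
 *    uint32_t server;
 *    uint8_t prio;
 *
 *    spapr_xive_end_to_target(SPAPR_XIVE_BLOCK_ID, 21, &server, &prio);
 *    // server == 2 (21 >> 3), prio == 5 (21 & 0x7)
 */
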
/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    g_autoptr(GString) buf = g_string_new("");
    g_autoptr(HumanReadableText) info = NULL;

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, buf);

    info = human_readable_text_from_str(buf);
    monitor_puts(mon, info->human_readable_text);
}

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define spapr_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)

static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}

void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

static void spapr_xive_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* Set by spapr_irq_init() */
    g_assert(xive->nr_irqs);
    g_assert(xive->nr_ends);

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);

    /*
     * Map all regions. These will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active
     */
    memory_region_add_subregion(get_system_memory(), xive->vc_base,
                                &xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->end_base,
                                &end_xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->tm_base,
                                &xive->tm_mmio);
}

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find the NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain an NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. This function
     * should never get called.
     */
    g_assert_not_reached();
}

static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the matching thread interrupt context and follow on to
         * check for duplicates which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /*
     * Let's claim GEN1 TIMA format. If running with KVM on P10, the
     * correct answer is deep in the hardware and not accessible to
     * us.  But it shouldn't matter as it only affects the presenter
     * as seen by a guest OS.
     */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
{
    return SPAPR_XIVE_BLOCK_ID;
}

static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_get(&xive->source, idx);
    return 0;
}

static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_set(&xive->source, idx, *pq);
    return 0;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    SpaprXive *xive = SPAPR_XIVE(opaque);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_pre_save(xive);
    }

    return 0;
}

/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_post_load(xive, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_claim_irq(lisn, lsi);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}

static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_free_irq(lisn);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}

static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
    DEFINE_PROP_END_OF_LIST(),
};

static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t  nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread,
     * the hypervisor pushes its identifier in the OS CAM line.
     * Emulate the same behavior under QEMU.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}

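/*
 * Illustrative sketch (assuming xive_nvt_cam_line() packs the NVT
 * block into the upper bits of the CAM word): after reset, the OS CAM
 * line of a hypothetical vCPU "cpu0" (vcpu_id 0) holds the valid bit
 * plus its NVT identifier, so a presented NVT 0/0x400 matches this
 * thread:
 *
 *    spapr_xive_cpu_to_nvt(cpu0, &nvt_blk, &nvt_idx);
 *    // nvt_blk == SPAPR_XIVE_BLOCK_ID, nvt_idx == 0x400
 *    xive_tctx_set_os_cam(tctx0, xive_nvt_cam_line(nvt_blk, nvt_idx));
 *    // TM_QW1_OS word 2 now has TM_QW1W2_VO set plus the CAM value
 */
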
static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
                                        PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    xive_tctx_destroy(spapr_cpu->tctx);
    spapr_cpu->tctx = NULL;
}

static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    trace_spapr_xive_set_irq(irq, val);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}

static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;
    g_autoptr(GString) buf = g_string_new("");
    g_autoptr(HumanReadableText) info = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, buf);
    }
    info = human_readable_text_from_str(buf);
    monitor_puts(mon, info->human_readable_text);

    spapr_xive_pic_print_info(xive, mon);
}

static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * QEMU/KVM only needs to define a single range to reserve the
     * escalation priority. A priority bitmask would have been more
     * appropriate.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(xive->hv_prio),    /* start */
        cpu_to_be32(0xff - xive->hv_prio), /* count */
    };

    /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}

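/*
 * For reference, a sketch of the node built above with illustrative
 * values (the "reg" property carries the user and OS TIMA page
 * address/size pairs; actual addresses depend on "tm-base" and
 * TM_SHIFT):
 *
 *    interrupt-controller@... {
 *        device_type = "power-ivpe";
 *        compatible = "ibm,power-ivpe";
 *        reg = <user-TIMA-addr user-TIMA-size OS-TIMA-addr OS-TIMA-size>;
 *        ibm,xive-eq-sizes = <16>;        // 64K pages only
 *        ibm,xive-lisn-ranges = <SPAPR_IRQ_IPI (SPAPR_IRQ_IPI + nr_servers)>;
 *        interrupt-controller;
 *        #interrupt-cells = <2>;
 *        linux,phandle = <phandle>;
 *        phandle = <phandle>;
 *    };
 *
 * plus the "ibm,plat-res-int-priorities" property on the root node.
 */
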
static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}

static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_disconnect(intc);
    }
}

static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
{
    return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
}

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd    = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_pq  = spapr_xive_get_pq;
    xrc->set_pq  = spapr_xive_set_pq;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    xpc->match_nvt  = spapr_xive_match_nvt;
    xpc->get_config = spapr_xive_presenter_get_config;
    xpc->in_kernel  = spapr_xive_in_kernel_xptr;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * On POWER9, the KVM XIVE device uses priority 7 for the escalation
 * interrupts. So we only allow the guest to use priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
{
    return priority >= xive->hv_prio;
}

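/*
 * Example with the default "hv-prio" value of 7: priorities 7 and
 * above are rejected by the hcalls below, which corresponds to the
 * (start 7, count 0xf8) range advertised in
 * "ibm,plat-res-int-priorities" by spapr_xive_dt(). The guest is left
 * with priorities 0-6.
 */
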
/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB     PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI           PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER       PPC_BIT(62) /* Trigger and management
                                                    on same page */
#define SPAPR_XIVE_SRC_STORE_EOI     PPC_BIT(63) /* Store EOI support */

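/*
 * Illustrative outcomes of the hcall below (not from the PAPR text):
 * for an emulated MSI with 2-page ESBs and Store EOI, the guest gets
 * back flags = SPAPR_XIVE_SRC_STORE_EOI, a management page address in
 * R5, a trigger page address in R6 and the page shift in R7. For an
 * LSI, SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI is set and R5 is
 * -1, steering the guest to the H_INT_ESB hcall instead of the MMIO
 * pages.
 */
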
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];

    trace_spapr_xive_get_source_info(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters.  Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *       control structure. An interrupt masked by this mechanism will
 *       be dropped, but its source state bits will still be
 *       set. There is no race-free way of unmasking and restoring the
 *       source. Thus this should only be used for interrupts that are
 *       also masked at the source, and only in cases where the
 *       interrupt is not meant to be used for a large amount of time,
 *       for example because no valid target exists for it
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

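/*
 * Typical guest usage sketch with hypothetical values: route LISN
 * 0x20 to vCPU 0 at priority 6 with EISN 0x20, then later park it by
 * resetting the EAS with priority 0xff:
 *
 *    args = { SPAPR_XIVE_SRC_SET_EISN, 0x20, 0, 6, 0x20 };
 *    // -> EAS valid, unmasked, END = vCPU 0 / prio 6, data 0x20
 *
 *    args = { 0, 0x20, 0, 0xff, 0 };
 *    // -> EAS reset to VALID | MASKED
 */
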
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags    = args[0];
    target_ulong lisn     = args[1];
    target_ulong target   = args[2];
    target_ulong priority = args[3];
    target_ulong eisn     = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}

/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which
 * target/priority pair the specified Logical Interrupt Source is
 * assigned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_get_source_config(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_info(flags, target, priority);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ
 * for a given "target" and "priority".  It is also used to set the
 * notification config associated with the EQ.  An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

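/*
 * Accepted "eventQueueSize" values below are 12, 16, 21 and 24,
 * matching the 4K/64K/2M/16M page sizes listed for
 * "ibm,xive-eq-sizes" (only 16 is advertised by spapr_xive_dt()).
 * For example, a 64K EQ naturally aligned at "qpage" is encoded as
 * END_W0_QSIZE = 16 - 12 = 4 with END_W0_ENQUEUE set, and qsize = 0
 * resets the queue.
 */
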
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *       Bits 0-61: Reserved
 *       Bit 62: The value of Event Queue Generation Number (g) per
 *              the XIVE spec if "Debug" = 1
 *       Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 *
 */

#define SPAPR_XIVE_END_DEBUG     PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_config(flags, target, priority);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread.  The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine".  Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags   = args[0];

    trace_spapr_xive_set_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

1625 /*
1626  * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
1627  * real address of the reporting cache line pair set for the input
1628  * "target".  If no reporting cache line pair has been set, -1 is
1629  * returned.
1630  *
1631  * Parameters:
1632  * Input:
1633  * - R4: "flags"
1634  *         Bits 0-63: Reserved
1635  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1636  *       "ibm,ppc-interrupt-gserver#s"
1637  * - R6: "reportingLine": The logical real address of the reporting
1638  *        cache line pair
1639  *
1640  * Output:
1641  * - R4: The logical real address of the reporting line if set, else -1
1642  */
1643 static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
1644                                                 SpaprMachineState *spapr,
1645                                                 target_ulong opcode,
1646                                                 target_ulong *args)
1647 {
1648     target_ulong flags   = args[0];
1649 
1650     trace_spapr_xive_get_os_reporting_line(flags);
1651 
1652     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1653         return H_FUNCTION;
1654     }
1655 
1656     /*
1657      * H_STATE should be returned if a H_INT_RESET is in progress.
1658      * This is not needed when running the emulation under QEMU
1659      */
1660 
1661     /* TODO: H_INT_GET_OS_REPORTING_LINE */
1662     return H_FUNCTION;
1663 }

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn".  This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

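    /*
     * Forward the access to the KVM device when in use. Otherwise,
     * emulate it with a DMA access to the ESB management page of the
     * source.
     */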
    if (spapr_xive_in_kernel(xive)) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE),
                          MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}

/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input "lisn" are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_sync(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU.
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM.
     */

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state.  This
 * means losing all interrupt state previously set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags   = args[0];

    trace_spapr_xive_reset(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

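    /*
     * The device reset handler returns the routing tables (EAT, ENDT)
     * to their initial state.
     */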
    device_cold_reset(DEVICE(xive));

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

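/*
 * Register the XIVE exploitation mode hcalls. This is done once at
 * machine init time; each handler still checks at runtime that the
 * guest has negotiated XIVE exploitation mode through CAS.
 */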
void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}