xref: /openbmc/qemu/hw/intc/spapr_xive.c (revision 64552b6b)
1 /*
2  * QEMU PowerPC sPAPR XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2018, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "qemu/error-report.h"
15 #include "target/ppc/cpu.h"
16 #include "sysemu/cpus.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/spapr.h"
21 #include "hw/ppc/spapr_cpu_core.h"
22 #include "hw/ppc/spapr_xive.h"
23 #include "hw/ppc/xive.h"
24 #include "hw/ppc/xive_regs.h"
25 
26 /*
27  * XIVE Virtualization Controller BAR and Thread Managment BAR that we
28  * use for the ESB pages and the TIMA pages
29  */
30 #define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
31 #define SPAPR_XIVE_TM_BASE   0x0006030203180000ull
32 
33 /*
34  * The allocation of VP blocks is a complex operation in OPAL and the
35  * VP identifiers have a relation with the number of HW chips, the
36  * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
37  * controller model does not have the same constraints and can use a
38  * simple mapping scheme of the CPU vcpu_id
39  *
40  * These identifiers are never returned to the OS.
41  */
42 
43 #define SPAPR_XIVE_NVT_BASE 0x400
44 
45 /*
46  * sPAPR NVT and END indexing helpers
47  */
/*
 * Convert an NVT (block, index) identifier back to a vCPU id ("target").
 * The block number is unused on sPAPR: indexes are allocated in a single
 * block as SPAPR_XIVE_NVT_BASE + vcpu_id, so the reverse mapping only
 * needs the index.
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}
52 
53 static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
54                                   uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
55 {
56     assert(cpu);
57 
58     if (out_nvt_blk) {
59         *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
60     }
61 
62     if (out_nvt_blk) {
63         *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
64     }
65 }
66 
67 static int spapr_xive_target_to_nvt(uint32_t target,
68                                     uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
69 {
70     PowerPCCPU *cpu = spapr_find_cpu(target);
71 
72     if (!cpu) {
73         return -1;
74     }
75 
76     spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
77     return 0;
78 }
79 
80 /*
81  * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
82  * priorities per CPU
83  */
84 int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
85                              uint32_t *out_server, uint8_t *out_prio)
86 {
87 
88     assert(end_blk == SPAPR_XIVE_BLOCK_ID);
89 
90     if (out_server) {
91         *out_server = end_idx >> 3;
92     }
93 
94     if (out_prio) {
95         *out_prio = end_idx & 0x7;
96     }
97     return 0;
98 }
99 
100 static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
101                                   uint8_t *out_end_blk, uint32_t *out_end_idx)
102 {
103     assert(cpu);
104 
105     if (out_end_blk) {
106         *out_end_blk = SPAPR_XIVE_BLOCK_ID;
107     }
108 
109     if (out_end_idx) {
110         *out_end_idx = (cpu->vcpu_id << 3) + prio;
111     }
112 }
113 
114 static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
115                                     uint8_t *out_end_blk, uint32_t *out_end_idx)
116 {
117     PowerPCCPU *cpu = spapr_find_cpu(target);
118 
119     if (!cpu) {
120         return -1;
121     }
122 
123     spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
124     return 0;
125 }
126 
127 /*
128  * On sPAPR machines, use a simplified output for the XIVE END
129  * structure dumping only the information related to the OS EQ.
130  */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    /* 4-byte entries in a 4K << qsize queue => 2^(qsize + 10) entries */
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    /* CPU/PRIO, queue index and size, queue address, generation bit */
    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    /* Dump up to 6 entries of the OS event queue */
    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}
149 
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    /* When KVM drives XIVE, pull the in-kernel state back first */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Only claimed interrupts are displayed */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        /* LISN, type, PQ bits, asserted/masked flags and EISN */
        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            /* Append the OS EQ description for routed interrupts */
            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
197 
198 void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
199 {
200     memory_region_set_enabled(&xive->source.esb_mmio, enable);
201     memory_region_set_enabled(&xive->tm_mmio, enable);
202 
203     /* Disable the END ESBs until a guest OS makes use of them */
204     memory_region_set_enabled(&xive->end_source.esb_mmio, false);
205 }
206 
207 /*
208  * When a Virtual Processor is scheduled to run on a HW thread, the
209  * hypervisor pushes its identifier in the OS CAM line. Emulate the
210  * same behavior under QEMU.
211  */
212 void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
213 {
214     uint8_t  nvt_blk;
215     uint32_t nvt_idx;
216     uint32_t nvt_cam;
217 
218     spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);
219 
220     nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
221     memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
222 }
223 
224 static void spapr_xive_end_reset(XiveEND *end)
225 {
226     memset(end, 0, sizeof(*end));
227 
228     /* switch off the escalation and notification ESBs */
229     end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
230 }
231 
232 static void spapr_xive_reset(void *dev)
233 {
234     SpaprXive *xive = SPAPR_XIVE(dev);
235     int i;
236 
237     /*
238      * The XiveSource has its own reset handler, which mask off all
239      * IRQs (!P|Q)
240      */
241 
242     /* Mask all valid EASs in the IRQ number space. */
243     for (i = 0; i < xive->nr_irqs; i++) {
244         XiveEAS *eas = &xive->eat[i];
245         if (xive_eas_is_valid(eas)) {
246             eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
247         } else {
248             eas->w = 0;
249         }
250     }
251 
252     /* Clear all ENDs */
253     for (i = 0; i < xive->nr_ends; i++) {
254         spapr_xive_end_reset(&xive->endt[i]);
255     }
256 }
257 
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    /* Interrupt source child object for IPIs and virtual devices */
    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    /* ESB source child object for the Event Notification Descriptors */
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}
272 
273 static void spapr_xive_realize(DeviceState *dev, Error **errp)
274 {
275     SpaprXive *xive = SPAPR_XIVE(dev);
276     XiveSource *xsrc = &xive->source;
277     XiveENDSource *end_xsrc = &xive->end_source;
278     Error *local_err = NULL;
279 
280     if (!xive->nr_irqs) {
281         error_setg(errp, "Number of interrupt needs to be greater 0");
282         return;
283     }
284 
285     if (!xive->nr_ends) {
286         error_setg(errp, "Number of interrupt needs to be greater 0");
287         return;
288     }
289 
290     /*
291      * Initialize the internal sources, for IPIs and virtual devices.
292      */
293     object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
294                             &error_fatal);
295     object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
296                                    &error_fatal);
297     object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
298     if (local_err) {
299         error_propagate(errp, local_err);
300         return;
301     }
302     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
303 
304     /*
305      * Initialize the END ESB source
306      */
307     object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
308                             &error_fatal);
309     object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
310                                    &error_fatal);
311     object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
312     if (local_err) {
313         error_propagate(errp, local_err);
314         return;
315     }
316     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
317 
318     /* Set the mapping address of the END ESB pages after the source ESBs */
319     xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
320 
321     /*
322      * Allocate the routing tables
323      */
324     xive->eat = g_new0(XiveEAS, xive->nr_irqs);
325     xive->endt = g_new0(XiveEND, xive->nr_ends);
326 
327     xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
328                            xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
329 
330     qemu_register_reset(spapr_xive_reset, dev);
331 
332     /* TIMA initialization */
333     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
334                           "xive.tima", 4ull << TM_SHIFT);
335     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
336 
337     /*
338      * Map all regions. These will be enabled or disabled at reset and
339      * can also be overridden by KVM memory regions if active
340      */
341     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
342     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
343     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
344 }
345 
346 static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
347                               uint32_t eas_idx, XiveEAS *eas)
348 {
349     SpaprXive *xive = SPAPR_XIVE(xrtr);
350 
351     if (eas_idx >= xive->nr_irqs) {
352         return -1;
353     }
354 
355     *eas = xive->eat[eas_idx];
356     return 0;
357 }
358 
359 static int spapr_xive_get_end(XiveRouter *xrtr,
360                               uint8_t end_blk, uint32_t end_idx, XiveEND *end)
361 {
362     SpaprXive *xive = SPAPR_XIVE(xrtr);
363 
364     if (end_idx >= xive->nr_ends) {
365         return -1;
366     }
367 
368     memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
369     return 0;
370 }
371 
372 static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
373                                 uint32_t end_idx, XiveEND *end,
374                                 uint8_t word_number)
375 {
376     SpaprXive *xive = SPAPR_XIVE(xrtr);
377 
378     if (end_idx >= xive->nr_ends) {
379         return -1;
380     }
381 
382     memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
383     return 0;
384 }
385 
386 static int spapr_xive_get_nvt(XiveRouter *xrtr,
387                               uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
388 {
389     uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
390     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
391 
392     if (!cpu) {
393         /* TODO: should we assert() if we can find a NVT ? */
394         return -1;
395     }
396 
397     /*
398      * sPAPR does not maintain a NVT table. Return that the NVT is
399      * valid if we have found a matching CPU
400      */
401     nvt->w0 = cpu_to_be32(NVT_W0_VALID);
402     return 0;
403 }
404 
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. It should never
     * get called.
     */
    g_assert_not_reached();
}
416 
417 static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
418 {
419     PowerPCCPU *cpu = POWERPC_CPU(cs);
420 
421     return spapr_cpu_state(cpu)->tctx;
422 }
423 
/* Migration stream layout of one END: the eight raw big-endian words */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
440 
/* Migration stream layout of one EAS: a single raw big-endian word */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
450 
/*
 * Before migration, capture the in-kernel XIVE state into the QEMU
 * tables when KVM is in charge.  Pure emulation has nothing to do.
 */
static int vmstate_spapr_xive_pre_save(void *opaque)
{
    if (!kvm_irqchip_in_kernel()) {
        return 0;
    }

    return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
}
459 
460 /*
461  * Called by the sPAPR IRQ backend 'post_load' method at the machine
462  * level.
463  */
464 int spapr_xive_post_load(SpaprXive *xive, int version_id)
465 {
466     if (kvm_irqchip_in_kernel()) {
467         return kvmppc_xive_post_load(xive, version_id);
468     }
469 
470     return 0;
471 }
472 
/* Top-level migration description: the EAS and END routing tables */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (VMStateField[]) {
        /* nr_irqs must match between source and destination */
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                     vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
488 
/* Device properties: table sizes and MMIO base addresses */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};
496 
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    dc->props   = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd    = &vmstate_spapr_xive;

    /* XiveRouter hooks giving access to the routing tables and TCTXs */
    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;
}
514 
/* QOM type registration data: SpaprXive derives from XiveRouter */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
};
522 
/* Register the QOM type at module load time */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
529 
530 bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi)
531 {
532     XiveSource *xsrc = &xive->source;
533 
534     if (lisn >= xive->nr_irqs) {
535         return false;
536     }
537 
538     xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
539     if (lsi) {
540         xive_source_irq_set_lsi(xsrc, lisn);
541     }
542 
543     if (kvm_irqchip_in_kernel()) {
544         Error *local_err = NULL;
545 
546         kvmppc_xive_source_reset_one(xsrc, lisn, &local_err);
547         if (local_err) {
548             error_report_err(local_err);
549             return false;
550         }
551     }
552 
553     return true;
554 }
555 
556 bool spapr_xive_irq_free(SpaprXive *xive, uint32_t lisn)
557 {
558     if (lisn >= xive->nr_irqs) {
559         return false;
560     }
561 
562     xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
563     return true;
564 }
565 
566 /*
567  * XIVE hcalls
568  *
569  * The terminology used by the XIVE hcalls is the following :
570  *
571  *   TARGET vCPU number
572  *   EQ     Event Queue assigned by OS to receive event data
573  *   ESB    page for source interrupt management
574  *   LISN   Logical Interrupt Source Number identifying a source in the
575  *          machine
576  *   EISN   Effective Interrupt Source Number used by guest OS to
577  *          identify source in the guest
578  *
579  * The EAS, END, NVT structures are not exposed.
580  */
581 
582 /*
583  * Linux hosts under OPAL reserve priority 7 for their own escalation
584  * interrupts (DD2.X POWER9). So we only allow the guest to use
585  * priorities [0..6].
586  */
/*
 * Guests may only use priorities 0..6: priority 7 is the OPAL
 * escalation queue and anything above is reserved as well.
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    return priority > 6;
}
597 
598 /*
599  * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
600  * real address of the MMIO page through which the Event State Buffer
601  * entry associated with the value of the "lisn" parameter is managed.
602  *
603  * Parameters:
604  * Input
605  * - R4: "flags"
606  *         Bits 0-63 reserved
607  * - R5: "lisn" is per "interrupts", "interrupt-map", or
608  *       "ibm,xive-lisn-ranges" properties, or as returned by the
609  *       ibm,query-interrupt-source-number RTAS call, or as returned
610  *       by the H_ALLOCATE_VAS_WINDOW hcall
611  *
612  * Output
613  * - R4: "flags"
614  *         Bits 0-59: Reserved
615  *         Bit 60: H_INT_ESB must be used for Event State Buffer
616  *                 management
617  *         Bit 61: 1 == LSI  0 == MSI
618  *         Bit 62: the full function page supports trigger
619  *         Bit 63: Store EOI Supported
620  * - R5: Logical Real address of full function Event State Buffer
621  *       management page, -1 if H_INT_ESB hcall flag is set to 1.
622  * - R6: Logical Real Address of trigger only Event State Buffer
623  *       management page or -1.
624  * - R7: Power of 2 page size for the ESB management pages returned in
625  *       R5 and R6.
626  */
627 
628 #define SPAPR_XIVE_SRC_H_INT_ESB     PPC_BIT(60) /* ESB manage with H_INT_ESB */
629 #define SPAPR_XIVE_SRC_LSI           PPC_BIT(61) /* Virtual LSI type */
630 #define SPAPR_XIVE_SRC_TRIGGER       PPC_BIT(62) /* Trigger and management
631                                                     on same page */
632 #define SPAPR_XIVE_SRC_STORE_EOI     PPC_BIT(63) /* Store EOI support */
633 
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];

    /* The hcall is only valid once XIVE exploitation was negotiated */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All input flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* The LISN must have been claimed (valid EAS) */
    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    /* R5: full function ESB management page, or -1 with H_INT_ESB */
    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    /* R6: trigger-only ESB page when the source has two pages */
    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* R7: log2 page size; each page is half the ESB in 2-page mode */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
706 
707 /*
708  * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
709  * Interrupt Source to a target. The Logical Interrupt Source is
710  * designated with the "lisn" parameter and the target is designated
711  * with the "target" and "priority" parameters.  Upon return from the
712  * hcall(), no additional interrupts will be directed to the old EQ.
713  *
714  * Parameters:
715  * Input:
716  * - R4: "flags"
717  *         Bits 0-61: Reserved
718  *         Bit 62: set the "eisn" in the EAS
719  *         Bit 63: masks the interrupt source in the hardware interrupt
720  *       control structure. An interrupt masked by this mechanism will
721  *       be dropped, but it's source state bits will still be
722  *       set. There is no race-free way of unmasking and restoring the
723  *       source. Thus this should only be used in interrupts that are
724  *       also masked at the source, and only in cases where the
725  *       interrupt is not meant to be used for a large amount of time
726  *       because no valid target exists for it for example
727  * - R5: "lisn" is per "interrupts", "interrupt-map", or
728  *       "ibm,xive-lisn-ranges" properties, or as returned by the
729  *       ibm,query-interrupt-source-number RTAS call, or as returned by
730  *       the H_ALLOCATE_VAS_WINDOW hcall
731  * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
732  *       "ibm,ppc-interrupt-gserver#s"
733  * - R7: "priority" is a valid priority not in
734  *       "ibm,plat-res-int-priorities"
735  * - R8: "eisn" is the guest EISN associated with the "lisn"
736  *
737  * Output:
738  * - None
739  */
740 
741 #define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
742 #define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)
743 
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags    = args[0];
    target_ulong lisn     = args[1];
    target_ulong target   = args[2];
    target_ulong priority = args[3];
    target_ulong eisn     = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    /* The hcall is only valid once XIVE exploitation was negotiated */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* Only the SET_EISN and MASK flag bits are defined */
    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* The LISN must have been claimed (valid EAS) */
    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * priority 0xff is used to reset the EAS.
     *
     * NOTE(review): this path jumps straight to 'out' and bypasses the
     * kvmppc_xive_set_source_config() call below — confirm the
     * in-kernel EAS does not need to be updated on reset as well.
     */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    /* Apply or clear the mask bit before the (re)routing below */
    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    /* Route the LISN to the END of the target/priority pair */
    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    /* Propagate the new configuration to the in-kernel device */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
828 
829 /*
830  * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which
831  * target/priority pair is assigned to the specified Logical Interrupt
832  * Source.
833  *
834  * Parameters:
835  * Input:
836  * - R4: "flags"
837  *         Bits 0-63 Reserved
838  * - R5: "lisn" is per "interrupts", "interrupt-map", or
839  *       "ibm,xive-lisn-ranges" properties, or as returned by the
840  *       ibm,query-interrupt-source-number RTAS call, or as
841  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
842  *
843  * Output:
844  * - R4: Target to which the specified Logical Interrupt Source is
845  *       assigned
846  * - R5: Priority to which the specified Logical Interrupt Source is
847  *       assigned
848  * - R6: EISN for the specified Logical Interrupt Source (this will be
849  *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
850  */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    /* The hcall is only valid once XIVE exploitation was negotiated */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All input flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* The LISN must have been claimed (valid EAS) */
    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* R4: the target is recovered from the NVT the END points to */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    /* R5: a masked EAS reports the reset priority 0xff */
    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    /* R6: the EISN stored in the EAS data field */
    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
905 
906 /*
907  * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
908  * address of the notification management page associated with the
909  * specified target and priority.
910  *
911  * Parameters:
912  * Input:
913  * - R4: "flags"
914  *         Bits 0-63 Reserved
915  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
916  *       "ibm,ppc-interrupt-gserver#s"
917  * - R6: "priority" is a valid priority not in
918  *       "ibm,plat-res-int-priorities"
919  *
920  * Output:
921  * - R4: Logical real address of notification page
922  * - R5: Power of 2 page size of the notification page
923  */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    /* The hcall is only valid once XIVE exploitation was negotiated */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All input flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* R4: notification page address; each END owns an ESB page pair */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    /* R5: log2 EQ size (qsize encodes 4K << qsize), 0 if no EQ is set */
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
978 
979 /*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
981  * a given "target" and "priority".  It is also used to set the
982  * notification config associated with the EQ.  An EQ size of 0 is
983  * used to reset the EQ config for a given target and priority. If
984  * resetting the EQ config, the END associated with the given "target"
985  * and "priority" will be changed to disable queueing.
986  *
987  * Upon return from the hcall(), no additional interrupts will be
988  * directed to the old EQ (if one was set). The old EQ (if one was
989  * set) should be investigated for interrupts that occurred prior to
990  * or during the hcall().
991  *
992  * Parameters:
993  * Input:
994  * - R4: "flags"
995  *         Bits 0-62: Reserved
996  *         Bit 63: Unconditional Notify (n) per the XIVE spec
997  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
998  *       "ibm,ppc-interrupt-gserver#s"
999  * - R6: "priority" is a valid priority not in
1000  *       "ibm,plat-res-int-priorities"
1001  * - R7: "eventQueue": The logical real address of the start of the EQ
1002  * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
1003  *
1004  * Output:
1005  * - None
1006  */
1007 
1008 #define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
1009 
1010 static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
1011                                            SpaprMachineState *spapr,
1012                                            target_ulong opcode,
1013                                            target_ulong *args)
1014 {
1015     SpaprXive *xive = spapr->xive;
1016     target_ulong flags = args[0];
1017     target_ulong target = args[1];
1018     target_ulong priority = args[2];
1019     target_ulong qpage = args[3];
1020     target_ulong qsize = args[4];
1021     XiveEND end;
1022     uint8_t end_blk, nvt_blk;
1023     uint32_t end_idx, nvt_idx;
1024 
1025     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1026         return H_FUNCTION;
1027     }
1028 
1029     if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
1030         return H_PARAMETER;
1031     }
1032 
1033     /*
1034      * H_STATE should be returned if a H_INT_RESET is in progress.
1035      * This is not needed when running the emulation under QEMU
1036      */
1037 
1038     if (spapr_xive_priority_is_reserved(priority)) {
1039         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1040                       " is reserved\n", priority);
1041         return H_P3;
1042     }
1043 
1044     /*
1045      * Validate that "target" is part of the list of threads allocated
1046      * to the partition. For that, find the END corresponding to the
1047      * target.
1048      */
1049 
1050     if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1051         return H_P2;
1052     }
1053 
1054     assert(end_idx < xive->nr_ends);
1055     memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
1056 
1057     switch (qsize) {
1058     case 12:
1059     case 16:
1060     case 21:
1061     case 24:
1062         if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
1063             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
1064                           " is not naturally aligned with %" HWADDR_PRIx "\n",
1065                           qpage, (hwaddr)1 << qsize);
1066             return H_P4;
1067         }
1068         end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
1069         end.w3 = cpu_to_be32(qpage & 0xffffffff);
1070         end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
1071         end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
1072         break;
1073     case 0:
1074         /* reset queue and disable queueing */
1075         spapr_xive_end_reset(&end);
1076         goto out;
1077 
1078     default:
1079         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
1080                       qsize);
1081         return H_P5;
1082     }
1083 
1084     if (qsize) {
1085         hwaddr plen = 1 << qsize;
1086         void *eq;
1087 
1088         /*
1089          * Validate the guest EQ. We should also check that the queue
1090          * has been zeroed by the OS.
1091          */
1092         eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
1093                                MEMTXATTRS_UNSPECIFIED);
1094         if (plen != 1 << qsize) {
1095             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
1096                           HWADDR_PRIx "\n", qpage);
1097             return H_P4;
1098         }
1099         address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
1100     }
1101 
1102     /* "target" should have been validated above */
1103     if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
1104         g_assert_not_reached();
1105     }
1106 
1107     /*
1108      * Ensure the priority and target are correctly set (they will not
1109      * be right after allocation)
1110      */
1111     end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
1112         xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
1113     end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
1114 
1115     if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
1116         end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
1117     } else {
1118         end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
1119     }
1120 
1121     /*
1122      * The generation bit for the END starts at 1 and The END page
1123      * offset counter starts at 0.
1124      */
1125     end.w1 = cpu_to_be32(END_W1_GENERATION) |
1126         xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
1127     end.w0 |= cpu_to_be32(END_W0_VALID);
1128 
1129     /*
1130      * TODO: issue syncs required to ensure all in-flight interrupts
1131      * are complete on the old END
1132      */
1133 
1134 out:
1135     if (kvm_irqchip_in_kernel()) {
1136         Error *local_err = NULL;
1137 
1138         kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
1139         if (local_err) {
1140             error_report_err(local_err);
1141             return H_HARDWARE;
1142         }
1143     }
1144 
1145     /* Update END */
1146     memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
1147     return H_SUCCESS;
1148 }
1149 
1150 /*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
1152  * target and priority.
1153  *
1154  * Parameters:
1155  * Input:
1156  * - R4: "flags"
1157  *         Bits 0-62: Reserved
1158  *         Bit 63: Debug: Return debug data
1159  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1160  *       "ibm,ppc-interrupt-gserver#s"
1161  * - R6: "priority" is a valid priority not in
1162  *       "ibm,plat-res-int-priorities"
1163  *
1164  * Output:
1165  * - R4: "flags":
1166  *       Bits 0-61: Reserved
1167  *       Bit 62: The value of Event Queue Generation Number (g) per
1168  *              the XIVE spec if "Debug" = 1
1169  *       Bit 63: The value of Unconditional Notify (n) per the XIVE spec
1170  * - R5: The logical real address of the start of the EQ
1171  * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
1172  * - R7: The value of Event Queue Offset Counter per XIVE spec
1173  *       if "Debug" = 1, else 0
1174  *
1175  */
1176 
1177 #define SPAPR_XIVE_END_DEBUG     PPC_BIT(63)
1178 
static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    /* The XIVE exploitation mode hcalls are only valid after CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* Only the "Debug" bit is a valid flag */
    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* R4: return the Unconditional Notify flag state */
    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    /* R5/R6: EQ address and power-of-2 size (W0_QSIZE is size - 4K) */
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /*
     * Sync the END state from the in-kernel XIVE device so the debug
     * fields read below are current.
     * NOTE(review): args[1]/args[2] are filled from the QEMU-side END
     * before this sync — presumably the queue address/size are static
     * once configured; confirm KVM cannot change them behind our back.
     */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
1259 
1260 /*
1261  * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
1262  * reporting cache line pair for the calling thread.  The reporting
1263  * cache lines will contain the OS interrupt context when the OS
1264  * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
1265  * interrupt. The reporting cache lines can be reset by inputting -1
1266  * in "reportingLine".  Issuing the CI store byte without reporting
1267  * cache lines registered will result in the data not being accessible
1268  * to the OS.
1269  *
1270  * Parameters:
1271  * Input:
1272  * - R4: "flags"
1273  *         Bits 0-63: Reserved
1274  * - R5: "reportingLine": The logical real address of the reporting cache
1275  *       line pair
1276  *
1277  * Output:
1278  * - None
1279  */
1280 static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
1281                                                 SpaprMachineState *spapr,
1282                                                 target_ulong opcode,
1283                                                 target_ulong *args)
1284 {
1285     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1286         return H_FUNCTION;
1287     }
1288 
1289     /*
1290      * H_STATE should be returned if a H_INT_RESET is in progress.
1291      * This is not needed when running the emulation under QEMU
1292      */
1293 
1294     /* TODO: H_INT_SET_OS_REPORTING_LINE */
1295     return H_FUNCTION;
1296 }
1297 
1298 /*
1299  * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
1300  * real address of the reporting cache line pair set for the input
1301  * "target".  If no reporting cache line pair has been set, -1 is
1302  * returned.
1303  *
1304  * Parameters:
1305  * Input:
1306  * - R4: "flags"
1307  *         Bits 0-63: Reserved
1308  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1309  *       "ibm,ppc-interrupt-gserver#s"
1310  * - R6: "reportingLine": The logical real address of the reporting
1311  *        cache line pair
1312  *
1313  * Output:
1314  * - R4: The logical real address of the reporting line if set, else -1
1315  */
1316 static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
1317                                                 SpaprMachineState *spapr,
1318                                                 target_ulong opcode,
1319                                                 target_ulong *args)
1320 {
1321     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1322         return H_FUNCTION;
1323     }
1324 
1325     /*
1326      * H_STATE should be returned if a H_INT_RESET is in progress.
1327      * This is not needed when running the emulation under QEMU
1328      */
1329 
1330     /* TODO: H_INT_GET_OS_REPORTING_LINE */
1331     return H_FUNCTION;
1332 }
1333 
1334 /*
1335  * The H_INT_ESB hcall() is used to issue a load or store to the ESB
1336  * page for the input "lisn".  This hcall is only supported for LISNs
1337  * that have the ESB hcall flag set to 1 when returned from hcall()
1338  * H_INT_GET_SOURCE_INFO.
1339  *
1340  * Parameters:
1341  * Input:
1342  * - R4: "flags"
1343  *         Bits 0-62: Reserved
1344  *         bit 63: Store: Store=1, store operation, else load operation
1345  * - R5: "lisn" is per "interrupts", "interrupt-map", or
1346  *       "ibm,xive-lisn-ranges" properties, or as returned by the
1347  *       ibm,query-interrupt-source-number RTAS call, or as
1348  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
1349  * - R6: "esbOffset" is the offset into the ESB page for the load or
1350  *       store operation
1351  * - R7: "storeData" is the data to write for a store operation
1352  *
1353  * Output:
1354  * - R4: The value of the load if load operation, else -1
1355  */
1356 
1357 #define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
1358 
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    /* The XIVE exploitation mode hcalls are only valid after CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* Only the "Store" bit is a valid flag */
    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * NOTE(review): valid offsets into an ESB page are
     * 0 .. (1 << esb_shift) - 1, so this '>' also lets
     * offset == page size through — looks like an off-by-one,
     * '>=' would be strict; confirm against the PAPR spec.
     */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        /* Forward the ESB access to the in-kernel XIVE device */
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        /* Emulate with an 8-byte DMA access to the ESB management page */
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        /* R4: the loaded value for a load operation, else -1 */
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
1414 
1415 /*
1416  * The H_INT_SYNC hcall() is used to issue hardware syncs that will
1417  * ensure any in flight events for the input lisn are in the event
1418  * queue.
1419  *
1420  * Parameters:
1421  * Input:
1422  * - R4: "flags"
1423  *         Bits 0-63: Reserved
1424  * - R5: "lisn" is per "interrupts", "interrupt-map", or
1425  *       "ibm,xive-lisn-ranges" properties, or as returned by the
1426  *       ibm,query-interrupt-source-number RTAS call, or as
1427  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
1428  *
1429  * Output:
1430  * - None
1431  */
1432 static target_ulong h_int_sync(PowerPCCPU *cpu,
1433                                SpaprMachineState *spapr,
1434                                target_ulong opcode,
1435                                target_ulong *args)
1436 {
1437     SpaprXive *xive = spapr->xive;
1438     XiveEAS eas;
1439     target_ulong flags = args[0];
1440     target_ulong lisn = args[1];
1441 
1442     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1443         return H_FUNCTION;
1444     }
1445 
1446     if (flags) {
1447         return H_PARAMETER;
1448     }
1449 
1450     if (lisn >= xive->nr_irqs) {
1451         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1452                       lisn);
1453         return H_P2;
1454     }
1455 
1456     eas = xive->eat[lisn];
1457     if (!xive_eas_is_valid(&eas)) {
1458         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1459                       lisn);
1460         return H_P2;
1461     }
1462 
1463     /*
1464      * H_STATE should be returned if a H_INT_RESET is in progress.
1465      * This is not needed when running the emulation under QEMU
1466      */
1467 
1468     /*
1469      * This is not real hardware. Nothing to be done unless when
1470      * under KVM
1471      */
1472 
1473     if (kvm_irqchip_in_kernel()) {
1474         Error *local_err = NULL;
1475 
1476         kvmppc_xive_sync_source(xive, lisn, &local_err);
1477         if (local_err) {
1478             error_report_err(local_err);
1479             return H_HARDWARE;
1480         }
1481     }
1482     return H_SUCCESS;
1483 }
1484 
1485 /*
1486  * The H_INT_RESET hcall() is used to reset all of the partition's
1487  * interrupt exploitation structures to their initial state.  This
1488  * means losing all previously set interrupt state set via
1489  * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
1490  *
1491  * Parameters:
1492  * Input:
1493  * - R4: "flags"
1494  *         Bits 0-63: Reserved
1495  *
1496  * Output:
1497  * - None
1498  */
1499 static target_ulong h_int_reset(PowerPCCPU *cpu,
1500                                 SpaprMachineState *spapr,
1501                                 target_ulong opcode,
1502                                 target_ulong *args)
1503 {
1504     SpaprXive *xive = spapr->xive;
1505     target_ulong flags   = args[0];
1506 
1507     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1508         return H_FUNCTION;
1509     }
1510 
1511     if (flags) {
1512         return H_PARAMETER;
1513     }
1514 
1515     device_reset(DEVICE(xive));
1516 
1517     if (kvm_irqchip_in_kernel()) {
1518         Error *local_err = NULL;
1519 
1520         kvmppc_xive_reset(xive, &local_err);
1521         if (local_err) {
1522             error_report_err(local_err);
1523             return H_HARDWARE;
1524         }
1525     }
1526     return H_SUCCESS;
1527 }
1528 
1529 void spapr_xive_hcall_init(SpaprMachineState *spapr)
1530 {
1531     spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
1532     spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
1533     spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
1534     spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
1535     spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
1536     spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
1537     spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
1538                              h_int_set_os_reporting_line);
1539     spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
1540                              h_int_get_os_reporting_line);
1541     spapr_register_hypercall(H_INT_ESB, h_int_esb);
1542     spapr_register_hypercall(H_INT_SYNC, h_int_sync);
1543     spapr_register_hypercall(H_INT_RESET, h_int_reset);
1544 }
1545 
/*
 * Populate the guest device tree with the XIVE interrupt controller
 * node ("power-ivpe") and the platform reserved priorities property.
 */
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    SpaprXive *xive = spapr->xive;
    int node;
    /* Two (address, size) pairs: User TIMA page and OS TIMA page */
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * The following array is in sync with the reserved priorities
     * defined by the 'spapr_xive_priority_is_reserved' routine.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),    /* start */
        cpu_to_be32(0xf8), /* count */
    };

    /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
1607