xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision feecc6a0435d46da45b2d383693fe1292043606c)
1 /*
2  * QEMU PowerPC XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2019, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/pnv.h"
21 #include "hw/ppc/pnv_core.h"
22 #include "hw/ppc/pnv_xscom.h"
23 #include "hw/ppc/pnv_xive.h"
24 #include "hw/ppc/xive_regs.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/ppc/ppc.h"
27 
28 #include <libfdt.h>
29 
30 #include "pnv_xive_regs.h"
31 
32 #undef XIVE_DEBUG
33 
34 /*
35  * Virtual structures table (VST)
36  */
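/* Each SBE entry is a 2-bit PQ state, so one byte backs four sources */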
37 #define SBE_PER_BYTE   4
38 
39 typedef struct XiveVstInfo {
40     const char *name;
41     uint32_t    size;
42     uint32_t    max_blocks;
43 } XiveVstInfo;
44 
45 static const XiveVstInfo vst_infos[] = {
46     [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
47     [VST_TSEL_SBE]  = { "SBE",  1,               16 },
48     [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
49     [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
50 
51     /*
52      * Interrupt fifo backing store table (not modeled):
53      *
54      * 0 - IPI,
55      * 1 - HWD,
56      * 2 - First escalate,
57      * 3 - Second escalate,
58      * 4 - Redistribution,
59      * 5 - IPI cascaded queue?
60      */
61     [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
62 };
63 
64 #define xive_error(xive, fmt, ...)                                      \
65     qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
66                   (xive)->chip->chip_id, ## __VA_ARGS__)
67 
68 /*
69  * QEMU version of the GETFIELD/SETFIELD macros
70  *
71  * TODO: It might be better to use the existing extract64() and
72  * deposit64() but this means that all the register definitions will
73  * change and become incompatible with the ones found in skiboot.
74  *
75  * Keep it as it is for now until we find a common ground.
76  */
77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
78 {
79     return (word & mask) >> ctz64(mask);
80 }
81 
82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
83                                 uint64_t value)
84 {
85     return (word & ~mask) | ((value << ctz64(mask)) & mask);
86 }
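/*
 * For illustration, the equivalence with extract64()/deposit64()
 * mentioned in the TODO above. This is only a sketch: the helper name
 * is hypothetical, the check is compiled out by default, VSD_TSIZE is
 * used merely as an example of a contiguous mask, and extract64() and
 * deposit64() are assumed to be visible through the existing includes
 * ("qemu/bitops.h").
 */
#ifdef XIVE_DEBUG
static G_GNUC_UNUSED void pnv_xive_field_check(uint64_t word)
{
    int shift = ctz64(VSD_TSIZE);
    int len = ctpop64(VSD_TSIZE);

    /* GETFIELD is extract64() over the span of the mask */
    g_assert(GETFIELD(VSD_TSIZE, word) == extract64(word, shift, len));

    /* SETFIELD is deposit64() over the same span */
    g_assert(SETFIELD(VSD_TSIZE, word, 0x4) ==
             deposit64(word, shift, len, 0x4));
}
#endif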
87 
88 /*
89  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
90  * of the chips is good enough.
91  *
92  * TODO: Block scope support
93  */
94 static PnvXive *pnv_xive_get_ic(uint8_t blk)
95 {
96     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
97     int i;
98 
99     for (i = 0; i < pnv->num_chips; i++) {
100         Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
101         PnvXive *xive = &chip9->xive;
102 
103         if (xive->chip->chip_id == blk) {
104             return xive;
105         }
106     }
107     return NULL;
108 }
109 
110 /*
111  * VST accessors for SBE, EAT, ENDT, NVT
112  *
113  * Indirect VST tables are arrays of VSDs, each pointing to a page of
114  * the same size. Each page is itself a direct VST table.
115  */
116 
117 #define XIVE_VSD_SIZE 8
118 
119 /* Indirect page size can be 4K, 64K, 2M, 16M. */
120 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
121 {
122     return page_shift == 12 || page_shift == 16 ||
123            page_shift == 21 || page_shift == 24;
124 }
125 
126 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
127                                          uint64_t vsd, uint32_t idx)
128 {
129     const XiveVstInfo *info = &vst_infos[type];
130     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
131     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
132     uint32_t idx_max;
133 
134     idx_max = vst_tsize / info->size - 1;
135     if (idx > idx_max) {
136 #ifdef XIVE_DEBUG
137         xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
138                    info->name, idx, idx_max);
139 #endif
140         return 0;
141     }
142 
143     return vst_addr + idx * info->size;
144 }
145 
146 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
147                                            uint64_t vsd, uint32_t idx)
148 {
149     const XiveVstInfo *info = &vst_infos[type];
150     uint64_t vsd_addr;
151     uint32_t vsd_idx;
152     uint32_t page_shift;
153     uint32_t vst_per_page;
154 
155     /* Get the page size of the indirect table. */
156     vsd_addr = vsd & VSD_ADDRESS_MASK;
157     vsd = ldq_be_dma(&address_space_memory, vsd_addr);
158 
159     if (!(vsd & VSD_ADDRESS_MASK)) {
160 #ifdef XIVE_DEBUG
161         xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
162 #endif
163         return 0;
164     }
165 
166     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
167 
168     if (!pnv_xive_vst_page_size_allowed(page_shift)) {
169         xive_error(xive, "VST: invalid %s page shift %d", info->name,
170                    page_shift);
171         return 0;
172     }
173 
174     vst_per_page = (1ull << page_shift) / info->size;
175     vsd_idx = idx / vst_per_page;
176 
177     /* Load the VSD we are looking for, if not already done */
178     if (vsd_idx) {
179         vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
180         vsd = ldq_be_dma(&address_space_memory, vsd_addr);
181 
182         if (!(vsd & VSD_ADDRESS_MASK)) {
183 #ifdef XIVE_DEBUG
184             xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
185 #endif
186             return 0;
187         }
188 
189         /*
190          * Check that the pages have a consistent size across the
191          * indirect table
192          */
193         if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
194             xive_error(xive, "VST: %s entry %x indirect page size differ !?",
195                        info->name, idx);
196             return 0;
197         }
198     }
199 
200     return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
201 }
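/*
 * Worked example: with 64K indirect pages and 32-byte ENDs,
 * vst_per_page is 65536 / 32 = 2048. END index 5000 then selects
 * VSD #2 of the indirect table (5000 / 2048) and entry #904
 * (5000 % 2048) of the direct page it points to.
 */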
202 
203 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
204                                   uint32_t idx)
205 {
206     const XiveVstInfo *info = &vst_infos[type];
207     uint64_t vsd;
208 
209     if (blk >= info->max_blocks) {
210         xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
211                    blk, info->name, idx);
212         return 0;
213     }
214 
215     vsd = xive->vsds[type][blk];
216 
217     /* Remote VST access */
218     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
219         xive = pnv_xive_get_ic(blk);
220 
221         return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
222     }
223 
224     if (VSD_INDIRECT & vsd) {
225         return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
226     }
227 
228     return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
229 }
230 
231 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
232                              uint32_t idx, void *data)
233 {
234     const XiveVstInfo *info = &vst_infos[type];
235     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
236 
237     if (!addr) {
238         return -1;
239     }
240 
241     cpu_physical_memory_read(addr, data, info->size);
242     return 0;
243 }
244 
245 #define XIVE_VST_WORD_ALL -1
246 
247 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
248                               uint32_t idx, void *data, uint32_t word_number)
249 {
250     const XiveVstInfo *info = &vst_infos[type];
251     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
252 
253     if (!addr) {
254         return -1;
255     }
256 
257     if (word_number == XIVE_VST_WORD_ALL) {
258         cpu_physical_memory_write(addr, data, info->size);
259     } else {
260         cpu_physical_memory_write(addr + word_number * 4,
261                                   data + word_number * 4, 4);
262     }
263     return 0;
264 }
265 
266 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
267                             XiveEND *end)
268 {
269     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
270 }
271 
272 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
273                               XiveEND *end, uint8_t word_number)
274 {
275     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
276                               word_number);
277 }
278 
279 static int pnv_xive_end_update(PnvXive *xive)
280 {
281     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
282                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
283     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
284                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
285     int i;
286     uint64_t eqc_watch[4];
287 
288     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
289         eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
290     }
291 
292     return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
293                               XIVE_VST_WORD_ALL);
294 }
295 
296 static void pnv_xive_end_cache_load(PnvXive *xive)
297 {
298     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
299                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
300     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
301                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
302     uint64_t eqc_watch[4] = { 0 };
303     int i;
304 
305     if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
306         xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
307     }
308 
309     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
310         xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
311     }
312 }
313 
314 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
315                             XiveNVT *nvt)
316 {
317     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
318 }
319 
320 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
321                               XiveNVT *nvt, uint8_t word_number)
322 {
323     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
324                               word_number);
325 }
326 
327 static int pnv_xive_nvt_update(PnvXive *xive)
328 {
329     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
330                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
331     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
332                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
333     int i;
334     uint64_t vpc_watch[8];
335 
336     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
337         vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
338     }
339 
340     return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
341                               XIVE_VST_WORD_ALL);
342 }
343 
344 static void pnv_xive_nvt_cache_load(PnvXive *xive)
345 {
346     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
347                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
348     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
349                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
350     uint64_t vpc_watch[8] = { 0 };
351     int i;
352 
353     if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
354         xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
355     }
356 
357     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
358         xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
359     }
360 }
361 
362 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
363                             XiveEAS *eas)
364 {
365     PnvXive *xive = PNV_XIVE(xrtr);
366 
367     if (pnv_xive_get_ic(blk) != xive) {
368         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
369         return -1;
370     }
371 
372     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
373 }
374 
375 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
376                               uint8_t nvt_blk, uint32_t nvt_idx,
377                               bool cam_ignore, uint8_t priority,
378                               uint32_t logic_serv, XiveTCTXMatch *match)
379 {
380     PnvXive *xive = PNV_XIVE(xptr);
381     PnvChip *chip = xive->chip;
382     int count = 0;
383     int i, j;
384 
385     for (i = 0; i < chip->nr_cores; i++) {
386         PnvCore *pc = chip->cores[i];
387         CPUCore *cc = CPU_CORE(pc);
388 
389         for (j = 0; j < cc->nr_threads; j++) {
390             PowerPCCPU *cpu = pc->threads[j];
391             XiveTCTX *tctx;
392             int ring;
393 
394             tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
395 
396             /*
397              * Check the thread context CAM lines and record matches.
398              */
399             ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
400                                              nvt_idx, cam_ignore, logic_serv);
401             /*
402              * Save the context and keep scanning to catch duplicates,
403              * which we don't support yet.
404              */
405             if (ring != -1) {
406                 if (match->tctx) {
407                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
408                                   "thread context NVT %x/%x\n",
409                                   nvt_blk, nvt_idx);
410                     return -1;
411                 }
412 
413                 match->ring = ring;
414                 match->tctx = tctx;
415                 count++;
416             }
417         }
418     }
419 
420     return count;
421 }
422 
423 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
424 {
425     PowerPCCPU *cpu = POWERPC_CPU(cs);
426     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
427     PnvXive *xive = NULL;
428     CPUPPCState *env = &cpu->env;
429     int pir = env->spr_cb[SPR_PIR].default_value;
430 
431     /*
432      * Perform an extra check on the HW thread enablement.
433      *
434      * The TIMA is shared among the chips. To identify the chip from
435      * which the access is being done, we extract the chip id from
436      * the PIR.
437      */
438     xive = pnv_xive_get_ic((pir >> 8) & 0xf);
439     if (!xive) {
440         return NULL;
441     }
442 
443     if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
444         xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
445     }
446 
447     return tctx;
448 }
449 
450 /*
451  * The internal sources (IPIs) of the interrupt controller have no
452  * knowledge of the XIVE chip on which they reside. Encode the block
453  * id in the source interrupt number before forwarding the source
454  * event notification to the Router. This is required on a multichip
455  * system.
456  */
457 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
458 {
459     PnvXive *xive = PNV_XIVE(xn);
460     uint8_t blk = xive->chip->chip_id;
461 
462     xive_router_notify(xn, XIVE_EAS(blk, srcno));
463 }
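/*
 * For example, source 0x20 of the IPI range on chip/block 1 is
 * presented to the router as EAS 0x10000020, the block id occupying
 * the top nibble of the 32-bit interrupt number.
 */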
464 
465 /*
466  * XIVE helpers
467  */
468 
469 static uint64_t pnv_xive_vc_size(PnvXive *xive)
470 {
471     return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
472 }
473 
474 static uint64_t pnv_xive_edt_shift(PnvXive *xive)
475 {
476     return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
477 }
478 
479 static uint64_t pnv_xive_pc_size(PnvXive *xive)
480 {
481     return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
482 }
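/*
 * The VC and PC window sizes above are recovered from the BARM mask
 * registers by two's complement (~barm + 1), masked to the valid BAR
 * range.
 */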
483 
484 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
485 {
486     uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
487     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
488 
489     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
490 }
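/*
 * For instance, a direct 64K SBE table (VSD_TSIZE field of 4, i.e.
 * page_shift 16) provisions 65536 * 4 = 256K IPI numbers.
 */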
491 
492 /*
493  * EDT Table
494  *
495  * The Virtualization Controller MMIO region containing the IPI ESB
496  * pages and END ESB pages is sub-divided into "sets" which map
497  * portions of the VC region to the different ESB pages. It is
498  * configured at runtime through the EDT "Domain Table" to let the
499  * firmware decide how to split the VC address space between IPI ESB
500  * pages and END ESB pages.
501  */
502 
503 /*
504  * Computes the overall size of the IPI or the END ESB pages
505  */
506 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
507 {
508     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
509     uint64_t size = 0;
510     int i;
511 
512     for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
513         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
514 
515         if (edt_type == type) {
516             size += edt_size;
517         }
518     }
519 
520     return size;
521 }
522 
523 /*
524  * Maps an offset of the VC region into the IPI or END region using
525  * the layout defined by the EDT "Domain Table".
526  */
527 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
528                                     uint64_t type)
529 {
530     int i;
531     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
532     uint64_t edt_offset = vc_offset;
533 
534     for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
535         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
536 
537         if (edt_type != type) {
538             edt_offset -= edt_size;
539         }
540     }
541 
542     return edt_offset;
543 }
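/*
 * Worked example (hypothetical layout): if EDT sets #0 and #1 are of
 * the IPI type and set #2 is the first EQ set, an access falling in
 * set #2 is remapped into the END address space at
 * vc_offset - 2 * edt_size, since the two IPI sets below it do not
 * belong to the END region.
 */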
544 
545 static void pnv_xive_edt_resize(PnvXive *xive)
546 {
547     uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
548     uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
549 
550     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
551     memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
552 
553     memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
554     memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
555 }
556 
557 /*
558  * XIVE Table configuration. Only EDT is supported.
559  */
560 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
561 {
562     uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
563     uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
564     uint64_t *xive_table;
565     uint8_t max_index;
566 
567     switch (tsel) {
568     case CQ_TAR_TSEL_BLK:
569         max_index = ARRAY_SIZE(xive->blk);
570         xive_table = xive->blk;
571         break;
572     case CQ_TAR_TSEL_MIG:
573         max_index = ARRAY_SIZE(xive->mig);
574         xive_table = xive->mig;
575         break;
576     case CQ_TAR_TSEL_EDT:
577         max_index = ARRAY_SIZE(xive->edt);
578         xive_table = xive->edt;
579         break;
580     case CQ_TAR_TSEL_VDT:
581         max_index = ARRAY_SIZE(xive->vdt);
582         xive_table = xive->vdt;
583         break;
584     default:
585         xive_error(xive, "IC: invalid table %d", (int) tsel);
586         return -1;
587     }
588 
589     if (tsel_index >= max_index) {
590         xive_error(xive, "IC: invalid index %d", (int) tsel_index);
591         return -1;
592     }
593 
594     xive_table[tsel_index] = val;
595 
596     if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
597         xive->regs[CQ_TAR >> 3] =
598             SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
599     }
600 
601     /*
602      * EDT configuration is complete. Resize the MMIO windows exposing
603      * the IPI and the END ESBs in the VC region.
604      */
605     if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
606         pnv_xive_edt_resize(xive);
607     }
608 
609     return 0;
610 }
611 
612 /*
613  * Virtual Structure Tables (VST) configuration
614  */
615 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
616                                        uint8_t blk, uint64_t vsd)
617 {
618     XiveENDSource *end_xsrc = &xive->end_source;
619     XiveSource *xsrc = &xive->ipi_source;
620     const XiveVstInfo *info = &vst_infos[type];
621     uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
622     uint64_t vst_tsize = 1ull << page_shift;
623     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
624 
625     /* Basic checks */
626 
627     if (VSD_INDIRECT & vsd) {
628         if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
629             xive_error(xive, "VST: %s indirect tables are not enabled",
630                        info->name);
631             return;
632         }
633 
634         if (!pnv_xive_vst_page_size_allowed(page_shift)) {
635             xive_error(xive, "VST: invalid %s page shift %d", info->name,
636                        page_shift);
637             return;
638         }
639     }
640 
641     if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
642         xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
643                    " page shift %d", info->name, vst_addr, page_shift);
644         return;
645     }
646 
647     /* Record the table configuration (in SRAM on HW) */
648     xive->vsds[type][blk] = vsd;
649 
650     /* Now tune the models with the configuration provided by the FW */
651 
652     switch (type) {
653     case VST_TSEL_IVT:  /* Nothing to be done */
654         break;
655 
656     case VST_TSEL_EQDT:
657         /*
658          * Backing store pages for the END.
659          *
660          * If the table is direct, we can compute the number of PQ
661          * entries provisioned by FW (such as skiboot) and resize the
662          * END ESB window accordingly.
663          */
664         if (!(VSD_INDIRECT & vsd)) {
665             memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
666                                    * (1ull << xsrc->esb_shift));
667         }
668         memory_region_add_subregion(&xive->end_edt_mmio, 0,
669                                     &end_xsrc->esb_mmio);
670         break;
671 
672     case VST_TSEL_SBE:
673         /*
674          * Backing store pages for the source PQ bits. The model does
675          * not use these PQ bits backed in RAM because the XiveSource
676          * model has its own.
677          *
678          * If the table is direct, we can compute the number of PQ
679          * entries provisioned by FW (such as skiboot) and resize the
680          * ESB window accordingly.
681          */
682         if (!(VSD_INDIRECT & vsd)) {
683             memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
684                                    * (1ull << xsrc->esb_shift));
685         }
686         memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
687         break;
688 
689     case VST_TSEL_VPDT: /* Not modeled */
690     case VST_TSEL_IRQ:  /* Not modeled */
691         /*
692          * These tables contain the backing store pages for the
693          * interrupt fifos of the VC sub-engine in case of overflow.
694          */
695         break;
696 
697     default:
698         g_assert_not_reached();
699     }
700 }
701 
702 /*
703  * Both the PC and VC sub-engines are configured here, as each uses
704  * the Virtual Structure Tables: SBE, EAS, END and NVT.
705  */
706 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
707 {
708     uint8_t mode = GETFIELD(VSD_MODE, vsd);
709     uint8_t type = GETFIELD(VST_TABLE_SELECT,
710                             xive->regs[VC_VSD_TABLE_ADDR >> 3]);
711     uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
712                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
713     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
714 
715     if (type > VST_TSEL_IRQ) {
716         xive_error(xive, "VST: invalid table type %d", type);
717         return;
718     }
719 
720     if (blk >= vst_infos[type].max_blocks) {
721         xive_error(xive, "VST: invalid block id %d for"
722                       " %s table", blk, vst_infos[type].name);
723         return;
724     }
725 
726     /*
727      * Only take the VC sub-engine configuration into account because
728      * the XiveRouter model combines both VC and PC sub-engines
729      */
730     if (pc_engine) {
731         return;
732     }
733 
734     if (!vst_addr) {
735         xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
736         return;
737     }
738 
739     switch (mode) {
740     case VSD_MODE_FORWARD:
741         xive->vsds[type][blk] = vsd;
742         break;
743 
744     case VSD_MODE_EXCLUSIVE:
745         pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
746         break;
747 
748     default:
749         xive_error(xive, "VST: unsupported table mode %d", mode);
750         return;
751     }
752 }
753 
754 /*
755  * Interrupt controller MMIO region. The layout is compatible between
756  * 4K and 64K pages:
757  *
758  * Page 0           sub-engine BARs
759  *  0x000 - 0x3FF   IC registers
760  *  0x400 - 0x7FF   PC registers
761  *  0x800 - 0xFFF   VC registers
762  *
763  * Page 1           Notify page (writes only)
764  *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
765  *  0x800 - 0xFFF   forwards and syncs
766  *
767  * Page 2           LSI Trigger page (writes only) (not modeled)
768  * Page 3           LSI SB EOI page (reads only) (not modeled)
769  *
770  * Page 4-7         indirect TIMA
771  */
772 
773 /*
774  * IC - registers MMIO
775  */
776 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
777                                   uint64_t val, unsigned size)
778 {
779     PnvXive *xive = PNV_XIVE(opaque);
780     MemoryRegion *sysmem = get_system_memory();
781     uint32_t reg = offset >> 3;
782     bool is_chip0 = xive->chip->chip_id == 0;
783 
784     switch (offset) {
785 
786     /*
787      * XIVE CQ (PowerBus bridge) settings
788      */
789     case CQ_MSGSND:     /* msgsnd for doorbells */
790     case CQ_FIRMASK_OR: /* FIR error reporting */
791         break;
792     case CQ_PBI_CTL:
793         if (val & CQ_PBI_PC_64K) {
794             xive->pc_shift = 16;
795         }
796         if (val & CQ_PBI_VC_64K) {
797             xive->vc_shift = 16;
798         }
799         break;
800     case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
801         /*
802          * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
803          */
804         break;
805 
806     /*
807      * XIVE Virtualization Controller settings
808      */
809     case VC_GLOBAL_CONFIG:
810         break;
811 
812     /*
813      * XIVE Presenter Controller settings
814      */
815     case PC_GLOBAL_CONFIG:
816         /*
817          * PC_GCONF_CHIPID_OVR
818          *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
819          */
820         break;
821     case PC_TCTXT_CFG:
822         /*
823          * TODO: block group support
824          *
825          * PC_TCTXT_CFG_BLKGRP_EN
826          * PC_TCTXT_CFG_HARD_CHIPID_BLK :
827          *   Moves the chipid into block field for hardwired CAM compares.
828          *   Block offset value is adjusted to 0b0..01 & ThrdId
829          *
830          *   Will require changes in xive_presenter_tctx_match(). I am
831          *   not sure how to handle that yet.
832          */
833 
834         /* Overrides hardwired chip ID with the chip ID field */
835         if (val & PC_TCTXT_CHIPID_OVERRIDE) {
836             xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
837         }
838         break;
839     case PC_TCTXT_TRACK:
840         /*
841          * PC_TCTXT_TRACK_EN:
842          *   enable block tracking and exchange of block ownership
843          *   information between Interrupt controllers
844          */
845         break;
846 
847     /*
848      * Misc settings
849      */
850     case VC_SBC_CONFIG: /* Store EOI configuration */
851         /*
852          * Configure store EOI if required by firmware (skiboot has
853          * recently removed support for it, though)
854          */
855         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
856             xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
857         }
858         break;
859 
860     case VC_EQC_CONFIG: /* TODO: silent escalation */
861     case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
862         break;
863 
864     /*
865      * XIVE BAR settings (XSCOM only)
866      */
867     case CQ_RST_CTL:
868         /* bit4: resets all BAR registers */
869         break;
870 
871     case CQ_IC_BAR: /* IC BAR. 8 pages */
872         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
873         if (!(val & CQ_IC_BAR_VALID)) {
874             xive->ic_base = 0;
875             if (xive->regs[reg] & CQ_IC_BAR_VALID) {
876                 memory_region_del_subregion(&xive->ic_mmio,
877                                             &xive->ic_reg_mmio);
878                 memory_region_del_subregion(&xive->ic_mmio,
879                                             &xive->ic_notify_mmio);
880                 memory_region_del_subregion(&xive->ic_mmio,
881                                             &xive->ic_lsi_mmio);
882                 memory_region_del_subregion(&xive->ic_mmio,
883                                             &xive->tm_indirect_mmio);
884 
885                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
886             }
887         } else {
888             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
889             if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
890                 memory_region_add_subregion(sysmem, xive->ic_base,
891                                             &xive->ic_mmio);
892 
893                 memory_region_add_subregion(&xive->ic_mmio,  0,
894                                             &xive->ic_reg_mmio);
895                 memory_region_add_subregion(&xive->ic_mmio,
896                                             1ul << xive->ic_shift,
897                                             &xive->ic_notify_mmio);
898                 memory_region_add_subregion(&xive->ic_mmio,
899                                             2ul << xive->ic_shift,
900                                             &xive->ic_lsi_mmio);
901                 memory_region_add_subregion(&xive->ic_mmio,
902                                             4ull << xive->ic_shift,
903                                             &xive->tm_indirect_mmio);
904             }
905         }
906         break;
907 
908     case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
909     case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
910         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
911         if (!(val & CQ_TM_BAR_VALID)) {
912             xive->tm_base = 0;
913             if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
914                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
915             }
916         } else {
917             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
918             if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
919                 memory_region_add_subregion(sysmem, xive->tm_base,
920                                             &xive->tm_mmio);
921             }
922         }
923         break;
924 
925     case CQ_PC_BARM:
926         xive->regs[reg] = val;
927         memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
928         break;
929     case CQ_PC_BAR: /* From 32M to 512G */
930         if (!(val & CQ_PC_BAR_VALID)) {
931             xive->pc_base = 0;
932             if (xive->regs[reg] & CQ_PC_BAR_VALID) {
933                 memory_region_del_subregion(sysmem, &xive->pc_mmio);
934             }
935         } else {
936             xive->pc_base = val & ~(CQ_PC_BAR_VALID);
937             if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
938                 memory_region_add_subregion(sysmem, xive->pc_base,
939                                             &xive->pc_mmio);
940             }
941         }
942         break;
943 
944     case CQ_VC_BARM:
945         xive->regs[reg] = val;
946         memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
947         break;
948     case CQ_VC_BAR: /* From 64M to 4TB */
949         if (!(val & CQ_VC_BAR_VALID)) {
950             xive->vc_base = 0;
951             if (xive->regs[reg] & CQ_VC_BAR_VALID) {
952                 memory_region_del_subregion(sysmem, &xive->vc_mmio);
953             }
954         } else {
955             xive->vc_base = val & ~(CQ_VC_BAR_VALID);
956             if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
957                 memory_region_add_subregion(sysmem, xive->vc_base,
958                                             &xive->vc_mmio);
959             }
960         }
961         break;
962 
963     /*
964      * XIVE Table settings.
965      */
966     case CQ_TAR: /* Table Address */
967         break;
968     case CQ_TDR: /* Table Data */
969         pnv_xive_table_set_data(xive, val);
970         break;
971 
972     /*
973      * XIVE VC & PC Virtual Structure Table settings
974      */
975     case VC_VSD_TABLE_ADDR:
976     case PC_VSD_TABLE_ADDR: /* Virtual table selector */
977         break;
978     case VC_VSD_TABLE_DATA: /* Virtual table setting */
979     case PC_VSD_TABLE_DATA:
980         pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
981         break;
982 
983     /*
984      * Interrupt fifo overflow in memory backing store (Not modeled)
985      */
986     case VC_IRQ_CONFIG_IPI:
987     case VC_IRQ_CONFIG_HW:
988     case VC_IRQ_CONFIG_CASCADE1:
989     case VC_IRQ_CONFIG_CASCADE2:
990     case VC_IRQ_CONFIG_REDIST:
991     case VC_IRQ_CONFIG_IPI_CASC:
992         break;
993 
994     /*
995      * XIVE hardware thread enablement
996      */
997     case PC_THREAD_EN_REG0: /* Physical Thread Enable */
998     case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
999         break;
1000 
1001     case PC_THREAD_EN_REG0_SET:
1002         xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
1003         break;
1004     case PC_THREAD_EN_REG1_SET:
1005         xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
1006         break;
1007     case PC_THREAD_EN_REG0_CLR:
1008         xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
1009         break;
1010     case PC_THREAD_EN_REG1_CLR:
1011         xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
1012         break;
1013 
1014     /*
1015      * Indirect TIMA access set up. Defines the PIR of the HW thread
1016      * to use.
1017      */
1018     case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
1019         break;
1020 
1021     /*
1022      * XIVE PC & VC cache updates for EAS, NVT and END
1023      */
1024     case VC_IVC_SCRUB_MASK:
1025     case VC_IVC_SCRUB_TRIG:
1026         break;
1027 
1028     case VC_EQC_CWATCH_SPEC:
1029         val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
1030         break;
1031     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1032         break;
1033     case VC_EQC_CWATCH_DAT0:
1034         /* writing to DATA0 triggers the cache write */
1035         xive->regs[reg] = val;
1036         pnv_xive_end_update(xive);
1037         break;
1038     case VC_EQC_SCRUB_MASK:
1039     case VC_EQC_SCRUB_TRIG:
1040         /*
1041          * The scrubbing registers flush the cache in RAM and can also
1042          * invalidate.
1043          */
1044         break;
1045 
1046     case PC_VPC_CWATCH_SPEC:
1047         val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
1048         break;
1049     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1050         break;
1051     case PC_VPC_CWATCH_DAT0:
1052         /* writing to DATA0 triggers the cache write */
1053         xive->regs[reg] = val;
1054         pnv_xive_nvt_update(xive);
1055         break;
1056     case PC_VPC_SCRUB_MASK:
1057     case PC_VPC_SCRUB_TRIG:
1058         /*
1059          * The scrubbing registers flush the cache in RAM and can also
1060          * invalidate.
1061          */
1062         break;
1063 
1065     /*
1066      * XIVE PC & VC cache invalidation
1067      */
1068     case PC_AT_KILL:
1069         break;
1070     case VC_AT_MACRO_KILL:
1071         break;
1072     case PC_AT_KILL_MASK:
1073     case VC_AT_MACRO_KILL_MASK:
1074         break;
1075 
1076     default:
1077         xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
1078         return;
1079     }
1080 
1081     xive->regs[reg] = val;
1082 }
1083 
1084 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
1085 {
1086     PnvXive *xive = PNV_XIVE(opaque);
1087     uint64_t val = 0;
1088     uint32_t reg = offset >> 3;
1089 
1090     switch (offset) {
1091     case CQ_CFG_PB_GEN:
1092     case CQ_IC_BAR:
1093     case CQ_TM1_BAR:
1094     case CQ_TM2_BAR:
1095     case CQ_PC_BAR:
1096     case CQ_PC_BARM:
1097     case CQ_VC_BAR:
1098     case CQ_VC_BARM:
1099     case CQ_TAR:
1100     case CQ_TDR:
1101     case CQ_PBI_CTL:
1102 
1103     case PC_TCTXT_CFG:
1104     case PC_TCTXT_TRACK:
1105     case PC_TCTXT_INDIR0:
1106     case PC_TCTXT_INDIR1:
1107     case PC_TCTXT_INDIR2:
1108     case PC_TCTXT_INDIR3:
1109     case PC_GLOBAL_CONFIG:
1110 
1111     case PC_VPC_SCRUB_MASK:
1112 
1113     case VC_GLOBAL_CONFIG:
1114     case VC_AIB_TX_ORDER_TAG2:
1115 
1116     case VC_IRQ_CONFIG_IPI:
1117     case VC_IRQ_CONFIG_HW:
1118     case VC_IRQ_CONFIG_CASCADE1:
1119     case VC_IRQ_CONFIG_CASCADE2:
1120     case VC_IRQ_CONFIG_REDIST:
1121     case VC_IRQ_CONFIG_IPI_CASC:
1122 
1123     case VC_EQC_SCRUB_MASK:
1124     case VC_IVC_SCRUB_MASK:
1125     case VC_SBC_CONFIG:
1126     case VC_AT_MACRO_KILL_MASK:
1127     case VC_VSD_TABLE_ADDR:
1128     case PC_VSD_TABLE_ADDR:
1129     case VC_VSD_TABLE_DATA:
1130     case PC_VSD_TABLE_DATA:
1131     case PC_THREAD_EN_REG0:
1132     case PC_THREAD_EN_REG1:
1133         val = xive->regs[reg];
1134         break;
1135 
1136     /*
1137      * XIVE hardware thread enablement
1138      */
1139     case PC_THREAD_EN_REG0_SET:
1140     case PC_THREAD_EN_REG0_CLR:
1141         val = xive->regs[PC_THREAD_EN_REG0 >> 3];
1142         break;
1143     case PC_THREAD_EN_REG1_SET:
1144     case PC_THREAD_EN_REG1_CLR:
1145         val = xive->regs[PC_THREAD_EN_REG1 >> 3];
1146         break;
1147 
1148     case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
1149         val = 0xffffff0000000000;
1150         break;
1151 
1152     /*
1153      * XIVE PC & VC cache updates for EAS, NVT and END
1154      */
1155     case VC_EQC_CWATCH_SPEC:
1156         xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
1157         val = xive->regs[reg];
1158         break;
1159     case VC_EQC_CWATCH_DAT0:
1160         /*
1161          * Load DATA registers from cache with data requested by the
1162          * SPEC register
1163          */
1164         pnv_xive_end_cache_load(xive);
1165         val = xive->regs[reg];
1166         break;
1167     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1168         val = xive->regs[reg];
1169         break;
1170 
1171     case PC_VPC_CWATCH_SPEC:
1172         xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
1173         val = xive->regs[reg];
1174         break;
1175     case PC_VPC_CWATCH_DAT0:
1176         /*
1177          * Load DATA registers from cache with data requested by the
1178          * SPEC register
1179          */
1180         pnv_xive_nvt_cache_load(xive);
1181         val = xive->regs[reg];
1182         break;
1183     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1184         val = xive->regs[reg];
1185         break;
1186 
1187     case PC_VPC_SCRUB_TRIG:
1188     case VC_IVC_SCRUB_TRIG:
1189     case VC_EQC_SCRUB_TRIG:
1190         xive->regs[reg] &= ~VC_SCRUB_VALID;
1191         val = xive->regs[reg];
1192         break;
1193 
1194     /*
1195      * XIVE PC & VC cache invalidation
1196      */
1197     case PC_AT_KILL:
1198         xive->regs[reg] &= ~PC_AT_KILL_VALID;
1199         val = xive->regs[reg];
1200         break;
1201     case VC_AT_MACRO_KILL:
1202         xive->regs[reg] &= ~VC_KILL_VALID;
1203         val = xive->regs[reg];
1204         break;
1205 
1206     /*
1207      * XIVE synchronisation
1208      */
1209     case VC_EQC_CONFIG:
1210         val = VC_EQC_SYNC_MASK;
1211         break;
1212 
1213     default:
1214         xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
1215     }
1216 
1217     return val;
1218 }
1219 
1220 static const MemoryRegionOps pnv_xive_ic_reg_ops = {
1221     .read = pnv_xive_ic_reg_read,
1222     .write = pnv_xive_ic_reg_write,
1223     .endianness = DEVICE_BIG_ENDIAN,
1224     .valid = {
1225         .min_access_size = 8,
1226         .max_access_size = 8,
1227     },
1228     .impl = {
1229         .min_access_size = 8,
1230         .max_access_size = 8,
1231     },
1232 };
1233 
1234 /*
1235  * IC - Notify MMIO port page (write only)
1236  */
1237 #define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
1238 #define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
1239 #define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
1240 #define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
1241 #define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
1242 #define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
1243 #define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
1244 #define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */
1245 
1246 /* VC synchronisation */
1247 #define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
1248 #define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
1249 #define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
1250 #define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
1251 #define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */
1252 
1253 /* PC synchronisation */
1254 #define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
1255 #define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
1256 #define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
1257 
1258 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
1259 {
1260     uint8_t blk;
1261     uint32_t idx;
1262 
1263     if (val & XIVE_TRIGGER_END) {
1264         xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1265                    addr, val);
1266         return;
1267     }
1268 
1269     /*
1270      * Forward the source event notification directly to the Router.
1271      * The source interrupt number should already be correctly encoded
1272      * with the chip block id by the sending device (PHB, PSI).
1273      */
1274     blk = XIVE_EAS_BLOCK(val);
1275     idx = XIVE_EAS_INDEX(val);
1276 
1277     xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
1278 }
1279 
1280 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1281                                      unsigned size)
1282 {
1283     PnvXive *xive = PNV_XIVE(opaque);
1284 
1285     /* VC: HW triggers */
1286     switch (addr) {
1287     case 0x000 ... 0x7FF:
1288         pnv_xive_ic_hw_trigger(xive, addr, val);
1289         break;
1290 
1291     /* VC: Forwarded IRQs */
1292     case PNV_XIVE_FORWARD_IPI:
1293     case PNV_XIVE_FORWARD_HW:
1294     case PNV_XIVE_FORWARD_OS_ESC:
1295     case PNV_XIVE_FORWARD_HW_ESC:
1296     case PNV_XIVE_FORWARD_REDIS:
1297         /* TODO: forwarded IRQs. Should be like HW triggers */
1298         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1299                    addr, val);
1300         break;
1301 
1302     /* VC syncs */
1303     case PNV_XIVE_SYNC_IPI:
1304     case PNV_XIVE_SYNC_HW:
1305     case PNV_XIVE_SYNC_OS_ESC:
1306     case PNV_XIVE_SYNC_HW_ESC:
1307     case PNV_XIVE_SYNC_REDIS:
1308         break;
1309 
1310     /* PC syncs */
1311     case PNV_XIVE_SYNC_PULL:
1312     case PNV_XIVE_SYNC_PUSH:
1313     case PNV_XIVE_SYNC_VPC:
1314         break;
1315 
1316     default:
1317         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1318     }
1319 }
1320 
1321 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1322                                         unsigned size)
1323 {
1324     PnvXive *xive = PNV_XIVE(opaque);
1325 
1326     /* loads are invalid */
1327     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1328     return -1;
1329 }
1330 
1331 static const MemoryRegionOps pnv_xive_ic_notify_ops = {
1332     .read = pnv_xive_ic_notify_read,
1333     .write = pnv_xive_ic_notify_write,
1334     .endianness = DEVICE_BIG_ENDIAN,
1335     .valid = {
1336         .min_access_size = 8,
1337         .max_access_size = 8,
1338     },
1339     .impl = {
1340         .min_access_size = 8,
1341         .max_access_size = 8,
1342     },
1343 };
1344 
1345 /*
1346  * IC - LSI MMIO handlers (not modeled)
1347  */
1348 
1349 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1350                               uint64_t val, unsigned size)
1351 {
1352     PnvXive *xive = PNV_XIVE(opaque);
1353 
1354     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1355 }
1356 
1357 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1358 {
1359     PnvXive *xive = PNV_XIVE(opaque);
1360 
1361     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1362     return -1;
1363 }
1364 
1365 static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
1366     .read = pnv_xive_ic_lsi_read,
1367     .write = pnv_xive_ic_lsi_write,
1368     .endianness = DEVICE_BIG_ENDIAN,
1369     .valid = {
1370         .min_access_size = 8,
1371         .max_access_size = 8,
1372     },
1373     .impl = {
1374         .min_access_size = 8,
1375         .max_access_size = 8,
1376     },
1377 };
1378 
1379 /*
1380  * IC - Indirect TIMA MMIO handlers
1381  */
1382 
1383 /*
1384  * When the TIMA is accessed from the indirect page, the thread id
1385  * (PIR) has to be configured in the IC registers beforehand. This is
1386  * used for resets and also for debug purposes.
1387  */
1388 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1389 {
1390     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1391     PowerPCCPU *cpu = NULL;
1392     int pir;
1393 
1394     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1395         xive_error(xive, "IC: no indirect TIMA access in progress");
1396         return NULL;
1397     }
1398 
1399     pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
1400     cpu = ppc_get_vcpu_by_pir(pir);
1401     if (!cpu) {
1402         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1403         return NULL;
1404     }
1405 
1406     /* Check that HW thread is XIVE enabled */
1407     if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
1408         xive_error(xive, "IC: CPU %x is not enabled", pir);
1409     }
1410 
1411     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1412 }
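/*
 * A sketch of the sequence expected from software (values are
 * illustrative): select the target thread, then access IC pages 4-7.
 *
 *    uint64_t val = PC_TCTXT_INDIR_VALID;
 *    val = SETFIELD(PC_TCTXT_INDIR_THRDID, val, pir);
 *    ... store val to PC_TCTXT_INDIR0, then read/write the indirect TIMA ...
 */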
1413 
1414 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1415                                    uint64_t value, unsigned size)
1416 {
1417     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1418 
         /* An error was already logged if no valid thread context was found */
         if (!tctx) {
             return;
         }

1419     xive_tctx_tm_write(tctx, offset, value, size);
1420 }
1421 
1422 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1423                                       unsigned size)
1424 {
1425     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1426 
         /* An error was already logged if no valid thread context was found */
         if (!tctx) {
             return -1;
         }

1427     return xive_tctx_tm_read(tctx, offset, size);
1428 }
1429 
1430 static const MemoryRegionOps xive_tm_indirect_ops = {
1431     .read = xive_tm_indirect_read,
1432     .write = xive_tm_indirect_write,
1433     .endianness = DEVICE_BIG_ENDIAN,
1434     .valid = {
1435         .min_access_size = 1,
1436         .max_access_size = 8,
1437     },
1438     .impl = {
1439         .min_access_size = 1,
1440         .max_access_size = 8,
1441     },
1442 };
1443 
1444 /*
1445  * Interrupt controller XSCOM region.
1446  */
1447 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1448 {
1449     switch (addr >> 3) {
1450     case X_VC_EQC_CONFIG:
1451         /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
1452         return VC_EQC_SYNC_MASK;
1453     default:
1454         return pnv_xive_ic_reg_read(opaque, addr, size);
1455     }
1456 }
1457 
1458 static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
1459                                 uint64_t val, unsigned size)
1460 {
1461     pnv_xive_ic_reg_write(opaque, addr, val, size);
1462 }
1463 
1464 static const MemoryRegionOps pnv_xive_xscom_ops = {
1465     .read = pnv_xive_xscom_read,
1466     .write = pnv_xive_xscom_write,
1467     .endianness = DEVICE_BIG_ENDIAN,
1468     .valid = {
1469         .min_access_size = 8,
1470         .max_access_size = 8,
1471     },
1472     .impl = {
1473         .min_access_size = 8,
1474         .max_access_size = 8,
1475     }
1476 };
1477 
1478 /*
1479  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1480  */
1481 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1482                                  unsigned size)
1483 {
1484     PnvXive *xive = PNV_XIVE(opaque);
1485     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1486     uint64_t edt_type = 0;
1487     uint64_t edt_offset;
1488     MemTxResult result;
1489     AddressSpace *edt_as = NULL;
1490     uint64_t ret = -1;
1491 
1492     if (edt_index < XIVE_TABLE_EDT_MAX) {
1493         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1494     }
1495 
1496     switch (edt_type) {
1497     case CQ_TDR_EDT_IPI:
1498         edt_as = &xive->ipi_as;
1499         break;
1500     case CQ_TDR_EDT_EQ:
1501         edt_as = &xive->end_as;
1502         break;
1503     default:
1504         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1505         return -1;
1506     }
1507 
1508     /* Remap the offset for the targeted address space */
1509     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1510 
1511     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1512                             &result);
1513 
1514     if (result != MEMTX_OK) {
1515         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1516                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1517                    offset, edt_offset);
1518         return -1;
1519     }
1520 
1521     return ret;
1522 }
1523 
1524 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1525                               uint64_t val, unsigned size)
1526 {
1527     PnvXive *xive = PNV_XIVE(opaque);
1528     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1529     uint64_t edt_type = 0;
1530     uint64_t edt_offset;
1531     MemTxResult result;
1532     AddressSpace *edt_as = NULL;
1533 
1534     if (edt_index < XIVE_TABLE_EDT_MAX) {
1535         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1536     }
1537 
1538     switch (edt_type) {
1539     case CQ_TDR_EDT_IPI:
1540         edt_as = &xive->ipi_as;
1541         break;
1542     case CQ_TDR_EDT_EQ:
1543         edt_as = &xive->end_as;
1544         break;
1545     default:
1546         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1547                    offset);
1548         return;
1549     }
1550 
1551     /* Remap the offset for the targeted address space */
1552     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1553 
1554     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1555     if (result != MEMTX_OK) {
1556         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1557     }
1558 }
1559 
1560 static const MemoryRegionOps pnv_xive_vc_ops = {
1561     .read = pnv_xive_vc_read,
1562     .write = pnv_xive_vc_write,
1563     .endianness = DEVICE_BIG_ENDIAN,
1564     .valid = {
1565         .min_access_size = 8,
1566         .max_access_size = 8,
1567     },
1568     .impl = {
1569         .min_access_size = 8,
1570         .max_access_size = 8,
1571     },
1572 };
1573 
1574 /*
1575  * Presenter Controller MMIO region. The Virtualization Controller
1576  * updates the IPB in the NVT table when required. Not modeled.
1577  */
1578 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1579                                  unsigned size)
1580 {
1581     PnvXive *xive = PNV_XIVE(opaque);
1582 
1583     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1584     return -1;
1585 }
1586 
1587 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1588                               uint64_t value, unsigned size)
1589 {
1590     PnvXive *xive = PNV_XIVE(opaque);
1591 
1592     xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1593 }
1594 
1595 static const MemoryRegionOps pnv_xive_pc_ops = {
1596     .read = pnv_xive_pc_read,
1597     .write = pnv_xive_pc_write,
1598     .endianness = DEVICE_BIG_ENDIAN,
1599     .valid = {
1600         .min_access_size = 8,
1601         .max_access_size = 8,
1602     },
1603     .impl = {
1604         .min_access_size = 8,
1605         .max_access_size = 8,
1606     },
1607 };
1608 
1609 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1610 {
1611     XiveRouter *xrtr = XIVE_ROUTER(xive);
1612     uint8_t blk = xive->chip->chip_id;
1613     uint32_t srcno0 = XIVE_EAS(blk, 0);
1614     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1615     XiveEAS eas;
1616     XiveEND end;
1617     int i;
1618 
1619     monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
1620                    srcno0 + nr_ipis - 1);
1621     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1622 
1623     monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
1624                    srcno0 + nr_ipis - 1);
1625     for (i = 0; i < nr_ipis; i++) {
1626         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1627             break;
1628         }
1629         if (!xive_eas_is_masked(&eas)) {
1630             xive_eas_pic_print_info(&eas, i, mon);
1631         }
1632     }
1633 
1634     monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
1635     i = 0;
1636     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1637         xive_end_pic_print_info(&end, i++, mon);
1638     }
1639 
1640     monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
1641     i = 0;
1642     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1643         xive_end_eas_pic_print_info(&end, i++, mon);
1644     }
1645 }
1646 
1647 static void pnv_xive_reset(void *dev)
1648 {
1649     PnvXive *xive = PNV_XIVE(dev);
1650     XiveSource *xsrc = &xive->ipi_source;
1651     XiveENDSource *end_xsrc = &xive->end_source;
1652 
1653     /*
1654      * Use the PnvChip id to identify the XIVE interrupt controller.
1655      * It can be overridden by configuration at runtime.
1656      */
1657     xive->tctx_chipid = xive->chip->chip_id;
1658 
1659     /* Default page size (should be changed at runtime to 64k) */
1660     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1661 
1662     /* Clear subregions */
1663     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1664         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1665     }
1666 
1667     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1668         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1669     }
1670 
1671     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1672         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1673     }
1674 
1675     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1676         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1677     }
1678 }
1679 
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
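
/*
 * As an illustration, assuming a 512GB VC region and two 64K ESB
 * pages per entry (XIVE_ESB_64K_2PAGE, i.e. 1 << 17 bytes), this
 * yields:
 *
 *   PNV_XIVE_NR_IRQS = 2^39 / 2^17 = 4M interrupt numbers
 */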

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW,
     * limiting accesses to the resources actually provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

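/*
 * Add the XIVE node to the XSCOM subtree of the device tree. FW
 * (skiboot) matches on the "ibm,power9-xive-x" compatible property
 * to locate and configure the controller.
 */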
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

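/*
 * The BAR properties default to zero and are assigned by the chip
 * model at realize time. A minimal sketch of that wiring, assuming
 * the PNV9_XIVE_*_BASE macros of the Pnv9Chip code:
 *
 *   object_property_set_int(OBJECT(xive), PNV9_XIVE_IC_BASE(chip),
 *                           "ic-bar", &error_fatal);
 */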
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

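/*
 * PnvXive is a XiveRouter subclass. The PnvXScomInterface lets the
 * chip model include the controller in its XSCOM subtree.
 */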
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)
1868