xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision 2a886794)
1 /*
2  * QEMU PowerPC XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2019, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/pnv.h"
21 #include "hw/ppc/pnv_core.h"
22 #include "hw/ppc/pnv_xscom.h"
23 #include "hw/ppc/pnv_xive.h"
24 #include "hw/ppc/xive_regs.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/ppc/ppc.h"
27 
28 #include <libfdt.h>
29 
30 #include "pnv_xive_regs.h"
31 
32 #undef XIVE_DEBUG
33 
34 /*
35  * Virtual structures table (VST)
36  */
37 #define SBE_PER_BYTE   4
38 
39 typedef struct XiveVstInfo {
40     const char *name;
41     uint32_t    size;
42     uint32_t    max_blocks;
43 } XiveVstInfo;
44 
45 static const XiveVstInfo vst_infos[] = {
46     [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
47     [VST_TSEL_SBE]  = { "SBE",  1,               16 },
48     [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
49     [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
50 
51     /*
52      * Interrupt fifo backing store table (not modeled):
53      *
54      * 0 - IPI,
55      * 1 - HWD,
56      * 2 - First escalate,
57      * 3 - Second escalate,
58      * 4 - Redistribution,
59      * 5 - IPI cascaded queue?
60      */
61     [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
62 };
63 
64 #define xive_error(xive, fmt, ...)                                      \
65     qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
66                   (xive)->chip->chip_id, ## __VA_ARGS__)
67 
68 /*
69  * QEMU version of the GETFIELD/SETFIELD macros
70  *
71  * TODO: It might be better to use the existing extract64() and
72  * deposit64() but this means that all the register definitions will
73  * change and become incompatible with the ones found in skiboot.
74  *
75  * Keep it as it is for now until we find a common ground.
76  */
77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
78 {
79     return (word & mask) >> ctz64(mask);
80 }
81 
82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
83                                 uint64_t value)
84 {
85     return (word & ~mask) | ((value << ctz64(mask)) & mask);
86 }
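
/*
 * An equivalence sketch (not used by the model): for a contiguous
 * mask, GETFIELD/SETFIELD behave like the generic helpers from
 * "qemu/bitops.h":
 *
 *   GETFIELD(mask, word)        == extract64(word, ctz64(mask), ctpop64(mask))
 *   SETFIELD(mask, word, value) == deposit64(word, ctz64(mask), ctpop64(mask),
 *                                            value)
 *
 * e.g. with mask = PPC_BITMASK(40, 51), ctz64(mask) and ctpop64(mask)
 * are both 12, so GETFIELD extracts the 12-bit field stored in bits
 * 12-23 (little-endian bit order) of 'word'.
 */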
87 
88 /*
89  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
90  * of the chips is good enough.
91  *
92  * TODO: Block scope support
93  */
94 static PnvXive *pnv_xive_get_ic(uint8_t blk)
95 {
96     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
97     int i;
98 
99     for (i = 0; i < pnv->num_chips; i++) {
100         Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
101         PnvXive *xive = &chip9->xive;
102 
103         if (xive->chip->chip_id == blk) {
104             return xive;
105         }
106     }
107     return NULL;
108 }
109 
110 /*
111  * VST accessors for SBE, EAT, ENDT, NVT
112  *
113  * Indirect VST tables are arrays of VSDs pointing to pages (all of
114  * the same size). Each page is a direct VST table.
115  */
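
/*
 * A worked example with illustrative numbers: with 64K indirect pages
 * (page_shift = 16) and 32-byte END entries (sizeof(XiveEND)), each
 * direct page holds 64K / 32 = 2048 entries. Looking up END index
 * 5000 therefore reads the VSD in slot 5000 / 2048 = 2 of the
 * indirect table, then entry 5000 % 2048 = 904 of the direct page it
 * points to.
 */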
116 
117 #define XIVE_VSD_SIZE 8
118 
119 /* Indirect page size can be 4K, 64K, 2M, 16M. */
120 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
121 {
122     return page_shift == 12 || page_shift == 16 ||
123            page_shift == 21 || page_shift == 24;
124 }
125 
126 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
127                                          uint64_t vsd, uint32_t idx)
128 {
129     const XiveVstInfo *info = &vst_infos[type];
130     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
131     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
132     uint32_t idx_max;
133 
134     idx_max = vst_tsize / info->size - 1;
135     if (idx > idx_max) {
136 #ifdef XIVE_DEBUG
137         xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
138                    info->name, idx, idx_max);
139 #endif
140         return 0;
141     }
142 
143     return vst_addr + idx * info->size;
144 }
145 
146 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
147                                            uint64_t vsd, uint32_t idx)
148 {
149     const XiveVstInfo *info = &vst_infos[type];
150     uint64_t vsd_addr;
151     uint32_t vsd_idx;
152     uint32_t page_shift;
153     uint32_t vst_per_page;
154 
155     /* Get the page size of the indirect table. */
156     vsd_addr = vsd & VSD_ADDRESS_MASK;
157     vsd = ldq_be_dma(&address_space_memory, vsd_addr);
158 
159     if (!(vsd & VSD_ADDRESS_MASK)) {
160 #ifdef XIVE_DEBUG
161         xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
162 #endif
163         return 0;
164     }
165 
166     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
167 
168     if (!pnv_xive_vst_page_size_allowed(page_shift)) {
169         xive_error(xive, "VST: invalid %s page shift %d", info->name,
170                    page_shift);
171         return 0;
172     }
173 
174     vst_per_page = (1ull << page_shift) / info->size;
175     vsd_idx = idx / vst_per_page;
176 
177     /* Load the VSD we are looking for, if not already done */
178     if (vsd_idx) {
179         vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
180         vsd = ldq_be_dma(&address_space_memory, vsd_addr);
181 
182         if (!(vsd & VSD_ADDRESS_MASK)) {
183 #ifdef XIVE_DEBUG
184             xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
185 #endif
186             return 0;
187         }
188 
189         /*
190          * Check that the pages have a consistent size across the
191          * indirect table
192          */
193         if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
194             xive_error(xive, "VST: %s entry %x indirect page size differs !?",
195                        info->name, idx);
196             return 0;
197         }
198     }
199 
200     return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
201 }
202 
203 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
204                                   uint32_t idx)
205 {
206     const XiveVstInfo *info = &vst_infos[type];
207     uint64_t vsd;
208 
209     if (blk >= info->max_blocks) {
210         xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
211                    blk, info->name, idx);
212         return 0;
213     }
214 
215     vsd = xive->vsds[type][blk];
216 
217     /* Remote VST access */
218     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
219         xive = pnv_xive_get_ic(blk);
220 
221         return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
222     }
223 
224     if (VSD_INDIRECT & vsd) {
225         return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
226     }
227 
228     return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
229 }
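
/*
 * For example (a sketch), if block 1 is owned by chip 1, then chip 0
 * programs its VSD for that block in VSD_MODE_FORWARD and the lookup
 * above recurses into chip 1's interrupt controller, where the VSD is
 * in exclusive mode.
 */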
230 
231 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
232                              uint32_t idx, void *data)
233 {
234     const XiveVstInfo *info = &vst_infos[type];
235     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
236 
237     if (!addr) {
238         return -1;
239     }
240 
241     cpu_physical_memory_read(addr, data, info->size);
242     return 0;
243 }
244 
245 #define XIVE_VST_WORD_ALL -1
246 
247 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
248                               uint32_t idx, void *data, uint32_t word_number)
249 {
250     const XiveVstInfo *info = &vst_infos[type];
251     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
252 
253     if (!addr) {
254         return -1;
255     }
256 
257     if (word_number == XIVE_VST_WORD_ALL) {
258         cpu_physical_memory_write(addr, data, info->size);
259     } else {
260         cpu_physical_memory_write(addr + word_number * 4,
261                                   data + word_number * 4, 4);
262     }
263     return 0;
264 }
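
/*
 * A usage sketch: callers can update a single 4-byte word of an entry
 * instead of the whole structure. Storing word 1 of an END, for
 * instance, only writes bytes 4-7 in the backing store:
 *
 *    pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, end, 1);
 *
 * while word_number == XIVE_VST_WORD_ALL stores the complete entry.
 */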
265 
266 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
267                             XiveEND *end)
268 {
269     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
270 }
271 
272 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
273                               XiveEND *end, uint8_t word_number)
274 {
275     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
276                               word_number);
277 }
278 
279 static int pnv_xive_end_update(PnvXive *xive)
280 {
281     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
282                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
283     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
284                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
285     int i;
286     uint64_t eqc_watch[4];
287 
288     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
289         eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
290     }
291 
292     return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
293                               XIVE_VST_WORD_ALL);
294 }
295 
296 static void pnv_xive_end_cache_load(PnvXive *xive)
297 {
298     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
299                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
300     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
301                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
302     uint64_t eqc_watch[4] = { 0 };
303     int i;
304 
305     if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
306         xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
307     }
308 
309     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
310         xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
311     }
312 }
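
/*
 * A sketch of the END cache watch sequence, as driven by firmware
 * through the IC registers (see pnv_xive_ic_reg_write/read below):
 *
 *   1. store VC_EQC_CWATCH_SPEC with the block/index of the END
 *   2. load  VC_EQC_CWATCH_DAT0, which fills DAT0-DAT3 from the VST
 *   3. modify the DAT registers
 *   4. store VC_EQC_CWATCH_DAT0, which commits DAT0-DAT3 to the VST
 */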
313 
314 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
315                             XiveNVT *nvt)
316 {
317     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
318 }
319 
320 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
321                               XiveNVT *nvt, uint8_t word_number)
322 {
323     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
324                               word_number);
325 }
326 
327 static int pnv_xive_nvt_update(PnvXive *xive)
328 {
329     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
330                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
331     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
332                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
333     int i;
334     uint64_t vpc_watch[8];
335 
336     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
337         vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
338     }
339 
340     return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
341                               XIVE_VST_WORD_ALL);
342 }
343 
344 static void pnv_xive_nvt_cache_load(PnvXive *xive)
345 {
346     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
347                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
348     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
349                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
350     uint64_t vpc_watch[8] = { 0 };
351     int i;
352 
353     if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
354         xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
355     }
356 
357     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
358         xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
359     }
360 }
361 
362 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
363                             XiveEAS *eas)
364 {
365     PnvXive *xive = PNV_XIVE(xrtr);
366 
367     if (pnv_xive_get_ic(blk) != xive) {
368         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
369         return -1;
370     }
371 
372     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
373 }
374 
375 /*
376  * One bit per thread id. The first register, PC_THREAD_EN_REG0, covers
377  * cores 0-15 of the chip in normal mode, or cores 0-7 in fused mode.
378  * The second register covers cores 16-23 (normal) or 8-11 (fused).
379  */
380 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
381 {
382     int pir = ppc_cpu_pir(cpu);
383     uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
384     uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
385     uint32_t bit = pir & 0x3f;
386 
387     return xive->regs[reg >> 3] & PPC_BIT(bit);
388 }
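
/*
 * A worked example with illustrative PIR values: PIR 0x08 (core 2,
 * thread 0) belongs to fused core 1, so it is controlled by
 * PC_THREAD_EN_REG0, bit PPC_BIT(8). PIR 0x45 belongs to fused core 8
 * and is therefore controlled by PC_THREAD_EN_REG1, bit
 * PPC_BIT(0x45 & 0x3f) = PPC_BIT(5).
 */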
389 
390 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
391                               uint8_t nvt_blk, uint32_t nvt_idx,
392                               bool cam_ignore, uint8_t priority,
393                               uint32_t logic_serv, XiveTCTXMatch *match)
394 {
395     PnvXive *xive = PNV_XIVE(xptr);
396     PnvChip *chip = xive->chip;
397     int count = 0;
398     int i, j;
399 
400     for (i = 0; i < chip->nr_cores; i++) {
401         PnvCore *pc = chip->cores[i];
402         CPUCore *cc = CPU_CORE(pc);
403 
404         for (j = 0; j < cc->nr_threads; j++) {
405             PowerPCCPU *cpu = pc->threads[j];
406             XiveTCTX *tctx;
407             int ring;
408 
409             if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
410                 continue;
411             }
412 
413             tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
414 
415             /*
416              * Check the thread context CAM lines and record matches.
417              */
418             ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
419                                              nvt_idx, cam_ignore, logic_serv);
420             /*
421              * Save the context and carry on to catch duplicates, which
422              * we don't support yet.
423              */
424             if (ring != -1) {
425                 if (match->tctx) {
426                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
427                                   "thread context NVT %x/%x\n",
428                                   nvt_blk, nvt_idx);
429                     return -1;
430                 }
431 
432                 match->ring = ring;
433                 match->tctx = tctx;
434                 count++;
435             }
436         }
437     }
438 
439     return count;
440 }
441 
442 static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
443 {
444     PowerPCCPU *cpu = POWERPC_CPU(cs);
445     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
446     PnvXive *xive = NULL;
447     CPUPPCState *env = &cpu->env;
448     int pir = env->spr_cb[SPR_PIR].default_value;
449 
450     /*
451      * Perform an extra check on the HW thread enablement.
452      *
453  * The TIMA is shared among the chips. To identify the chip from
454  * which the access is being done, we extract the chip id from
455  * the PIR.
456      */
457     xive = pnv_xive_get_ic((pir >> 8) & 0xf);
458     if (!xive) {
459         return NULL;
460     }
461 
462     if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
463         xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
464     }
465 
466     return tctx;
467 }
468 
469 /*
470  * The internal sources (IPIs) of the interrupt controller have no
471  * knowledge of the XIVE chip on which they reside. Encode the block
472  * id in the source interrupt number before forwarding the source
473  * event notification to the Router. This is required on a multichip
474  * system.
475  */
476 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
477 {
478     PnvXive *xive = PNV_XIVE(xn);
479     uint8_t blk = xive->chip->chip_id;
480 
481     xive_router_notify(xn, XIVE_EAS(blk, srcno));
482 }
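
/*
 * For instance (a sketch of the encoding from "hw/ppc/xive_regs.h"):
 * an IPI with source number 0x20 raised on chip/block 1 is presented
 * to the Router as XIVE_EAS(1, 0x20) = 0x10000020, from which
 * XIVE_EAS_BLOCK() and XIVE_EAS_INDEX() recover the block and index.
 */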
483 
484 /*
485  * XIVE helpers
486  */
487 
488 static uint64_t pnv_xive_vc_size(PnvXive *xive)
489 {
490     return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
491 }
492 
493 static uint64_t pnv_xive_edt_shift(PnvXive *xive)
494 {
495     return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
496 }
497 
498 static uint64_t pnv_xive_pc_size(PnvXive *xive)
499 {
500     return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
501 }
502 
503 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
504 {
505     uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
506     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
507 
508     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
509 }
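
/*
 * An arithmetic sketch: each SBE byte backs the PQ bits of
 * SBE_PER_BYTE = 4 sources (2 bits per source), so a direct 64K SBE
 * table (VSD_TSIZE field of 4, i.e. 1 << (4 + 12) bytes) provisions
 * 64K * 4 = 256K IPIs.
 */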
510 
511 /*
512  * EDT Table
513  *
514  * The Virtualization Controller MMIO region containing the IPI ESB
515  * pages and END ESB pages is sub-divided into "sets" which map
516  * portions of the VC region to the different ESB pages. It is
517  * configured at runtime through the EDT "Domain Table" to let the
518  * firmware decide how to split the VC address space between IPI ESB
519  * pages and END ESB pages.
520  */
521 
522 /*
523  * Computes the overall size of the IPI or the END ESB pages
524  */
525 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
526 {
527     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
528     uint64_t size = 0;
529     int i;
530 
531     for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
532         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
533 
534         if (edt_type == type) {
535             size += edt_size;
536         }
537     }
538 
539     return size;
540 }
541 
542 /*
543  * Maps an offset of the VC region into the IPI or END region using
544  * the layout defined by the EDT "Domain Table"
545  */
546 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
547                                               uint64_t type)
548 {
549     int i;
550     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
551     uint64_t edt_offset = vc_offset;
552 
553     for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
554         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
555 
556         if (edt_type != type) {
557             edt_offset -= edt_size;
558         }
559     }
560 
561     return edt_offset;
562 }
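
/*
 * A worked example with a hypothetical layout: assume 16M EDT sets
 * and a table starting with { IPI, IPI, EQ, EQ, ... }. A VC access
 * at offset 0x3100000 falls in set 3, an EQ set. The two IPI sets
 * below it are skipped, so the offset within the END region is
 * 0x3100000 - 2 * 0x1000000 = 0x1100000.
 */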
563 
564 static void pnv_xive_edt_resize(PnvXive *xive)
565 {
566     uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
567     uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
568 
569     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
570     memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
571 
572     memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
573     memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
574 }
575 
576 /*
577  * XIVE Table configuration. Only EDT is supported.
578  */
579 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
580 {
581     uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
582     uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
583     uint64_t *xive_table;
584     uint8_t max_index;
585 
586     switch (tsel) {
587     case CQ_TAR_TSEL_BLK:
588         max_index = ARRAY_SIZE(xive->blk);
589         xive_table = xive->blk;
590         break;
591     case CQ_TAR_TSEL_MIG:
592         max_index = ARRAY_SIZE(xive->mig);
593         xive_table = xive->mig;
594         break;
595     case CQ_TAR_TSEL_EDT:
596         max_index = ARRAY_SIZE(xive->edt);
597         xive_table = xive->edt;
598         break;
599     case CQ_TAR_TSEL_VDT:
600         max_index = ARRAY_SIZE(xive->vdt);
601         xive_table = xive->vdt;
602         break;
603     default:
604         xive_error(xive, "IC: invalid table %d", (int) tsel);
605         return -1;
606     }
607 
608     if (tsel_index >= max_index) {
609         xive_error(xive, "IC: invalid index %d", (int) tsel_index);
610         return -1;
611     }
612 
613     xive_table[tsel_index] = val;
614 
615     if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
616         xive->regs[CQ_TAR >> 3] =
617             SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
618     }
619 
620     /*
621      * EDT configuration is complete. Resize the MMIO windows exposing
622      * the IPI and the END ESBs in the VC region.
623      */
624     if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
625         pnv_xive_edt_resize(xive);
626     }
627 
628     return 0;
629 }
630 
631 /*
632  * Virtual Structure Tables (VST) configuration
633  */
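
/*
 * A VSD encodes the table mode, address and size in a single 64-bit
 * value. For example (illustrative numbers), a direct, exclusive,
 * 64K END table at 0x10000000 would be described by:
 *
 *    vsd = SETFIELD(VSD_MODE, 0ull, VSD_MODE_EXCLUSIVE) |
 *          (0x10000000ull & VSD_ADDRESS_MASK)           |
 *          SETFIELD(VSD_TSIZE, 0ull, 4);
 *
 * where a VSD_TSIZE field of 4 means 1 << (4 + 12) = 64K bytes.
 */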
634 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
635                                        uint8_t blk, uint64_t vsd)
636 {
637     XiveENDSource *end_xsrc = &xive->end_source;
638     XiveSource *xsrc = &xive->ipi_source;
639     const XiveVstInfo *info = &vst_infos[type];
640     uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
641     uint64_t vst_tsize = 1ull << page_shift;
642     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
643 
644     /* Basic checks */
645 
646     if (VSD_INDIRECT & vsd) {
647         if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
648             xive_error(xive, "VST: %s indirect tables are not enabled",
649                        info->name);
650             return;
651         }
652 
653         if (!pnv_xive_vst_page_size_allowed(page_shift)) {
654             xive_error(xive, "VST: invalid %s page shift %d", info->name,
655                        page_shift);
656             return;
657         }
658     }
659 
660     if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
661         xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
662                    " page shift %d", info->name, vst_addr, page_shift);
663         return;
664     }
665 
666     /* Record the table configuration (in SRAM on HW) */
667     xive->vsds[type][blk] = vsd;
668 
669     /* Now tune the models with the configuration provided by the FW */
670 
671     switch (type) {
672     case VST_TSEL_IVT:  /* Nothing to be done */
673         break;
674 
675     case VST_TSEL_EQDT:
676         /*
677          * Backing store pages for the END.
678          *
679          * If the table is direct, we can compute the number of PQ
680          * entries provisioned by FW (such as skiboot) and resize the
681          * END ESB window accordingly.
682          */
683         if (!(VSD_INDIRECT & vsd)) {
684             memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
685                                    * (1ull << xsrc->esb_shift));
686         }
687         memory_region_add_subregion(&xive->end_edt_mmio, 0,
688                                     &end_xsrc->esb_mmio);
689         break;
690 
691     case VST_TSEL_SBE:
692         /*
693          * Backing store pages for the source PQ bits. The model does
694          * not use these PQ bits backed in RAM because the XiveSource
695          * model has its own.
696          *
697          * If the table is direct, we can compute the number of PQ
698          * entries provisioned by FW (such as skiboot) and resize the
699          * ESB window accordingly.
700          */
701         if (!(VSD_INDIRECT & vsd)) {
702             memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
703                                    * (1ull << xsrc->esb_shift));
704         }
705         memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
706         break;
707 
708     case VST_TSEL_VPDT: /* Not modeled */
709     case VST_TSEL_IRQ:  /* Not modeled */
710         /*
711          * These tables contain the backing store pages for the
712          * interrupt fifos of the VC sub-engine in case of overflow.
713          */
714         break;
715 
716     default:
717         g_assert_not_reached();
718     }
719 }
720 
721 /*
722  * Both the PC and VC sub-engines are configured, as each uses the
723  * Virtual Structure Tables: SBE, EAS, END and NVT.
724  */
725 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
726 {
727     uint8_t mode = GETFIELD(VSD_MODE, vsd);
728     uint8_t type = GETFIELD(VST_TABLE_SELECT,
729                             xive->regs[VC_VSD_TABLE_ADDR >> 3]);
730     uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
731                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
732     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
733 
734     if (type > VST_TSEL_IRQ) {
735         xive_error(xive, "VST: invalid table type %d", type);
736         return;
737     }
738 
739     if (blk >= vst_infos[type].max_blocks) {
740         xive_error(xive, "VST: invalid block id %d for"
741                       " %s table", blk, vst_infos[type].name);
742         return;
743     }
744 
745     /*
746      * Only take the VC sub-engine configuration into account because
747      * the XiveRouter model combines both VC and PC sub-engines
748      */
749     if (pc_engine) {
750         return;
751     }
752 
753     if (!vst_addr) {
754         xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
755         return;
756     }
757 
758     switch (mode) {
759     case VSD_MODE_FORWARD:
760         xive->vsds[type][blk] = vsd;
761         break;
762 
763     case VSD_MODE_EXCLUSIVE:
764         pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
765         break;
766 
767     default:
768         xive_error(xive, "VST: unsupported table mode %d", mode);
769         return;
770     }
771 }
772 
773 /*
774  * Interrupt controller MMIO region. The layout is compatible between
775  * 4K and 64K pages:
776  *
777  * Page 0           sub-engine BARs
778  *  0x000 - 0x3FF   IC registers
779  *  0x400 - 0x7FF   PC registers
780  *  0x800 - 0xFFF   VC registers
781  *
782  * Page 1           Notify page (writes only)
783  *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
784  *  0x800 - 0xFFF   forwards and syncs
785  *
786  * Page 2           LSI Trigger page (writes only) (not modeled)
787  * Page 3           LSI SB EOI page (reads only) (not modeled)
788  *
789  * Page 4-7         indirect TIMA
790  */
791 
792 /*
793  * IC - registers MMIO
794  */
795 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
796                                   uint64_t val, unsigned size)
797 {
798     PnvXive *xive = PNV_XIVE(opaque);
799     MemoryRegion *sysmem = get_system_memory();
800     uint32_t reg = offset >> 3;
801     bool is_chip0 = xive->chip->chip_id == 0;
802 
803     switch (offset) {
804 
805     /*
806      * XIVE CQ (PowerBus bridge) settings
807      */
808     case CQ_MSGSND:     /* msgsnd for doorbells */
809     case CQ_FIRMASK_OR: /* FIR error reporting */
810         break;
811     case CQ_PBI_CTL:
812         if (val & CQ_PBI_PC_64K) {
813             xive->pc_shift = 16;
814         }
815         if (val & CQ_PBI_VC_64K) {
816             xive->vc_shift = 16;
817         }
818         break;
819     case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
820         /*
821          * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
822          */
823         break;
824 
825     /*
826      * XIVE Virtualization Controller settings
827      */
828     case VC_GLOBAL_CONFIG:
829         break;
830 
831     /*
832      * XIVE Presenter Controller settings
833      */
834     case PC_GLOBAL_CONFIG:
835         /*
836          * PC_GCONF_CHIPID_OVR
837          *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
838          */
839         break;
840     case PC_TCTXT_CFG:
841         /*
842          * TODO: block group support
843          *
844          * PC_TCTXT_CFG_BLKGRP_EN
845          * PC_TCTXT_CFG_HARD_CHIPID_BLK :
846          *   Moves the chipid into block field for hardwired CAM compares.
847          *   Block offset value is adjusted to 0b0..01 & ThrdId
848          *
849          *   Will require changes in xive_presenter_tctx_match(). I am
850          *   not sure how to handle that yet.
851          */
852 
853         /* Overrides hardwired chip ID with the chip ID field */
854         if (val & PC_TCTXT_CHIPID_OVERRIDE) {
855             xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
856         }
857         break;
858     case PC_TCTXT_TRACK:
859         /*
860          * PC_TCTXT_TRACK_EN:
861          *   enable block tracking and exchange of block ownership
862          *   information between Interrupt controllers
863          */
864         break;
865 
866     /*
867      * Misc settings
868      */
869     case VC_SBC_CONFIG: /* Store EOI configuration */
870         /*
871          * Configure store EOI if required by firmware (although skiboot
872          * has recently removed support for it)
873          */
874         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
875             xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
876         }
877         break;
878 
879     case VC_EQC_CONFIG: /* TODO: silent escalation */
880     case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
881         break;
882 
883     /*
884      * XIVE BAR settings (XSCOM only)
885      */
886     case CQ_RST_CTL:
887         /* bit4: resets all BAR registers */
888         break;
889 
890     case CQ_IC_BAR: /* IC BAR. 8 pages */
891         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
892         if (!(val & CQ_IC_BAR_VALID)) {
893             xive->ic_base = 0;
894             if (xive->regs[reg] & CQ_IC_BAR_VALID) {
895                 memory_region_del_subregion(&xive->ic_mmio,
896                                             &xive->ic_reg_mmio);
897                 memory_region_del_subregion(&xive->ic_mmio,
898                                             &xive->ic_notify_mmio);
899                 memory_region_del_subregion(&xive->ic_mmio,
900                                             &xive->ic_lsi_mmio);
901                 memory_region_del_subregion(&xive->ic_mmio,
902                                             &xive->tm_indirect_mmio);
903 
904                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
905             }
906         } else {
907             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
908             if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
909                 memory_region_add_subregion(sysmem, xive->ic_base,
910                                             &xive->ic_mmio);
911 
912                 memory_region_add_subregion(&xive->ic_mmio,  0,
913                                             &xive->ic_reg_mmio);
914                 memory_region_add_subregion(&xive->ic_mmio,
915                                             1ul << xive->ic_shift,
916                                             &xive->ic_notify_mmio);
917                 memory_region_add_subregion(&xive->ic_mmio,
918                                             2ul << xive->ic_shift,
919                                             &xive->ic_lsi_mmio);
920                 memory_region_add_subregion(&xive->ic_mmio,
921                                             4ull << xive->ic_shift,
922                                             &xive->tm_indirect_mmio);
923             }
924         }
925         break;
926 
927     case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
928     case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
929         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
930         if (!(val & CQ_TM_BAR_VALID)) {
931             xive->tm_base = 0;
932             if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
933                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
934             }
935         } else {
936             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
937             if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
938                 memory_region_add_subregion(sysmem, xive->tm_base,
939                                             &xive->tm_mmio);
940             }
941         }
942         break;
943 
944     case CQ_PC_BARM:
945         xive->regs[reg] = val;
946         memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
947         break;
948     case CQ_PC_BAR: /* From 32M to 512G */
949         if (!(val & CQ_PC_BAR_VALID)) {
950             xive->pc_base = 0;
951             if (xive->regs[reg] & CQ_PC_BAR_VALID) {
952                 memory_region_del_subregion(sysmem, &xive->pc_mmio);
953             }
954         } else {
955             xive->pc_base = val & ~(CQ_PC_BAR_VALID);
956             if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
957                 memory_region_add_subregion(sysmem, xive->pc_base,
958                                             &xive->pc_mmio);
959             }
960         }
961         break;
962 
963     case CQ_VC_BARM:
964         xive->regs[reg] = val;
965         memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
966         break;
967     case CQ_VC_BAR: /* From 64M to 4TB */
968         if (!(val & CQ_VC_BAR_VALID)) {
969             xive->vc_base = 0;
970             if (xive->regs[reg] & CQ_VC_BAR_VALID) {
971                 memory_region_del_subregion(sysmem, &xive->vc_mmio);
972             }
973         } else {
974             xive->vc_base = val & ~(CQ_VC_BAR_VALID);
975             if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
976                 memory_region_add_subregion(sysmem, xive->vc_base,
977                                             &xive->vc_mmio);
978             }
979         }
980         break;
981 
982     /*
983      * XIVE Table settings.
984      */
985     case CQ_TAR: /* Table Address */
986         break;
987     case CQ_TDR: /* Table Data */
988         pnv_xive_table_set_data(xive, val);
989         break;
990 
991     /*
992      * XIVE VC & PC Virtual Structure Table settings
993      */
994     case VC_VSD_TABLE_ADDR:
995     case PC_VSD_TABLE_ADDR: /* Virtual table selector */
996         break;
997     case VC_VSD_TABLE_DATA: /* Virtual table setting */
998     case PC_VSD_TABLE_DATA:
999         pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
1000         break;
1001 
1002     /*
1003      * Interrupt fifo overflow in memory backing store (Not modeled)
1004      */
1005     case VC_IRQ_CONFIG_IPI:
1006     case VC_IRQ_CONFIG_HW:
1007     case VC_IRQ_CONFIG_CASCADE1:
1008     case VC_IRQ_CONFIG_CASCADE2:
1009     case VC_IRQ_CONFIG_REDIST:
1010     case VC_IRQ_CONFIG_IPI_CASC:
1011         break;
1012 
1013     /*
1014      * XIVE hardware thread enablement
1015      */
1016     case PC_THREAD_EN_REG0: /* Physical Thread Enable */
1017     case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
1018         break;
1019 
1020     case PC_THREAD_EN_REG0_SET:
1021         xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
1022         break;
1023     case PC_THREAD_EN_REG1_SET:
1024         xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
1025         break;
1026     case PC_THREAD_EN_REG0_CLR:
1027         xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
1028         break;
1029     case PC_THREAD_EN_REG1_CLR:
1030         xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
1031         break;
1032 
1033     /*
1034      * Indirect TIMA access set up. Defines the PIR of the HW thread
1035      * to use.
1036      */
1037     case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
1038         break;
1039 
1040     /*
1041      * XIVE PC & VC cache updates for EAS, NVT and END
1042      */
1043     case VC_IVC_SCRUB_MASK:
1044     case VC_IVC_SCRUB_TRIG:
1045         break;
1046 
1047     case VC_EQC_CWATCH_SPEC:
1048         val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
1049         break;
1050     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1051         break;
1052     case VC_EQC_CWATCH_DAT0:
1053         /* writing to DATA0 triggers the cache write */
1054         xive->regs[reg] = val;
1055         pnv_xive_end_update(xive);
1056         break;
1057     case VC_EQC_SCRUB_MASK:
1058     case VC_EQC_SCRUB_TRIG:
1059         /*
1060          * The scrubbing registers flush the cache in RAM and can also
1061          * invalidate.
1062          */
1063         break;
1064 
1065     case PC_VPC_CWATCH_SPEC:
1066         val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
1067         break;
1068     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1069         break;
1070     case PC_VPC_CWATCH_DAT0:
1071         /* writing to DATA0 triggers the cache write */
1072         xive->regs[reg] = val;
1073         pnv_xive_nvt_update(xive);
1074         break;
1075     case PC_VPC_SCRUB_MASK:
1076     case PC_VPC_SCRUB_TRIG:
1077         /*
1078          * The scrubbing registers flush the cache in RAM and can also
1079          * invalidate.
1080          */
1081         break;
1082 
1083 
1084     /*
1085      * XIVE PC & VC cache invalidation
1086      */
1087     case PC_AT_KILL:
1088         break;
1089     case VC_AT_MACRO_KILL:
1090         break;
1091     case PC_AT_KILL_MASK:
1092     case VC_AT_MACRO_KILL_MASK:
1093         break;
1094 
1095     default:
1096         xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
1097         return;
1098     }
1099 
1100     xive->regs[reg] = val;
1101 }
1102 
1103 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
1104 {
1105     PnvXive *xive = PNV_XIVE(opaque);
1106     uint64_t val = 0;
1107     uint32_t reg = offset >> 3;
1108 
1109     switch (offset) {
1110     case CQ_CFG_PB_GEN:
1111     case CQ_IC_BAR:
1112     case CQ_TM1_BAR:
1113     case CQ_TM2_BAR:
1114     case CQ_PC_BAR:
1115     case CQ_PC_BARM:
1116     case CQ_VC_BAR:
1117     case CQ_VC_BARM:
1118     case CQ_TAR:
1119     case CQ_TDR:
1120     case CQ_PBI_CTL:
1121 
1122     case PC_TCTXT_CFG:
1123     case PC_TCTXT_TRACK:
1124     case PC_TCTXT_INDIR0:
1125     case PC_TCTXT_INDIR1:
1126     case PC_TCTXT_INDIR2:
1127     case PC_TCTXT_INDIR3:
1128     case PC_GLOBAL_CONFIG:
1129 
1130     case PC_VPC_SCRUB_MASK:
1131 
1132     case VC_GLOBAL_CONFIG:
1133     case VC_AIB_TX_ORDER_TAG2:
1134 
1135     case VC_IRQ_CONFIG_IPI:
1136     case VC_IRQ_CONFIG_HW:
1137     case VC_IRQ_CONFIG_CASCADE1:
1138     case VC_IRQ_CONFIG_CASCADE2:
1139     case VC_IRQ_CONFIG_REDIST:
1140     case VC_IRQ_CONFIG_IPI_CASC:
1141 
1142     case VC_EQC_SCRUB_MASK:
1143     case VC_IVC_SCRUB_MASK:
1144     case VC_SBC_CONFIG:
1145     case VC_AT_MACRO_KILL_MASK:
1146     case VC_VSD_TABLE_ADDR:
1147     case PC_VSD_TABLE_ADDR:
1148     case VC_VSD_TABLE_DATA:
1149     case PC_VSD_TABLE_DATA:
1150     case PC_THREAD_EN_REG0:
1151     case PC_THREAD_EN_REG1:
1152         val = xive->regs[reg];
1153         break;
1154 
1155     /*
1156      * XIVE hardware thread enablement
1157      */
1158     case PC_THREAD_EN_REG0_SET:
1159     case PC_THREAD_EN_REG0_CLR:
1160         val = xive->regs[PC_THREAD_EN_REG0 >> 3];
1161         break;
1162     case PC_THREAD_EN_REG1_SET:
1163     case PC_THREAD_EN_REG1_CLR:
1164         val = xive->regs[PC_THREAD_EN_REG1 >> 3];
1165         break;
1166 
1167     case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
1168         val = 0xffffff0000000000;
1169         break;
1170 
1171     /*
1172      * XIVE PC & VC cache updates for EAS, NVT and END
1173      */
1174     case VC_EQC_CWATCH_SPEC:
1175         xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
1176         val = xive->regs[reg];
1177         break;
1178     case VC_EQC_CWATCH_DAT0:
1179         /*
1180          * Load DATA registers from cache with data requested by the
1181          * SPEC register
1182          */
1183         pnv_xive_end_cache_load(xive);
1184         val = xive->regs[reg];
1185         break;
1186     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1187         val = xive->regs[reg];
1188         break;
1189 
1190     case PC_VPC_CWATCH_SPEC:
1191         xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
1192         val = xive->regs[reg];
1193         break;
1194     case PC_VPC_CWATCH_DAT0:
1195         /*
1196          * Load DATA registers from cache with data requested by the
1197          * SPEC register
1198          */
1199         pnv_xive_nvt_cache_load(xive);
1200         val = xive->regs[reg];
1201         break;
1202     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1203         val = xive->regs[reg];
1204         break;
1205 
1206     case PC_VPC_SCRUB_TRIG:
1207     case VC_IVC_SCRUB_TRIG:
1208     case VC_EQC_SCRUB_TRIG:
1209         xive->regs[reg] &= ~VC_SCRUB_VALID;
1210         val = xive->regs[reg];
1211         break;
1212 
1213     /*
1214      * XIVE PC & VC cache invalidation
1215      */
1216     case PC_AT_KILL:
1217         xive->regs[reg] &= ~PC_AT_KILL_VALID;
1218         val = xive->regs[reg];
1219         break;
1220     case VC_AT_MACRO_KILL:
1221         xive->regs[reg] &= ~VC_KILL_VALID;
1222         val = xive->regs[reg];
1223         break;
1224 
1225     /*
1226      * XIVE synchronisation
1227      */
1228     case VC_EQC_CONFIG:
1229         val = VC_EQC_SYNC_MASK;
1230         break;
1231 
1232     default:
1233         xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
1234     }
1235 
1236     return val;
1237 }
1238 
1239 static const MemoryRegionOps pnv_xive_ic_reg_ops = {
1240     .read = pnv_xive_ic_reg_read,
1241     .write = pnv_xive_ic_reg_write,
1242     .endianness = DEVICE_BIG_ENDIAN,
1243     .valid = {
1244         .min_access_size = 8,
1245         .max_access_size = 8,
1246     },
1247     .impl = {
1248         .min_access_size = 8,
1249         .max_access_size = 8,
1250     },
1251 };
1252 
1253 /*
1254  * IC - Notify MMIO port page (write only)
1255  */
1256 #define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
1257 #define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
1258 #define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
1259 #define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
1260 #define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
1261 #define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
1262 #define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
1263 #define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */
1264 
1265 /* VC synchronisation */
1266 #define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
1267 #define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
1268 #define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
1269 #define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
1270 #define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */
1271 
1272 /* PC synchronisation */
1273 #define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
1274 #define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
1275 #define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
1276 
1277 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
1278 {
1279     uint8_t blk;
1280     uint32_t idx;
1281 
1282     if (val & XIVE_TRIGGER_END) {
1283         xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1284                    addr, val);
1285         return;
1286     }
1287 
1288     /*
1289      * Forward the source event notification directly to the Router.
1290      * The source interrupt number should already be correctly encoded
1291      * with the chip block id by the sending device (PHB, PSI).
1292      */
1293     blk = XIVE_EAS_BLOCK(val);
1294     idx = XIVE_EAS_INDEX(val);
1295 
1296     xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
1297 }
1298 
1299 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1300                                      unsigned size)
1301 {
1302     PnvXive *xive = PNV_XIVE(opaque);
1303 
1304     /* VC: HW triggers */
1305     switch (addr) {
1306     case 0x000 ... 0x7FF:
1307         pnv_xive_ic_hw_trigger(opaque, addr, val);
1308         break;
1309 
1310     /* VC: Forwarded IRQs */
1311     case PNV_XIVE_FORWARD_IPI:
1312     case PNV_XIVE_FORWARD_HW:
1313     case PNV_XIVE_FORWARD_OS_ESC:
1314     case PNV_XIVE_FORWARD_HW_ESC:
1315     case PNV_XIVE_FORWARD_REDIS:
1316         /* TODO: forwarded IRQs. Should be like HW triggers */
1317         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1318                    addr, val);
1319         break;
1320 
1321     /* VC syncs */
1322     case PNV_XIVE_SYNC_IPI:
1323     case PNV_XIVE_SYNC_HW:
1324     case PNV_XIVE_SYNC_OS_ESC:
1325     case PNV_XIVE_SYNC_HW_ESC:
1326     case PNV_XIVE_SYNC_REDIS:
1327         break;
1328 
1329     /* PC syncs */
1330     case PNV_XIVE_SYNC_PULL:
1331     case PNV_XIVE_SYNC_PUSH:
1332     case PNV_XIVE_SYNC_VPC:
1333         break;
1334 
1335     default:
1336         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1337     }
1338 }
1339 
1340 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1341                                         unsigned size)
1342 {
1343     PnvXive *xive = PNV_XIVE(opaque);
1344 
1345     /* loads are invalid */
1346     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1347     return -1;
1348 }
1349 
1350 static const MemoryRegionOps pnv_xive_ic_notify_ops = {
1351     .read = pnv_xive_ic_notify_read,
1352     .write = pnv_xive_ic_notify_write,
1353     .endianness = DEVICE_BIG_ENDIAN,
1354     .valid = {
1355         .min_access_size = 8,
1356         .max_access_size = 8,
1357     },
1358     .impl = {
1359         .min_access_size = 8,
1360         .max_access_size = 8,
1361     },
1362 };
1363 
1364 /*
1365  * IC - LSI MMIO handlers (not modeled)
1366  */
1367 
1368 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1369                               uint64_t val, unsigned size)
1370 {
1371     PnvXive *xive = PNV_XIVE(opaque);
1372 
1373     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1374 }
1375 
1376 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1377 {
1378     PnvXive *xive = PNV_XIVE(opaque);
1379 
1380     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1381     return -1;
1382 }
1383 
1384 static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
1385     .read = pnv_xive_ic_lsi_read,
1386     .write = pnv_xive_ic_lsi_write,
1387     .endianness = DEVICE_BIG_ENDIAN,
1388     .valid = {
1389         .min_access_size = 8,
1390         .max_access_size = 8,
1391     },
1392     .impl = {
1393         .min_access_size = 8,
1394         .max_access_size = 8,
1395     },
1396 };
1397 
1398 /*
1399  * IC - Indirect TIMA MMIO handlers
1400  */
1401 
1402 /*
1403  * When the TIMA is accessed from the indirect page, the thread id of
1404  * the target CPU is configured in the PC_TCTXT_INDIR0 register before
1405  * use. This is used for resets and for debug purposes.
1406  */
1407 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1408 {
1409     PnvChip *chip = xive->chip;
1410     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1411     PowerPCCPU *cpu = NULL;
1412     int pir;
1413 
1414     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1415         xive_error(xive, "IC: no indirect TIMA access in progress");
1416         return NULL;
1417     }
1418 
1419     pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
1420     cpu = pnv_chip_find_cpu(chip, pir);
1421     if (!cpu) {
1422         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1423         return NULL;
1424     }
1425 
1426     /* Check that HW thread is XIVE enabled */
1427     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
1428         xive_error(xive, "IC: CPU %x is not enabled", pir);
1429     }
1430 
1431     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1432 }
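
/*
 * A sketch of the indirect access sequence: firmware first selects
 * the HW thread,
 *
 *    regs[PC_TCTXT_INDIR0 >> 3] = PC_TCTXT_INDIR_VALID |
 *        SETFIELD(PC_TCTXT_INDIR_THRDID, 0ull, pir & 0xff);
 *
 * then accesses the TIMA through pages 4-7 of the IC BAR as if from
 * the selected thread.
 */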
1433 
1434 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1435                                    uint64_t value, unsigned size)
1436 {
1437     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1438 
1439     xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
1440 }
1441 
1442 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1443                                       unsigned size)
1444 {
1445     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1446 
1447     return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
1448 }
1449 
1450 static const MemoryRegionOps xive_tm_indirect_ops = {
1451     .read = xive_tm_indirect_read,
1452     .write = xive_tm_indirect_write,
1453     .endianness = DEVICE_BIG_ENDIAN,
1454     .valid = {
1455         .min_access_size = 1,
1456         .max_access_size = 8,
1457     },
1458     .impl = {
1459         .min_access_size = 1,
1460         .max_access_size = 8,
1461     },
1462 };
1463 
1464 /*
1465  * Interrupt controller XSCOM region.
1466  */
1467 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1468 {
1469     switch (addr >> 3) {
1470     case X_VC_EQC_CONFIG:
1471         /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
1472         return VC_EQC_SYNC_MASK;
1473     default:
1474         return pnv_xive_ic_reg_read(opaque, addr, size);
1475     }
1476 }
1477 
1478 static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
1479                                 uint64_t val, unsigned size)
1480 {
1481     pnv_xive_ic_reg_write(opaque, addr, val, size);
1482 }
1483 
1484 static const MemoryRegionOps pnv_xive_xscom_ops = {
1485     .read = pnv_xive_xscom_read,
1486     .write = pnv_xive_xscom_write,
1487     .endianness = DEVICE_BIG_ENDIAN,
1488     .valid = {
1489         .min_access_size = 8,
1490         .max_access_size = 8,
1491     },
1492     .impl = {
1493         .min_access_size = 8,
1494         .max_access_size = 8,
1495     }
1496 };
1497 
1498 /*
1499  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1500  */
1501 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1502                                  unsigned size)
1503 {
1504     PnvXive *xive = PNV_XIVE(opaque);
1505     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1506     uint64_t edt_type = 0;
1507     uint64_t edt_offset;
1508     MemTxResult result;
1509     AddressSpace *edt_as = NULL;
1510     uint64_t ret = -1;
1511 
1512     if (edt_index < XIVE_TABLE_EDT_MAX) {
1513         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1514     }
1515 
1516     switch (edt_type) {
1517     case CQ_TDR_EDT_IPI:
1518         edt_as = &xive->ipi_as;
1519         break;
1520     case CQ_TDR_EDT_EQ:
1521         edt_as = &xive->end_as;
1522         break;
1523     default:
1524         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1525         return -1;
1526     }
1527 
1528     /* Remap the offset for the targeted address space */
1529     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1530 
1531     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1532                             &result);
1533 
1534     if (result != MEMTX_OK) {
1535         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1536                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1537                    offset, edt_offset);
1538         return -1;
1539     }
1540 
1541     return ret;
1542 }
1543 
1544 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1545                               uint64_t val, unsigned size)
1546 {
1547     PnvXive *xive = PNV_XIVE(opaque);
1548     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1549     uint64_t edt_type = 0;
1550     uint64_t edt_offset;
1551     MemTxResult result;
1552     AddressSpace *edt_as = NULL;
1553 
1554     if (edt_index < XIVE_TABLE_EDT_MAX) {
1555         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1556     }
1557 
1558     switch (edt_type) {
1559     case CQ_TDR_EDT_IPI:
1560         edt_as = &xive->ipi_as;
1561         break;
1562     case CQ_TDR_EDT_EQ:
1563         edt_as = &xive->end_as;
1564         break;
1565     default:
1566         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1567                    offset);
1568         return;
1569     }
1570 
1571     /* Remap the offset for the targeted address space */
1572     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1573 
1574     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1575     if (result != MEMTX_OK) {
1576         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1577     }
1578 }
1579 
1580 static const MemoryRegionOps pnv_xive_vc_ops = {
1581     .read = pnv_xive_vc_read,
1582     .write = pnv_xive_vc_write,
1583     .endianness = DEVICE_BIG_ENDIAN,
1584     .valid = {
1585         .min_access_size = 8,
1586         .max_access_size = 8,
1587     },
1588     .impl = {
1589         .min_access_size = 8,
1590         .max_access_size = 8,
1591     },
1592 };
1593 
1594 /*
1595  * Presenter Controller MMIO region. The Virtualization Controller
1596  * updates the IPB in the NVT table when required. Not modeled.
1597  */
1598 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1599                                  unsigned size)
1600 {
1601     PnvXive *xive = PNV_XIVE(opaque);
1602 
1603     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1604     return -1;
1605 }
1606 
1607 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1608                               uint64_t value, unsigned size)
1609 {
1610     PnvXive *xive = PNV_XIVE(opaque);
1611 
1612     xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
1613 }
1614 
1615 static const MemoryRegionOps pnv_xive_pc_ops = {
1616     .read = pnv_xive_pc_read,
1617     .write = pnv_xive_pc_write,
1618     .endianness = DEVICE_BIG_ENDIAN,
1619     .valid = {
1620         .min_access_size = 8,
1621         .max_access_size = 8,
1622     },
1623     .impl = {
1624         .min_access_size = 8,
1625         .max_access_size = 8,
1626     },
1627 };
1628 
1629 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1630 {
1631     XiveRouter *xrtr = XIVE_ROUTER(xive);
1632     uint8_t blk = xive->chip->chip_id;
1633     uint32_t srcno0 = XIVE_EAS(blk, 0);
1634     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1635     XiveEAS eas;
1636     XiveEND end;
1637     int i;
1638 
1639     monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
1640                    srcno0 + nr_ipis - 1);
1641     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1642 
1643     monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
1644                    srcno0 + nr_ipis - 1);
1645     for (i = 0; i < nr_ipis; i++) {
1646         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1647             break;
1648         }
1649         if (!xive_eas_is_masked(&eas)) {
1650             xive_eas_pic_print_info(&eas, i, mon);
1651         }
1652     }
1653 
1654     monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
1655     i = 0;
1656     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1657         xive_end_pic_print_info(&end, i++, mon);
1658     }
1659 
1660     monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
1661     i = 0;
1662     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1663         xive_end_eas_pic_print_info(&end, i++, mon);
1664     }
1665 }
1666 
1667 static void pnv_xive_reset(void *dev)
1668 {
1669     PnvXive *xive = PNV_XIVE(dev);
1670     XiveSource *xsrc = &xive->ipi_source;
1671     XiveENDSource *end_xsrc = &xive->end_source;
1672 
1673     /*
1674      * Use the PnvChip id to identify the XIVE interrupt controller.
1675      * It can be overridden by configuration at runtime.
1676      */
1677     xive->tctx_chipid = xive->chip->chip_id;
1678 
1679     /* Default page size (should be changed at runtime to 64k) */
1680     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1681 
1682     /* Clear subregions */
1683     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1684         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1685     }
1686 
1687     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1688         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1689     }
1690 
1691     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1692         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1693     }
1694 
1695     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1696         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1697     }
1698 }
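
/*
 * The four unmap checks above share a pattern; a helper along these
 * lines could factor it out (a sketch, not used by the code; the name
 * is illustrative):
 */
static inline void pnv_xive_unmap_subregion(MemoryRegion *parent,
                                            MemoryRegion *mr)
{
    /* Removing an unmapped subregion is an error, so check first */
    if (memory_region_is_mapped(mr)) {
        memory_region_del_subregion(parent, mr);
    }
}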
1699 
1700 static void pnv_xive_init(Object *obj)
1701 {
1702     PnvXive *xive = PNV_XIVE(obj);
1703 
1704     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1705                             sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
1706                             &error_abort, NULL);
1707     object_initialize_child(obj, "end_source", &xive->end_source,
1708                             sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
1709                             &error_abort, NULL);
1710 }
1711 
1712 /*
1713  * Maximum number of IRQs and ENDs supported by HW
1714  */
1715 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1716 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
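
/*
 * Worked example of the arithmetic above, assuming the 512 GiB VC
 * window of the POWER9 MMIO layout: each source spans two 64K ESB
 * pages, i.e. 1ull << XIVE_ESB_64K_2PAGE = 128 KiB, so
 *
 *   2^39 bytes / 2^17 bytes = 2^22 = 4M possible IRQs (and ENDs)
 */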
1717 
1718 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1719 {
1720     PnvXive *xive = PNV_XIVE(dev);
1721     XiveSource *xsrc = &xive->ipi_source;
1722     XiveENDSource *end_xsrc = &xive->end_source;
1723     Error *local_err = NULL;
1724 
1725     assert(xive->chip);
1726 
1727     /*
1728      * The XiveSource and XiveENDSource objects are realized with the
1729      * maximum allowed HW configuration. The ESB MMIO regions will be
1730      * resized dynamically when the controller is configured by the FW
1731      * to limit accesses to resources not provisioned.
1732      */
1733     object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
1734                             &error_fatal);
1735     object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
1736                              &error_abort);
1737     object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
1738     if (local_err) {
1739         error_propagate(errp, local_err);
1740         return;
1741     }
1742 
1743     object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
1744                             &error_fatal);
1745     object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
1746                              &error_abort);
1747     object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
1748     if (local_err) {
1749         error_propagate(errp, local_err);
1750         return;
1751     }
1752 
1753     /* Default page size. Generally changed at runtime to 64k */
1754     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1755 
1756     /* XSCOM region, used for initial configuration of the BARs */
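    /*
     * The XSCOM size is expressed in 8-byte registers; the << 3 below
     * converts it to the byte-granular window size that
     * memory_region_init_io() expects.
     */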
1757     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1758                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1759 
1760     /* Interrupt controller MMIO regions */
1761     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1762                        PNV9_XIVE_IC_SIZE);
1763 
1764     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1765                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1766     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1767                           &pnv_xive_ic_notify_ops,
1768                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1769 
1770     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1771     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1772                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1773 
1774     /* Thread Interrupt Management Area (Indirect) */
1775     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
1776                           &xive_tm_indirect_ops,
1777                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1778     /*
1779      * Overall Virtualization Controller MMIO region containing the
1780      * IPI ESB pages and END ESB pages. The layout is defined by the
1781      * EDT "Domain table" and the accesses are dispatched using
1782      * address spaces for each.
1783      */
1784     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
1785                           "xive-vc", PNV9_XIVE_VC_SIZE);
1786 
1787     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
1788                        PNV9_XIVE_VC_SIZE);
1789     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
1790     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
1791                        PNV9_XIVE_VC_SIZE);
1792     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
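
    /*
     * VC accesses are then forwarded to one of these address spaces.
     * A minimal sketch of the dispatch, assuming the EDT lookup
     * resolved the segment type (illustrative; the real routing lives
     * in the pnv_xive_vc_ops handlers):
     *
     *   AddressSpace *as = edt_type == CQ_TDR_EDT_IPI ?
     *       &xive->ipi_as : &xive->end_as;
     *   address_space_stq(as, offset, val, MEMTXATTRS_UNSPECIFIED,
     *                     &result);
     */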
1793 
1794     /*
1795      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
1796      * VC region. Their size is configured by the FW in the EDT table.
1797      */
1798     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
1799     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
1800 
1801     /* Presenter Controller MMIO region (not modeled) */
1802     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
1803                           "xive-pc", PNV9_XIVE_PC_SIZE);
1804 
1805     /* Thread Interrupt Management Area (Direct) */
1806     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
1807                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
1808 
1809     qemu_register_reset(pnv_xive_reset, dev);
1810 }
1811 
1812 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1813                              int xscom_offset)
1814 {
1815     const char compat[] = "ibm,power9-xive-x";
1816     char *name;
1817     int offset;
1818     uint32_t pcba = PNV9_XSCOM_XIVE_BASE;
1819     uint32_t reg[] = {
1820         cpu_to_be32(pcba),
1821         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1822     };
1823 
1824     name = g_strdup_printf("xive@%x", pcba);
1825     offset = fdt_add_subnode(fdt, xscom_offset, name);
1826     _FDT(offset);
1827     g_free(name);
1828 
1829     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1830     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1831                       sizeof(compat))));
1832     return 0;
1833 }
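
/*
 * The resulting device tree node looks roughly like this (addresses
 * illustrative, taken from the PNV9 XSCOM layout):
 *
 *   xive@5013000 {
 *       reg = <0x5013000 0x300>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */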
1834 
1835 static Property pnv_xive_properties[] = {
1836     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
1837     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
1838     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
1839     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
1840     /* The PnvChip id identifies the XIVE interrupt controller. */
1841     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
1842     DEFINE_PROP_END_OF_LIST(),
1843 };
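
/*
 * Illustrative wiring of these properties (a sketch; the actual code
 * lives in the PowerNV machine, hw/ppc/pnv.c, and may differ in
 * detail):
 *
 *   object_property_set_int(OBJECT(xive), ic_base, "ic-bar",
 *                           &error_fatal);
 *   object_property_set_link(OBJECT(xive), OBJECT(chip), "chip",
 *                            &error_abort);
 *   object_property_set_bool(OBJECT(xive), true, "realized",
 *                            &error_fatal);
 */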
1844 
1845 static void pnv_xive_class_init(ObjectClass *klass, void *data)
1846 {
1847     DeviceClass *dc = DEVICE_CLASS(klass);
1848     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1849     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1850     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1851     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1852 
1853     xdc->dt_xscom = pnv_xive_dt_xscom;
1854 
1855     dc->desc = "PowerNV XIVE Interrupt Controller";
1856     dc->realize = pnv_xive_realize;
1857     dc->props = pnv_xive_properties;
1858 
1859     xrc->get_eas = pnv_xive_get_eas;
1860     xrc->get_end = pnv_xive_get_end;
1861     xrc->write_end = pnv_xive_write_end;
1862     xrc->get_nvt = pnv_xive_get_nvt;
1863     xrc->write_nvt = pnv_xive_write_nvt;
1864     xrc->get_tctx = pnv_xive_get_tctx;
1865 
1866     xnc->notify = pnv_xive_notify;
1867     xpc->match_nvt = pnv_xive_match_nvt;
1868 }
1869 
1870 static const TypeInfo pnv_xive_info = {
1871     .name          = TYPE_PNV_XIVE,
1872     .parent        = TYPE_XIVE_ROUTER,
1873     .instance_init = pnv_xive_init,
1874     .instance_size = sizeof(PnvXive),
1875     .class_init    = pnv_xive_class_init,
1876     .interfaces    = (InterfaceInfo[]) {
1877         { TYPE_PNV_XSCOM_INTERFACE },
1878         { }
1879     }
1880 };
1881 
1882 static void pnv_xive_register_types(void)
1883 {
1884     type_register_static(&pnv_xive_info);
1885 }
1886 
1887 type_init(pnv_xive_register_types)
1888