1 /*
2  * QEMU PowerPC XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2019, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/pnv.h"
21 #include "hw/ppc/pnv_core.h"
22 #include "hw/ppc/pnv_xscom.h"
23 #include "hw/ppc/pnv_xive.h"
24 #include "hw/ppc/xive_regs.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/ppc/ppc.h"
27 
28 #include <libfdt.h>
29 
30 #include "pnv_xive_regs.h"
31 
32 #undef XIVE_DEBUG
33 
34 /*
35  * Virtual structures table (VST)
36  */
37 #define SBE_PER_BYTE   4
38 
39 typedef struct XiveVstInfo {
40     const char *name;
41     uint32_t    size;
42     uint32_t    max_blocks;
43 } XiveVstInfo;
44 
45 static const XiveVstInfo vst_infos[] = {
46     [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
47     [VST_TSEL_SBE]  = { "SBE",  1,               16 },
48     [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
49     [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
50 
51     /*
     * Interrupt fifo backing store table (not modeled):
53      *
54      * 0 - IPI,
55      * 1 - HWD,
56      * 2 - First escalate,
57      * 3 - Second escalate,
58      * 4 - Redistribution,
59      * 5 - IPI cascaded queue ?
60      */
61     [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
62 };
63 
64 #define xive_error(xive, fmt, ...)                                      \
65     qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)
67 
68 /*
69  * QEMU version of the GETFIELD/SETFIELD macros
70  *
71  * TODO: It might be better to use the existing extract64() and
72  * deposit64() but this means that all the register definitions will
73  * change and become incompatible with the ones found in skiboot.
74  *
75  * Keep it as it is for now until we find a common ground.
76  */
77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
78 {
79     return (word & mask) >> ctz64(mask);
80 }
81 
82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
83                                 uint64_t value)
84 {
85     return (word & ~mask) | ((value << ctz64(mask)) & mask);
86 }
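
/*
 * A short usage sketch with illustrative values: GETFIELD() shifts the
 * masked bits down and SETFIELD() does the reverse, so the two compose
 * as expected for any contiguous mask such as VSD_TSIZE :
 *
 *   uint64_t vsd = SETFIELD(VSD_TSIZE, 0ull, 4);          // 64K pages
 *   uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;  // -> 16
 */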
87 
88 /*
89  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
90  * of the chips is good enough.
91  *
92  * TODO: Block scope support
93  */
94 static PnvXive *pnv_xive_get_ic(uint8_t blk)
95 {
96     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
97     int i;
98 
99     for (i = 0; i < pnv->num_chips; i++) {
100         Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
101         PnvXive *xive = &chip9->xive;
102 
103         if (xive->chip->chip_id == blk) {
104             return xive;
105         }
106     }
107     return NULL;
108 }
109 
110 /*
111  * VST accessors for SBE, EAT, ENDT, NVT
112  *
 * Indirect VST tables are arrays of VSDs pointing to pages of the
 * same size. Each page is a direct VST table.
115  */
116 
117 #define XIVE_VSD_SIZE 8
118 
119 /* Indirect page size can be 4K, 64K, 2M, 16M. */
static bool pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
125 
126 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
127                                          uint64_t vsd, uint32_t idx)
128 {
129     const XiveVstInfo *info = &vst_infos[type];
130     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
131     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
132     uint32_t idx_max;
133 
134     idx_max = vst_tsize / info->size - 1;
135     if (idx > idx_max) {
136 #ifdef XIVE_DEBUG
137         xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
138                    info->name, idx, idx_max);
139 #endif
140         return 0;
141     }
142 
143     return vst_addr + idx * info->size;
144 }
145 
146 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
147                                            uint64_t vsd, uint32_t idx)
148 {
149     const XiveVstInfo *info = &vst_infos[type];
150     uint64_t vsd_addr;
151     uint32_t vsd_idx;
152     uint32_t page_shift;
153     uint32_t vst_per_page;
154 
155     /* Get the page size of the indirect table. */
156     vsd_addr = vsd & VSD_ADDRESS_MASK;
157     vsd = ldq_be_dma(&address_space_memory, vsd_addr);
158 
159     if (!(vsd & VSD_ADDRESS_MASK)) {
160 #ifdef XIVE_DEBUG
161         xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
162 #endif
163         return 0;
164     }
165 
166     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
167 
168     if (!pnv_xive_vst_page_size_allowed(page_shift)) {
169         xive_error(xive, "VST: invalid %s page shift %d", info->name,
170                    page_shift);
171         return 0;
172     }
173 
174     vst_per_page = (1ull << page_shift) / info->size;
175     vsd_idx = idx / vst_per_page;
176 
177     /* Load the VSD we are looking for, if not already done */
178     if (vsd_idx) {
179         vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
180         vsd = ldq_be_dma(&address_space_memory, vsd_addr);
181 
182         if (!(vsd & VSD_ADDRESS_MASK)) {
183 #ifdef XIVE_DEBUG
184             xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
185 #endif
186             return 0;
187         }
188 
189         /*
190          * Check that the pages have a consistent size across the
191          * indirect table
192          */
193         if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
195                        info->name, idx);
196             return 0;
197         }
198     }
199 
200     return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
201 }
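
/*
 * A worked example with illustrative values: assume an indirect ENDT
 * with 64K pages (page_shift = 16) and 32-byte ENDs. Each page holds
 * vst_per_page = 65536 / 32 = 2048 entries, so a lookup of END index
 * 5000 loads the VSD in slot 5000 / 2048 = 2 of the indirect table
 * and then reads entry 5000 % 2048 = 904 of the page it points to.
 */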
202 
203 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
204                                   uint32_t idx)
205 {
206     const XiveVstInfo *info = &vst_infos[type];
207     uint64_t vsd;
208 
209     if (blk >= info->max_blocks) {
210         xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
211                    blk, info->name, idx);
212         return 0;
213     }
214 
215     vsd = xive->vsds[type][blk];
216 
217     /* Remote VST access */
218     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
219         xive = pnv_xive_get_ic(blk);
220 
221         return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
222     }
223 
224     if (VSD_INDIRECT & vsd) {
225         return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
226     }
227 
228     return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
229 }
230 
231 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
232                              uint32_t idx, void *data)
233 {
234     const XiveVstInfo *info = &vst_infos[type];
235     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
236 
237     if (!addr) {
238         return -1;
239     }
240 
241     cpu_physical_memory_read(addr, data, info->size);
242     return 0;
243 }
244 
245 #define XIVE_VST_WORD_ALL -1
246 
247 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
248                               uint32_t idx, void *data, uint32_t word_number)
249 {
250     const XiveVstInfo *info = &vst_infos[type];
251     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
252 
253     if (!addr) {
254         return -1;
255     }
256 
257     if (word_number == XIVE_VST_WORD_ALL) {
258         cpu_physical_memory_write(addr, data, info->size);
259     } else {
260         cpu_physical_memory_write(addr + word_number * 4,
261                                   data + word_number * 4, 4);
262     }
263     return 0;
264 }
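
/*
 * For instance, the cache watch facilities below push a full entry
 * with XIVE_VST_WORD_ALL, while a caller updating only word 1 of an
 * END would trigger a single 4-byte write at offset 4.
 */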
265 
266 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
267                             XiveEND *end)
268 {
269     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
270 }
271 
272 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
273                               XiveEND *end, uint8_t word_number)
274 {
275     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
276                               word_number);
277 }
278 
279 static int pnv_xive_end_update(PnvXive *xive)
280 {
281     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
282                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
283     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
284                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
285     int i;
286     uint64_t eqc_watch[4];
287 
288     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
289         eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
290     }
291 
292     return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
293                               XIVE_VST_WORD_ALL);
294 }
295 
296 static void pnv_xive_end_cache_load(PnvXive *xive)
297 {
298     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
299                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
300     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
301                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
302     uint64_t eqc_watch[4] = { 0 };
303     int i;
304 
305     if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
306         xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
307     }
308 
309     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
310         xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
311     }
312 }
313 
314 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
315                             XiveNVT *nvt)
316 {
317     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
318 }
319 
320 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
321                               XiveNVT *nvt, uint8_t word_number)
322 {
323     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
324                               word_number);
325 }
326 
327 static int pnv_xive_nvt_update(PnvXive *xive)
328 {
329     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
330                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
331     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
332                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
333     int i;
334     uint64_t vpc_watch[8];
335 
336     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
337         vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
338     }
339 
340     return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
341                               XIVE_VST_WORD_ALL);
342 }
343 
344 static void pnv_xive_nvt_cache_load(PnvXive *xive)
345 {
346     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
347                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
348     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
349                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
350     uint64_t vpc_watch[8] = { 0 };
351     int i;
352 
353     if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
354         xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
355     }
356 
357     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
358         xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
359     }
360 }
361 
362 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
363                             XiveEAS *eas)
364 {
365     PnvXive *xive = PNV_XIVE(xrtr);
366 
367     if (pnv_xive_get_ic(blk) != xive) {
368         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
369         return -1;
370     }
371 
372     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
373 }
374 
375 /*
 * One bit per thread id. The first register, PC_THREAD_EN_REG0, covers
 * cores 0-15 (normal) or 0-7 (fused) of the chip. The second register
 * covers cores 16-23 (normal) or 8-11 (fused).
379  */
380 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
381 {
382     int pir = ppc_cpu_pir(cpu);
383     uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
384     uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
385     uint32_t bit = pir & 0x3f;
386 
387     return xive->regs[reg >> 3] & PPC_BIT(bit);
388 }
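
/*
 * For example, a thread with PIR 0x23 on a normal core chip has its
 * enablement bit at PPC_BIT(0x23) of PC_THREAD_EN_REG0, assuming
 * PNV9_PIR2FUSEDCORE(0x23) is below 8.
 */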
389 
390 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
391                               uint8_t nvt_blk, uint32_t nvt_idx,
392                               bool cam_ignore, uint8_t priority,
393                               uint32_t logic_serv, XiveTCTXMatch *match)
394 {
395     PnvXive *xive = PNV_XIVE(xptr);
396     PnvChip *chip = xive->chip;
397     int count = 0;
398     int i, j;
399 
400     for (i = 0; i < chip->nr_cores; i++) {
401         PnvCore *pc = chip->cores[i];
402         CPUCore *cc = CPU_CORE(pc);
403 
404         for (j = 0; j < cc->nr_threads; j++) {
405             PowerPCCPU *cpu = pc->threads[j];
406             XiveTCTX *tctx;
407             int ring;
408 
409             if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
410                 continue;
411             }
412 
413             tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
414 
415             /*
416              * Check the thread context CAM lines and record matches.
417              */
418             ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
419                                              nvt_idx, cam_ignore, logic_serv);
420             /*
             * Save the context and carry on to catch duplicates, which
             * we don't support yet.
423              */
424             if (ring != -1) {
425                 if (match->tctx) {
426                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
427                                   "thread context NVT %x/%x\n",
428                                   nvt_blk, nvt_idx);
429                     return -1;
430                 }
431 
432                 match->ring = ring;
433                 match->tctx = tctx;
434                 count++;
435             }
436         }
437     }
438 
439     return count;
440 }
441 
442 /*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
446  */
447 static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
448 {
449     int pir = ppc_cpu_pir(cpu);
450     PnvChip *chip;
451     PnvXive *xive;
452 
453     chip = pnv_get_chip(PNV9_PIR2CHIP(pir));
454     assert(chip);
455     xive = &PNV9_CHIP(chip)->xive;
456 
457     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
458         xive_error(xive, "IC: CPU %x is not enabled", pir);
459     }
460     return xive;
461 }
462 
463 /*
464  * The internal sources (IPIs) of the interrupt controller have no
465  * knowledge of the XIVE chip on which they reside. Encode the block
466  * id in the source interrupt number before forwarding the source
467  * event notification to the Router. This is required on a multichip
468  * system.
469  */
470 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
471 {
472     PnvXive *xive = PNV_XIVE(xn);
473     uint8_t blk = xive->chip->chip_id;
474 
475     xive_router_notify(xn, XIVE_EAS(blk, srcno));
476 }
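
/*
 * As a sketch of the encoding (see XIVE_EAS_BLOCK and XIVE_EAS_INDEX
 * in xive_regs.h): the block id lives in the top bits of the 32-bit
 * IRQ number, so local source 0x20 of chip 1 is presented to the
 * Router as XIVE_EAS(1, 0x20).
 */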
477 
478 /*
479  * XIVE helpers
480  */
481 
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    /* The BARM register holds a mask: ~mask + 1 gives the BAR size */
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}
486 
487 static uint64_t pnv_xive_edt_shift(PnvXive *xive)
488 {
489     return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
490 }
491 
492 static uint64_t pnv_xive_pc_size(PnvXive *xive)
493 {
494     return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
495 }
496 
497 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
498 {
499     uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
500     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
501 
502     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
503 }
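
/*
 * Example with illustrative numbers: a direct SBE table of a single
 * 4K page (VSD_TSIZE = 0) backs the 2-bit PQ state of 4096 *
 * SBE_PER_BYTE = 16384 IPIs.
 */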
504 
505 /*
506  * EDT Table
507  *
508  * The Virtualization Controller MMIO region containing the IPI ESB
509  * pages and END ESB pages is sub-divided into "sets" which map
510  * portions of the VC region to the different ESB pages. It is
511  * configured at runtime through the EDT "Domain Table" to let the
512  * firmware decide how to split the VC address space between IPI ESB
513  * pages and END ESB pages.
514  */
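
/*
 * A small worked example, assuming a 64M VC region and 64 EDT sets of
 * 1M each: if set 0 is typed IPI and set 1 is typed EQ, then VC
 * offset 0x150000 (in set 1) remaps to offset 0x50000 of the END ESB
 * region, the IPI-typed set below it being skipped by
 * pnv_xive_edt_offset().
 */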
515 
516 /*
517  * Computes the overall size of the IPI or the END ESB pages
518  */
519 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
520 {
521     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
522     uint64_t size = 0;
523     int i;
524 
525     for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
526         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
527 
528         if (edt_type == type) {
529             size += edt_size;
530         }
531     }
532 
533     return size;
534 }
535 
536 /*
 * Maps an offset of the VC region into the IPI or END region using
 * the layout defined by the EDT "Domain Table"
539  */
540 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
541                                               uint64_t type)
542 {
543     int i;
544     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
545     uint64_t edt_offset = vc_offset;
546 
547     for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
548         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
549 
550         if (edt_type != type) {
551             edt_offset -= edt_size;
552         }
553     }
554 
555     return edt_offset;
556 }
557 
558 static void pnv_xive_edt_resize(PnvXive *xive)
559 {
560     uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
561     uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
562 
563     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
564     memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
565 
566     memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
567     memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
568 }
569 
570 /*
571  * XIVE Table configuration. Only EDT is supported.
572  */
573 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
574 {
575     uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
576     uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
577     uint64_t *xive_table;
578     uint8_t max_index;
579 
580     switch (tsel) {
581     case CQ_TAR_TSEL_BLK:
582         max_index = ARRAY_SIZE(xive->blk);
583         xive_table = xive->blk;
584         break;
585     case CQ_TAR_TSEL_MIG:
586         max_index = ARRAY_SIZE(xive->mig);
587         xive_table = xive->mig;
588         break;
589     case CQ_TAR_TSEL_EDT:
590         max_index = ARRAY_SIZE(xive->edt);
591         xive_table = xive->edt;
592         break;
593     case CQ_TAR_TSEL_VDT:
594         max_index = ARRAY_SIZE(xive->vdt);
595         xive_table = xive->vdt;
596         break;
597     default:
598         xive_error(xive, "IC: invalid table %d", (int) tsel);
599         return -1;
600     }
601 
602     if (tsel_index >= max_index) {
603         xive_error(xive, "IC: invalid index %d", (int) tsel_index);
604         return -1;
605     }
606 
607     xive_table[tsel_index] = val;
608 
609     if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
610         xive->regs[CQ_TAR >> 3] =
611             SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
612     }
613 
614     /*
615      * EDT configuration is complete. Resize the MMIO windows exposing
616      * the IPI and the END ESBs in the VC region.
617      */
618     if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
619         pnv_xive_edt_resize(xive);
620     }
621 
622     return 0;
623 }
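
/*
 * The FW would typically program a table using the auto-increment
 * feature. A rough sketch, reusing the model's own handler for
 * illustration, 'edt_entries[]' being hypothetical values to load :
 *
 *   pnv_xive_ic_reg_write(xive, CQ_TAR,
 *                         CQ_TAR_TBL_AUTOINC | CQ_TAR_TSEL_EDT, 8);
 *   for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
 *       pnv_xive_ic_reg_write(xive, CQ_TDR, edt_entries[i], 8);
 *   }
 */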
624 
625 /*
626  * Virtual Structure Tables (VST) configuration
627  */
628 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
629                                        uint8_t blk, uint64_t vsd)
630 {
631     XiveENDSource *end_xsrc = &xive->end_source;
632     XiveSource *xsrc = &xive->ipi_source;
633     const XiveVstInfo *info = &vst_infos[type];
634     uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
635     uint64_t vst_tsize = 1ull << page_shift;
636     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
637 
638     /* Basic checks */
639 
640     if (VSD_INDIRECT & vsd) {
641         if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
642             xive_error(xive, "VST: %s indirect tables are not enabled",
643                        info->name);
644             return;
645         }
646 
647         if (!pnv_xive_vst_page_size_allowed(page_shift)) {
648             xive_error(xive, "VST: invalid %s page shift %d", info->name,
649                        page_shift);
650             return;
651         }
652     }
653 
654     if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
655         xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
656                    " page shift %d", info->name, vst_addr, page_shift);
657         return;
658     }
659 
660     /* Record the table configuration (in SRAM on HW) */
661     xive->vsds[type][blk] = vsd;
662 
663     /* Now tune the models with the configuration provided by the FW */
664 
665     switch (type) {
666     case VST_TSEL_IVT:  /* Nothing to be done */
667         break;
668 
669     case VST_TSEL_EQDT:
670         /*
671          * Backing store pages for the END.
672          *
673          * If the table is direct, we can compute the number of PQ
674          * entries provisioned by FW (such as skiboot) and resize the
675          * END ESB window accordingly.
676          */
677         if (!(VSD_INDIRECT & vsd)) {
678             memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
679                                    * (1ull << xsrc->esb_shift));
680         }
681         memory_region_add_subregion(&xive->end_edt_mmio, 0,
682                                     &end_xsrc->esb_mmio);
683         break;
684 
685     case VST_TSEL_SBE:
686         /*
687          * Backing store pages for the source PQ bits. The model does
688          * not use these PQ bits backed in RAM because the XiveSource
689          * model has its own.
690          *
691          * If the table is direct, we can compute the number of PQ
692          * entries provisioned by FW (such as skiboot) and resize the
693          * ESB window accordingly.
694          */
695         if (!(VSD_INDIRECT & vsd)) {
696             memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
697                                    * (1ull << xsrc->esb_shift));
698         }
699         memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
700         break;
701 
702     case VST_TSEL_VPDT: /* Not modeled */
703     case VST_TSEL_IRQ:  /* Not modeled */
704         /*
         * These tables contain the backing store pages for the
706          * interrupt fifos of the VC sub-engine in case of overflow.
707          */
708         break;
709 
710     default:
711         g_assert_not_reached();
712     }
713 }
714 
715 /*
 * Both the PC and VC sub-engines are configured, as each uses the
 * Virtual Structure Tables: SBE, EAS, END and NVT.
718  */
719 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
720 {
721     uint8_t mode = GETFIELD(VSD_MODE, vsd);
722     uint8_t type = GETFIELD(VST_TABLE_SELECT,
723                             xive->regs[VC_VSD_TABLE_ADDR >> 3]);
724     uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
725                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
726     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
727 
728     if (type > VST_TSEL_IRQ) {
729         xive_error(xive, "VST: invalid table type %d", type);
730         return;
731     }
732 
733     if (blk >= vst_infos[type].max_blocks) {
734         xive_error(xive, "VST: invalid block id %d for"
735                       " %s table", blk, vst_infos[type].name);
736         return;
737     }
738 
739     /*
740      * Only take the VC sub-engine configuration into account because
741      * the XiveRouter model combines both VC and PC sub-engines
742      */
743     if (pc_engine) {
744         return;
745     }
746 
747     if (!vst_addr) {
748         xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
749         return;
750     }
751 
752     switch (mode) {
753     case VSD_MODE_FORWARD:
754         xive->vsds[type][blk] = vsd;
755         break;
756 
757     case VSD_MODE_EXCLUSIVE:
758         pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
759         break;
760 
761     default:
762         xive_error(xive, "VST: unsupported table mode %d", mode);
763         return;
764     }
765 }
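
/*
 * A VSD value programmed by the FW could look as follows (sketch with
 * illustrative field values, masks as defined in pnv_xive_regs.h,
 * 'addr' being a hypothetical backing store address) :
 *
 *   uint64_t vsd = SETFIELD(VSD_MODE, 0ull, VSD_MODE_EXCLUSIVE) |
 *       (addr & VSD_ADDRESS_MASK) |       // backing store address
 *       SETFIELD(VSD_TSIZE, 0ull, 4);     // 64K table size
 */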
766 
767 /*
768  * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
770  *
771  * Page 0           sub-engine BARs
772  *  0x000 - 0x3FF   IC registers
773  *  0x400 - 0x7FF   PC registers
774  *  0x800 - 0xFFF   VC registers
775  *
776  * Page 1           Notify page (writes only)
777  *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
778  *  0x800 - 0xFFF   forwards and syncs
779  *
780  * Page 2           LSI Trigger page (writes only) (not modeled)
781  * Page 3           LSI SB EOI page (reads only) (not modeled)
782  *
783  * Page 4-7         indirect TIMA
784  */
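
/*
 * With the BAR programmed for 64K pages (ic_shift = 16), for example,
 * the notify page sits at ic_base + (1 << 16) and the indirect TIMA
 * pages start at ic_base + (4 << 16), matching the subregion offsets
 * programmed in pnv_xive_ic_reg_write() below.
 */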
785 
786 /*
787  * IC - registers MMIO
788  */
789 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
790                                   uint64_t val, unsigned size)
791 {
792     PnvXive *xive = PNV_XIVE(opaque);
793     MemoryRegion *sysmem = get_system_memory();
794     uint32_t reg = offset >> 3;
795     bool is_chip0 = xive->chip->chip_id == 0;
796 
797     switch (offset) {
798 
799     /*
800      * XIVE CQ (PowerBus bridge) settings
801      */
802     case CQ_MSGSND:     /* msgsnd for doorbells */
803     case CQ_FIRMASK_OR: /* FIR error reporting */
804         break;
805     case CQ_PBI_CTL:
806         if (val & CQ_PBI_PC_64K) {
807             xive->pc_shift = 16;
808         }
809         if (val & CQ_PBI_VC_64K) {
810             xive->vc_shift = 16;
811         }
812         break;
813     case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
814         /*
815          * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
816          */
817         break;
818 
819     /*
820      * XIVE Virtualization Controller settings
821      */
822     case VC_GLOBAL_CONFIG:
823         break;
824 
825     /*
826      * XIVE Presenter Controller settings
827      */
828     case PC_GLOBAL_CONFIG:
829         /*
830          * PC_GCONF_CHIPID_OVR
831          *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
832          */
833         break;
834     case PC_TCTXT_CFG:
835         /*
836          * TODO: block group support
837          *
838          * PC_TCTXT_CFG_BLKGRP_EN
839          * PC_TCTXT_CFG_HARD_CHIPID_BLK :
840          *   Moves the chipid into block field for hardwired CAM compares.
841          *   Block offset value is adjusted to 0b0..01 & ThrdId
842          *
843          *   Will require changes in xive_presenter_tctx_match(). I am
844          *   not sure how to handle that yet.
845          */
846 
847         /* Overrides hardwired chip ID with the chip ID field */
848         if (val & PC_TCTXT_CHIPID_OVERRIDE) {
849             xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
850         }
851         break;
852     case PC_TCTXT_TRACK:
853         /*
854          * PC_TCTXT_TRACK_EN:
855          *   enable block tracking and exchange of block ownership
856          *   information between Interrupt controllers
857          */
858         break;
859 
860     /*
861      * Misc settings
862      */
863     case VC_SBC_CONFIG: /* Store EOI configuration */
864         /*
         * Configure store EOI if required by firmware (skiboot has removed
866          * support recently though)
867          */
868         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
869             xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
870         }
871         break;
872 
873     case VC_EQC_CONFIG: /* TODO: silent escalation */
874     case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
875         break;
876 
877     /*
878      * XIVE BAR settings (XSCOM only)
879      */
880     case CQ_RST_CTL:
881         /* bit4: resets all BAR registers */
882         break;
883 
884     case CQ_IC_BAR: /* IC BAR. 8 pages */
885         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
886         if (!(val & CQ_IC_BAR_VALID)) {
887             xive->ic_base = 0;
888             if (xive->regs[reg] & CQ_IC_BAR_VALID) {
889                 memory_region_del_subregion(&xive->ic_mmio,
890                                             &xive->ic_reg_mmio);
891                 memory_region_del_subregion(&xive->ic_mmio,
892                                             &xive->ic_notify_mmio);
893                 memory_region_del_subregion(&xive->ic_mmio,
894                                             &xive->ic_lsi_mmio);
895                 memory_region_del_subregion(&xive->ic_mmio,
896                                             &xive->tm_indirect_mmio);
897 
898                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
899             }
900         } else {
901             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
902             if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
903                 memory_region_add_subregion(sysmem, xive->ic_base,
904                                             &xive->ic_mmio);
905 
906                 memory_region_add_subregion(&xive->ic_mmio,  0,
907                                             &xive->ic_reg_mmio);
908                 memory_region_add_subregion(&xive->ic_mmio,
909                                             1ul << xive->ic_shift,
910                                             &xive->ic_notify_mmio);
911                 memory_region_add_subregion(&xive->ic_mmio,
912                                             2ul << xive->ic_shift,
913                                             &xive->ic_lsi_mmio);
914                 memory_region_add_subregion(&xive->ic_mmio,
915                                             4ull << xive->ic_shift,
916                                             &xive->tm_indirect_mmio);
917             }
918         }
919         break;
920 
921     case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
922     case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
923         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
924         if (!(val & CQ_TM_BAR_VALID)) {
925             xive->tm_base = 0;
926             if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
927                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
928             }
929         } else {
930             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
931             if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
932                 memory_region_add_subregion(sysmem, xive->tm_base,
933                                             &xive->tm_mmio);
934             }
935         }
936         break;
937 
938     case CQ_PC_BARM:
939         xive->regs[reg] = val;
940         memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
941         break;
942     case CQ_PC_BAR: /* From 32M to 512G */
943         if (!(val & CQ_PC_BAR_VALID)) {
944             xive->pc_base = 0;
945             if (xive->regs[reg] & CQ_PC_BAR_VALID) {
946                 memory_region_del_subregion(sysmem, &xive->pc_mmio);
947             }
948         } else {
949             xive->pc_base = val & ~(CQ_PC_BAR_VALID);
950             if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
951                 memory_region_add_subregion(sysmem, xive->pc_base,
952                                             &xive->pc_mmio);
953             }
954         }
955         break;
956 
957     case CQ_VC_BARM:
958         xive->regs[reg] = val;
959         memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
960         break;
961     case CQ_VC_BAR: /* From 64M to 4TB */
962         if (!(val & CQ_VC_BAR_VALID)) {
963             xive->vc_base = 0;
964             if (xive->regs[reg] & CQ_VC_BAR_VALID) {
965                 memory_region_del_subregion(sysmem, &xive->vc_mmio);
966             }
967         } else {
968             xive->vc_base = val & ~(CQ_VC_BAR_VALID);
969             if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
970                 memory_region_add_subregion(sysmem, xive->vc_base,
971                                             &xive->vc_mmio);
972             }
973         }
974         break;
975 
976     /*
977      * XIVE Table settings.
978      */
979     case CQ_TAR: /* Table Address */
980         break;
981     case CQ_TDR: /* Table Data */
982         pnv_xive_table_set_data(xive, val);
983         break;
984 
985     /*
986      * XIVE VC & PC Virtual Structure Table settings
987      */
988     case VC_VSD_TABLE_ADDR:
989     case PC_VSD_TABLE_ADDR: /* Virtual table selector */
990         break;
991     case VC_VSD_TABLE_DATA: /* Virtual table setting */
992     case PC_VSD_TABLE_DATA:
993         pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
994         break;
995 
996     /*
997      * Interrupt fifo overflow in memory backing store (Not modeled)
998      */
999     case VC_IRQ_CONFIG_IPI:
1000     case VC_IRQ_CONFIG_HW:
1001     case VC_IRQ_CONFIG_CASCADE1:
1002     case VC_IRQ_CONFIG_CASCADE2:
1003     case VC_IRQ_CONFIG_REDIST:
1004     case VC_IRQ_CONFIG_IPI_CASC:
1005         break;
1006 
1007     /*
1008      * XIVE hardware thread enablement
1009      */
1010     case PC_THREAD_EN_REG0: /* Physical Thread Enable */
1011     case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
1012         break;
1013 
1014     case PC_THREAD_EN_REG0_SET:
1015         xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
1016         break;
1017     case PC_THREAD_EN_REG1_SET:
1018         xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
1019         break;
1020     case PC_THREAD_EN_REG0_CLR:
1021         xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
1022         break;
1023     case PC_THREAD_EN_REG1_CLR:
1024         xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
1025         break;
1026 
1027     /*
     * Indirect TIMA access setup. Defines the PIR of the HW thread
1029      * to use.
1030      */
1031     case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
1032         break;
1033 
1034     /*
1035      * XIVE PC & VC cache updates for EAS, NVT and END
1036      */
1037     case VC_IVC_SCRUB_MASK:
1038     case VC_IVC_SCRUB_TRIG:
1039         break;
1040 
1041     case VC_EQC_CWATCH_SPEC:
1042         val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
1043         break;
1044     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1045         break;
1046     case VC_EQC_CWATCH_DAT0:
1047         /* writing to DATA0 triggers the cache write */
1048         xive->regs[reg] = val;
1049         pnv_xive_end_update(xive);
1050         break;
1051     case VC_EQC_SCRUB_MASK:
1052     case VC_EQC_SCRUB_TRIG:
1053         /*
1054          * The scrubbing registers flush the cache in RAM and can also
1055          * invalidate.
1056          */
1057         break;
1058 
1059     case PC_VPC_CWATCH_SPEC:
1060         val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
1061         break;
1062     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1063         break;
1064     case PC_VPC_CWATCH_DAT0:
1065         /* writing to DATA0 triggers the cache write */
1066         xive->regs[reg] = val;
1067         pnv_xive_nvt_update(xive);
1068         break;
1069     case PC_VPC_SCRUB_MASK:
1070     case PC_VPC_SCRUB_TRIG:
1071         /*
1072          * The scrubbing registers flush the cache in RAM and can also
1073          * invalidate.
1074          */
1075         break;
1076 
1077 
1078     /*
1079      * XIVE PC & VC cache invalidation
1080      */
1081     case PC_AT_KILL:
1082         break;
1083     case VC_AT_MACRO_KILL:
1084         break;
1085     case PC_AT_KILL_MASK:
1086     case VC_AT_MACRO_KILL_MASK:
1087         break;
1088 
1089     default:
1090         xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
1091         return;
1092     }
1093 
1094     xive->regs[reg] = val;
1095 }
1096 
1097 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
1098 {
1099     PnvXive *xive = PNV_XIVE(opaque);
1100     uint64_t val = 0;
1101     uint32_t reg = offset >> 3;
1102 
1103     switch (offset) {
1104     case CQ_CFG_PB_GEN:
1105     case CQ_IC_BAR:
1106     case CQ_TM1_BAR:
1107     case CQ_TM2_BAR:
1108     case CQ_PC_BAR:
1109     case CQ_PC_BARM:
1110     case CQ_VC_BAR:
1111     case CQ_VC_BARM:
1112     case CQ_TAR:
1113     case CQ_TDR:
1114     case CQ_PBI_CTL:
1115 
1116     case PC_TCTXT_CFG:
1117     case PC_TCTXT_TRACK:
1118     case PC_TCTXT_INDIR0:
1119     case PC_TCTXT_INDIR1:
1120     case PC_TCTXT_INDIR2:
1121     case PC_TCTXT_INDIR3:
1122     case PC_GLOBAL_CONFIG:
1123 
1124     case PC_VPC_SCRUB_MASK:
1125 
1126     case VC_GLOBAL_CONFIG:
1127     case VC_AIB_TX_ORDER_TAG2:
1128 
1129     case VC_IRQ_CONFIG_IPI:
1130     case VC_IRQ_CONFIG_HW:
1131     case VC_IRQ_CONFIG_CASCADE1:
1132     case VC_IRQ_CONFIG_CASCADE2:
1133     case VC_IRQ_CONFIG_REDIST:
1134     case VC_IRQ_CONFIG_IPI_CASC:
1135 
1136     case VC_EQC_SCRUB_MASK:
1137     case VC_IVC_SCRUB_MASK:
1138     case VC_SBC_CONFIG:
1139     case VC_AT_MACRO_KILL_MASK:
1140     case VC_VSD_TABLE_ADDR:
1141     case PC_VSD_TABLE_ADDR:
1142     case VC_VSD_TABLE_DATA:
1143     case PC_VSD_TABLE_DATA:
1144     case PC_THREAD_EN_REG0:
1145     case PC_THREAD_EN_REG1:
1146         val = xive->regs[reg];
1147         break;
1148 
1149     /*
1150      * XIVE hardware thread enablement
1151      */
1152     case PC_THREAD_EN_REG0_SET:
1153     case PC_THREAD_EN_REG0_CLR:
1154         val = xive->regs[PC_THREAD_EN_REG0 >> 3];
1155         break;
1156     case PC_THREAD_EN_REG1_SET:
1157     case PC_THREAD_EN_REG1_CLR:
1158         val = xive->regs[PC_THREAD_EN_REG1 >> 3];
1159         break;
1160 
1161     case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
1162         val = 0xffffff0000000000;
1163         break;
1164 
1165     /*
1166      * XIVE PC & VC cache updates for EAS, NVT and END
1167      */
1168     case VC_EQC_CWATCH_SPEC:
1169         xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
1170         val = xive->regs[reg];
1171         break;
1172     case VC_EQC_CWATCH_DAT0:
1173         /*
1174          * Load DATA registers from cache with data requested by the
1175          * SPEC register
1176          */
1177         pnv_xive_end_cache_load(xive);
1178         val = xive->regs[reg];
1179         break;
1180     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1181         val = xive->regs[reg];
1182         break;
1183 
1184     case PC_VPC_CWATCH_SPEC:
1185         xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
1186         val = xive->regs[reg];
1187         break;
1188     case PC_VPC_CWATCH_DAT0:
1189         /*
1190          * Load DATA registers from cache with data requested by the
1191          * SPEC register
1192          */
1193         pnv_xive_nvt_cache_load(xive);
1194         val = xive->regs[reg];
1195         break;
1196     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1197         val = xive->regs[reg];
1198         break;
1199 
1200     case PC_VPC_SCRUB_TRIG:
1201     case VC_IVC_SCRUB_TRIG:
1202     case VC_EQC_SCRUB_TRIG:
1203         xive->regs[reg] &= ~VC_SCRUB_VALID;
1204         val = xive->regs[reg];
1205         break;
1206 
1207     /*
1208      * XIVE PC & VC cache invalidation
1209      */
1210     case PC_AT_KILL:
1211         xive->regs[reg] &= ~PC_AT_KILL_VALID;
1212         val = xive->regs[reg];
1213         break;
1214     case VC_AT_MACRO_KILL:
1215         xive->regs[reg] &= ~VC_KILL_VALID;
1216         val = xive->regs[reg];
1217         break;
1218 
1219     /*
1220      * XIVE synchronisation
1221      */
1222     case VC_EQC_CONFIG:
1223         val = VC_EQC_SYNC_MASK;
1224         break;
1225 
1226     default:
1227         xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
1228     }
1229 
1230     return val;
1231 }
1232 
1233 static const MemoryRegionOps pnv_xive_ic_reg_ops = {
1234     .read = pnv_xive_ic_reg_read,
1235     .write = pnv_xive_ic_reg_write,
1236     .endianness = DEVICE_BIG_ENDIAN,
1237     .valid = {
1238         .min_access_size = 8,
1239         .max_access_size = 8,
1240     },
1241     .impl = {
1242         .min_access_size = 8,
1243         .max_access_size = 8,
1244     },
1245 };
1246 
1247 /*
1248  * IC - Notify MMIO port page (write only)
1249  */
1250 #define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
1251 #define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
1252 #define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
1253 #define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
1254 #define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
1255 #define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
1256 #define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
1257 #define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */
1258 
1259 /* VC synchronisation */
1260 #define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
1261 #define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
1262 #define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
1263 #define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
1264 #define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */
1265 
1266 /* PC synchronisation */
1267 #define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
1268 #define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
1269 #define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
1270 
1271 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
1272 {
1273     uint8_t blk;
1274     uint32_t idx;
1275 
1276     if (val & XIVE_TRIGGER_END) {
1277         xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1278                    addr, val);
1279         return;
1280     }
1281 
1282     /*
1283      * Forward the source event notification directly to the Router.
1284      * The source interrupt number should already be correctly encoded
1285      * with the chip block id by the sending device (PHB, PSI).
1286      */
1287     blk = XIVE_EAS_BLOCK(val);
1288     idx = XIVE_EAS_INDEX(val);
1289 
1290     xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
1291 }
1292 
1293 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1294                                      unsigned size)
1295 {
1296     PnvXive *xive = PNV_XIVE(opaque);
1297 
1298     /* VC: HW triggers */
1299     switch (addr) {
1300     case 0x000 ... 0x7FF:
1301         pnv_xive_ic_hw_trigger(opaque, addr, val);
1302         break;
1303 
1304     /* VC: Forwarded IRQs */
1305     case PNV_XIVE_FORWARD_IPI:
1306     case PNV_XIVE_FORWARD_HW:
1307     case PNV_XIVE_FORWARD_OS_ESC:
1308     case PNV_XIVE_FORWARD_HW_ESC:
1309     case PNV_XIVE_FORWARD_REDIS:
1310         /* TODO: forwarded IRQs. Should be like HW triggers */
1311         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1312                    addr, val);
1313         break;
1314 
1315     /* VC syncs */
1316     case PNV_XIVE_SYNC_IPI:
1317     case PNV_XIVE_SYNC_HW:
1318     case PNV_XIVE_SYNC_OS_ESC:
1319     case PNV_XIVE_SYNC_HW_ESC:
1320     case PNV_XIVE_SYNC_REDIS:
1321         break;
1322 
1323     /* PC syncs */
1324     case PNV_XIVE_SYNC_PULL:
1325     case PNV_XIVE_SYNC_PUSH:
1326     case PNV_XIVE_SYNC_VPC:
1327         break;
1328 
1329     default:
1330         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1331     }
1332 }
1333 
1334 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1335                                         unsigned size)
1336 {
1337     PnvXive *xive = PNV_XIVE(opaque);
1338 
1339     /* loads are invalid */
1340     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1341     return -1;
1342 }
1343 
1344 static const MemoryRegionOps pnv_xive_ic_notify_ops = {
1345     .read = pnv_xive_ic_notify_read,
1346     .write = pnv_xive_ic_notify_write,
1347     .endianness = DEVICE_BIG_ENDIAN,
1348     .valid = {
1349         .min_access_size = 8,
1350         .max_access_size = 8,
1351     },
1352     .impl = {
1353         .min_access_size = 8,
1354         .max_access_size = 8,
1355     },
1356 };
1357 
1358 /*
1359  * IC - LSI MMIO handlers (not modeled)
1360  */
1361 
1362 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1363                               uint64_t val, unsigned size)
1364 {
1365     PnvXive *xive = PNV_XIVE(opaque);
1366 
1367     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1368 }
1369 
1370 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1371 {
1372     PnvXive *xive = PNV_XIVE(opaque);
1373 
1374     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1375     return -1;
1376 }
1377 
1378 static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
1379     .read = pnv_xive_ic_lsi_read,
1380     .write = pnv_xive_ic_lsi_write,
1381     .endianness = DEVICE_BIG_ENDIAN,
1382     .valid = {
1383         .min_access_size = 8,
1384         .max_access_size = 8,
1385     },
1386     .impl = {
1387         .min_access_size = 8,
1388         .max_access_size = 8,
1389     },
1390 };
1391 
1392 /*
1393  * IC - Indirect TIMA MMIO handlers
1394  */
1395 
1396 /*
1397  * When the TIMA is accessed from the indirect page, the thread id of
1398  * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
1400  */
1401 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1402 {
1403     PnvChip *chip = xive->chip;
1404     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1405     PowerPCCPU *cpu = NULL;
1406     int pir;
1407 
1408     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1409         xive_error(xive, "IC: no indirect TIMA access in progress");
1410         return NULL;
1411     }
1412 
1413     pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
1414     cpu = pnv_chip_find_cpu(chip, pir);
1415     if (!cpu) {
1416         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1417         return NULL;
1418     }
1419 
1420     /* Check that HW thread is XIVE enabled */
1421     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
1422         xive_error(xive, "IC: CPU %x is not enabled", pir);
1423     }
1424 
1425     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1426 }
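
/*
 * A rough sketch of the FW sequence, with an illustrative thread id:
 * latch the target thread first, then access the indirect TIMA pages
 * of the IC BAR :
 *
 *   pnv_xive_ic_reg_write(xive, PC_TCTXT_INDIR0,
 *                         PC_TCTXT_INDIR_VALID |
 *                         SETFIELD(PC_TCTXT_INDIR_THRDID, 0ull, 0x8), 8);
 *   ... loads and stores in the indirect TIMA pages ...
 */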
1427 
1428 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1429                                    uint64_t value, unsigned size)
1430 {
1431     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1432 
1433     xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
1434 }
1435 
1436 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1437                                       unsigned size)
1438 {
1439     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1440 
1441     return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
1442 }
1443 
1444 static const MemoryRegionOps xive_tm_indirect_ops = {
1445     .read = xive_tm_indirect_read,
1446     .write = xive_tm_indirect_write,
1447     .endianness = DEVICE_BIG_ENDIAN,
1448     .valid = {
1449         .min_access_size = 1,
1450         .max_access_size = 8,
1451     },
1452     .impl = {
1453         .min_access_size = 1,
1454         .max_access_size = 8,
1455     },
1456 };
1457 
1458 static void pnv_xive_tm_write(void *opaque, hwaddr offset,
1459                               uint64_t value, unsigned size)
1460 {
1461     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1462     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1463     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1464 
1465     xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
1466 }
1467 
1468 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
1469 {
1470     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1471     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1472     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1473 
1474     return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
1475 }
1476 
1477 const MemoryRegionOps pnv_xive_tm_ops = {
1478     .read = pnv_xive_tm_read,
1479     .write = pnv_xive_tm_write,
1480     .endianness = DEVICE_BIG_ENDIAN,
1481     .valid = {
1482         .min_access_size = 1,
1483         .max_access_size = 8,
1484     },
1485     .impl = {
1486         .min_access_size = 1,
1487         .max_access_size = 8,
1488     },
1489 };
1490 
1491 /*
1492  * Interrupt controller XSCOM region.
1493  */
1494 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1495 {
1496     switch (addr >> 3) {
1497     case X_VC_EQC_CONFIG:
1498         /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
1499         return VC_EQC_SYNC_MASK;
1500     default:
1501         return pnv_xive_ic_reg_read(opaque, addr, size);
1502     }
1503 }
1504 
1505 static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
1506                                 uint64_t val, unsigned size)
1507 {
1508     pnv_xive_ic_reg_write(opaque, addr, val, size);
1509 }
1510 
1511 static const MemoryRegionOps pnv_xive_xscom_ops = {
1512     .read = pnv_xive_xscom_read,
1513     .write = pnv_xive_xscom_write,
1514     .endianness = DEVICE_BIG_ENDIAN,
1515     .valid = {
1516         .min_access_size = 8,
1517         .max_access_size = 8,
1518     },
1519     .impl = {
1520         .min_access_size = 8,
1521         .max_access_size = 8,
1522     }
1523 };
1524 
1525 /*
1526  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1527  */
1528 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1529                                  unsigned size)
1530 {
1531     PnvXive *xive = PNV_XIVE(opaque);
1532     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1533     uint64_t edt_type = 0;
1534     uint64_t edt_offset;
1535     MemTxResult result;
1536     AddressSpace *edt_as = NULL;
1537     uint64_t ret = -1;
1538 
1539     if (edt_index < XIVE_TABLE_EDT_MAX) {
1540         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1541     }
1542 
1543     switch (edt_type) {
1544     case CQ_TDR_EDT_IPI:
1545         edt_as = &xive->ipi_as;
1546         break;
1547     case CQ_TDR_EDT_EQ:
1548         edt_as = &xive->end_as;
1549         break;
1550     default:
1551         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1552         return -1;
1553     }
1554 
1555     /* Remap the offset for the targeted address space */
1556     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1557 
1558     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1559                             &result);
1560 
1561     if (result != MEMTX_OK) {
1562         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1563                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1564                    offset, edt_offset);
1565         return -1;
1566     }
1567 
1568     return ret;
1569 }
1570 
1571 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1572                               uint64_t val, unsigned size)
1573 {
1574     PnvXive *xive = PNV_XIVE(opaque);
1575     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1576     uint64_t edt_type = 0;
1577     uint64_t edt_offset;
1578     MemTxResult result;
1579     AddressSpace *edt_as = NULL;
1580 
1581     if (edt_index < XIVE_TABLE_EDT_MAX) {
1582         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1583     }
1584 
1585     switch (edt_type) {
1586     case CQ_TDR_EDT_IPI:
1587         edt_as = &xive->ipi_as;
1588         break;
1589     case CQ_TDR_EDT_EQ:
1590         edt_as = &xive->end_as;
1591         break;
1592     default:
1593         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1594                    offset);
1595         return;
1596     }
1597 
1598     /* Remap the offset for the targeted address space */
1599     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1600 
1601     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1602     if (result != MEMTX_OK) {
1603         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1604     }
1605 }
1606 
1607 static const MemoryRegionOps pnv_xive_vc_ops = {
1608     .read = pnv_xive_vc_read,
1609     .write = pnv_xive_vc_write,
1610     .endianness = DEVICE_BIG_ENDIAN,
1611     .valid = {
1612         .min_access_size = 8,
1613         .max_access_size = 8,
1614     },
1615     .impl = {
1616         .min_access_size = 8,
1617         .max_access_size = 8,
1618     },
1619 };
1620 
1621 /*
1622  * Presenter Controller MMIO region. The Virtualization Controller
1623  * updates the IPB in the NVT table when required. Not modeled.
1624  */
1625 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1626                                  unsigned size)
1627 {
1628     PnvXive *xive = PNV_XIVE(opaque);
1629 
1630     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1631     return -1;
1632 }
1633 
1634 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1635                               uint64_t value, unsigned size)
1636 {
1637     PnvXive *xive = PNV_XIVE(opaque);
1638 
1639     xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1640 }
1641 
1642 static const MemoryRegionOps pnv_xive_pc_ops = {
1643     .read = pnv_xive_pc_read,
1644     .write = pnv_xive_pc_write,
1645     .endianness = DEVICE_BIG_ENDIAN,
1646     .valid = {
1647         .min_access_size = 8,
1648         .max_access_size = 8,
1649     },
1650     .impl = {
1651         .min_access_size = 8,
1652         .max_access_size = 8,
1653     },
1654 };
1655 
1656 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1657 {
1658     XiveRouter *xrtr = XIVE_ROUTER(xive);
1659     uint8_t blk = xive->chip->chip_id;
1660     uint32_t srcno0 = XIVE_EAS(blk, 0);
1661     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1662     XiveEAS eas;
1663     XiveEND end;
1664     int i;
1665 
1666     monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
1667                    srcno0 + nr_ipis - 1);
1668     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1669 
1670     monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
1671                    srcno0 + nr_ipis - 1);
1672     for (i = 0; i < nr_ipis; i++) {
1673         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1674             break;
1675         }
1676         if (!xive_eas_is_masked(&eas)) {
1677             xive_eas_pic_print_info(&eas, i, mon);
1678         }
1679     }
1680 
1681     monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear the subregions mapped at runtime by the FW */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
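
/*
 * Each source (IPI interrupt or END) is backed by a pair of 64K ESB
 * pages (trigger and management), hence the division of the VC BAR
 * size by 128K (1ull << XIVE_ESB_64K_2PAGE) in the two macros above.
 */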

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
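    /*
     * PNV9_XSCOM_XIVE_SIZE is a register count; the << 3 converts it
     * to a byte-addressed MMIO size, 8 bytes per XSCOM register.
     */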

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
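    /*
     * Both windows start empty (size 0) and are resized when the FW
     * loads the EDT through the table XSCOM registers. See the EDT
     * update path earlier in this file.
     */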

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

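/*
 * Populate a "xive" node under the chip's xscom node. The "reg"
 * property gives the controller's base address and size in the XSCOM
 * address space, and the compatible string is what the FW (skiboot)
 * uses to probe the controller.
 */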
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)