/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
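/* 2 ESB PQ state bits per source: one SBE byte covers 4 sources */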
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt FIFO backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
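
/*
 * Example (illustrative): with mask 0x00ff000000000000ull,
 * GETFIELD(mask, 0x0012000000000000ull) returns 0x12, and
 * SETFIELD(mask, 0, 0x12) returns 0x0012000000000000ull. The shift
 * count is the number of trailing zeros of the mask.
 */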

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
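
/*
 * Illustrative layout: with 64K indirect pages and 32-byte ENDs, one
 * page holds 2048 entries; entry 'idx' is then found in the page
 * pointed to by VSD number idx / 2048, at offset (idx % 2048) * 32.
 */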

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

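/*
 * A 'word_number' of XIVE_VST_WORD_ALL writes the whole entry;
 * otherwise only the selected 4-byte word of the entry is written
 * back.
 */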
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip (normal mode) or cores 0-7 (fused mode). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR of the accessing CPU.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}

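/*
 * Note: XIVE_EAS() encodes the block id in the upper bits of the
 * global IRQ number; XIVE_EAS_BLOCK()/XIVE_EAS_INDEX() in the HW
 * trigger path below decode it again.
 */
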
/*
 * XIVE helpers
 */

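/*
 * The BARM registers hold the mask of a BAR: its two's complement,
 * restricted to the mask field, gives the size of the MMIO region.
 */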
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s table !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

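/*
 * Each EDT entry maps one set (of size VC size / XIVE_TABLE_EDT_MAX)
 * of the VC region, in order, onto either the IPI or the END ESB
 * space according to its CQ_TDR_EDT_TYPE field.
 */
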
/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
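
/*
 * E.g. (illustrative) if sets 0 and 2 are IPI and set 1 is END, a VC
 * access in set 2 lands at (vc_offset - edt_size) within the IPI
 * space: the loop above subtracts the size of each preceding set of a
 * different type.
 */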

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured, as each uses the Virtual
 * Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio,  0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
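
/*
 * These offsets all fall in the second half (0x800 - 0xFFF) of the
 * notify page, the "forwards and syncs" area of the IC layout above;
 * offsets 0x000 - 0x7FF carry the HW interrupt triggers.
 */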

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1704 
1705 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
1706                                     Monitor *mon)
1707 {
1708     uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
1709     uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);
1710 
1711     if (!xive_nvt_is_valid(nvt)) {
1712         return;
1713     }
1714 
1715     monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
1716                    eq_blk, eq_idx,
1717                    xive_get_field32(NVT_W4_IPB, nvt->w4));
1718 }
1719 
1720 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1721 {
1722     XiveRouter *xrtr = XIVE_ROUTER(xive);
1723     uint8_t blk = pnv_xive_block_id(xive);
1724     uint8_t chip_id = xive->chip->chip_id;
1725     uint32_t srcno0 = XIVE_EAS(blk, 0);
1726     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1727     XiveEAS eas;
1728     XiveEND end;
1729     XiveNVT nvt;
1730     int i;
1731     uint64_t xive_nvt_per_subpage;
1732 
1733     monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
1734                    srcno0, srcno0 + nr_ipis - 1);
1735     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1736 
1737     monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
1738                    srcno0, srcno0 + nr_ipis - 1);
1739     for (i = 0; i < nr_ipis; i++) {
1740         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1741             break;
1742         }
1743         if (!xive_eas_is_masked(&eas)) {
1744             xive_eas_pic_print_info(&eas, i, mon);
1745         }
1746     }
1747 
1748     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
1749     i = 0;
1750     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1751         xive_end_pic_print_info(&end, i++, mon);
1752     }
1753 
1754     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
1755     i = 0;
1756     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1757         xive_end_eas_pic_print_info(&end, i++, mon);
1758     }
1759 
1760     monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
1761                    0, XIVE_NVT_COUNT - 1);
1762     xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
1763     for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
1764         while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
1765             xive_nvt_pic_print_info(&nvt, i++, mon);
1766         }
1767     }
1768 }
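
/*
 * For reference, a hypothetical excerpt of the monitor output produced
 * by pnv_xive_pic_print_info() (all values here are invented; only the
 * layout follows the format strings above):
 *
 *   XIVE[0] #0 Source 00000000 .. 00000fff
 *   XIVE[0] #0 EAT 00000000 .. 00000fff
 *   XIVE[0] #0 ENDT
 *   XIVE[0] #0 END Escalation EAT
 *   XIVE[0] #0 NVTT 00000000 .. 0007ffff
 *     00000080 end:00/0010 IPB:00
 */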
1769 
1770 static void pnv_xive_reset(void *dev)
1771 {
1772     PnvXive *xive = PNV_XIVE(dev);
1773     XiveSource *xsrc = &xive->ipi_source;
1774     XiveENDSource *end_xsrc = &xive->end_source;
1775 
1776     /* Default page size (should be changed at runtime to 64k) */
1777     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1778 
1779     /* Clear subregions */
1780     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1781         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1782     }
1783 
1784     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1785         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1786     }
1787 
1788     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1789         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1790     }
1791 
1792     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1793         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1794     }
1795 }
1796 
1797 static void pnv_xive_init(Object *obj)
1798 {
1799     PnvXive *xive = PNV_XIVE(obj);
1800 
1801     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1802                             TYPE_XIVE_SOURCE);
1803     object_initialize_child(obj, "end_source", &xive->end_source,
1804                             TYPE_XIVE_END_SOURCE);
1805 }
1806 
1807 /*
1808  *  Maximum number of IRQs and ENDs supported by HW
1809  */
1810 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1811 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
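
/*
 * A worked example of the sizing above (a sketch; the constant values
 * are assumptions, not restated from the headers): with
 * PNV9_XIVE_VC_SIZE = 0x0000008000000000ull (512 GB) and
 * XIVE_ESB_64K_2PAGE = 17 (each source uses two 64K ESB pages),
 *
 *   PNV_XIVE_NR_IRQS = 2^39 / 2^17 = 2^22 = 4M interrupts
 *
 * PNV_XIVE_NR_ENDS evaluates to the same 4M, since both divide the VC
 * window size by the per-source ESB footprint.
 */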
1812 
1813 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1814 {
1815     PnvXive *xive = PNV_XIVE(dev);
1816     PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
1817     XiveSource *xsrc = &xive->ipi_source;
1818     XiveENDSource *end_xsrc = &xive->end_source;
1819     Error *local_err = NULL;
1820 
1821     pxc->parent_realize(dev, &local_err);
1822     if (local_err) {
1823         error_propagate(errp, local_err);
1824         return;
1825     }
1826 
1827     assert(xive->chip);
1828 
1829     /*
1830      * The XiveSource and XiveENDSource objects are realized with the
1831      * maximum allowed HW configuration. The ESB MMIO regions will be
1832      * resized dynamically when the controller is configured by the FW
1833      * to limit accesses to resources not provisioned.
1834      */
1835     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
1836                             &error_fatal);
1837     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
1838     if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
1839         return;
1840     }
1841 
1842     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
1843                             &error_fatal);
1844     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1845                              &error_abort);
1846     if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
1847         return;
1848     }
1849 
1850     /* Default page size. Generally changed at runtime to 64k */
1851     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1852 
1853     /* XSCOM region, used for initial configuration of the BARs */
1854     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1855                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1856 
1857     /* Interrupt controller MMIO regions */
1858     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1859                        PNV9_XIVE_IC_SIZE);
1860 
1861     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1862                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1863     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1864                           &pnv_xive_ic_notify_ops,
1865                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1866 
1867     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1868     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1869                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1870 
1871     /* Thread Interrupt Management Area (Indirect) */
1872     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
1873                           &xive_tm_indirect_ops,
1874                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1875     /*
1876      * Overall Virtualization Controller MMIO region containing the
1877      * IPI ESB pages and END ESB pages. The layout is defined by the
1878      * EDT "Domain table" and the accesses are dispatched using
1879      * address spaces for each.
1880      */
1881     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
1882                           "xive-vc", PNV9_XIVE_VC_SIZE);
1883 
1884     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
1885                        PNV9_XIVE_VC_SIZE);
1886     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
1887     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
1888                        PNV9_XIVE_VC_SIZE);
1889     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
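
    /*
     * A sketch of how pnv_xive_vc_read/write dispatch an access,
     * assuming equal EDT segments of PNV9_XIVE_VC_SIZE /
     * XIVE_TABLE_EDT_MAX bytes and a FW layout placing IPI ESBs in the
     * first segment and END ESBs in the second (the actual layout is
     * whatever FW programs in the EDT):
     *
     *   edt_index = offset >> pnv_xive_edt_shift(xive);
     *   EDT[0] type CQ_TDR_EDT_IPI -> access remapped into ipi_as
     *   EDT[1] type CQ_TDR_EDT_EQ  -> access remapped into end_as
     */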
1890 
1891     /*
1892      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
1893      * VC region. Their size is configured by the FW in the EDT table.
1894      */
1895     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
1896     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
1897 
1898     /* Presenter Controller MMIO region (not modeled) */
1899     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
1900                           "xive-pc", PNV9_XIVE_PC_SIZE);
1901 
1902     /* Thread Interrupt Management Area (Direct) */
1903     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
1904                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
1905 
1906     qemu_register_reset(pnv_xive_reset, dev);
1907 }
1908 
1909 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1910                              int xscom_offset)
1911 {
1912     const char compat[] = "ibm,power9-xive-x";
1913     char *name;
1914     int offset;
1915     uint32_t xive_pcba = PNV9_XSCOM_XIVE_BASE;
1916     uint32_t reg[] = {
1917         cpu_to_be32(xive_pcba),
1918         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1919     };
1920 
1921     name = g_strdup_printf("xive@%x", xive_pcba);
1922     offset = fdt_add_subnode(fdt, xscom_offset, name);
1923     _FDT(offset);
1924     g_free(name);
1925 
1926     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1927     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1928                       sizeof(compat))));
1929     return 0;
1930 }
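
/*
 * With the usual P9 definitions (assumed values here:
 * PNV9_XSCOM_XIVE_BASE = 0x5013000, PNV9_XSCOM_XIVE_SIZE = 0x300),
 * the node built above would render in the device tree as:
 *
 *   xive@5013000 {
 *       reg = <0x5013000 0x300>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */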
1931 
1932 static Property pnv_xive_properties[] = {
1933     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
1934     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
1935     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
1936     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
1937     /* The PnvChip id identifies the XIVE interrupt controller. */
1938     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
1939     DEFINE_PROP_END_OF_LIST(),
1940 };
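
/*
 * The BAR and "chip" properties are expected to be set by the chip
 * model before realize, along these lines (a sketch, not the exact
 * pnv.c code; PNV9_XIVE_IC_BASE is an assumed helper):
 *
 *   object_property_set_int(OBJECT(xive), "ic-bar",
 *                           PNV9_XIVE_IC_BASE(chip), &error_fatal);
 *   object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                            &error_abort);
 */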
1941 
1942 static void pnv_xive_class_init(ObjectClass *klass, void *data)
1943 {
1944     DeviceClass *dc = DEVICE_CLASS(klass);
1945     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1946     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1947     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1948     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1949     PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);
1950 
1951     xdc->dt_xscom = pnv_xive_dt_xscom;
1952 
1953     dc->desc = "PowerNV XIVE Interrupt Controller";
1954     device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
1956     device_class_set_props(dc, pnv_xive_properties);
1957 
1958     xrc->get_eas = pnv_xive_get_eas;
1959     xrc->get_end = pnv_xive_get_end;
1960     xrc->write_end = pnv_xive_write_end;
1961     xrc->get_nvt = pnv_xive_get_nvt;
1962     xrc->write_nvt = pnv_xive_write_nvt;
1963     xrc->get_block_id = pnv_xive_get_block_id;
1964 
1965     xnc->notify = pnv_xive_notify;
1966     xpc->match_nvt = pnv_xive_match_nvt;
1967 }
1968 
1969 static const TypeInfo pnv_xive_info = {
1970     .name          = TYPE_PNV_XIVE,
1971     .parent        = TYPE_XIVE_ROUTER,
1972     .instance_init = pnv_xive_init,
1973     .instance_size = sizeof(PnvXive),
1974     .class_init    = pnv_xive_class_init,
1975     .class_size    = sizeof(PnvXiveClass),
1976     .interfaces    = (InterfaceInfo[]) {
1977         { TYPE_PNV_XSCOM_INTERFACE },
1978         { }
1979     }
1980 };
1981 
1982 static void pnv_xive_register_types(void)
1983 {
1984     type_register_static(&pnv_xive_info);
1985 }
1986 
1987 type_init(pnv_xive_register_types)
1988